Source file src/internal/runtime/maps/table.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package maps
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goarch"
    10  	"internal/runtime/math"
    11  	"unsafe"
    12  )
    13  
    14  // Maximum size of a table before it is split at the directory level.
    15  //
    16  // TODO: Completely made up value. This should be tuned for performance vs grow
    17  // latency.
    18  // TODO: This should likely be based on byte size, as copying costs will
    19  // dominate grow latency for large objects.
    20  const maxTableCapacity = 1024
    21  
    22  // Ensure the max capacity fits in uint16, used for capacity and growthLeft
    23  // below.
    24  var _ = uint16(maxTableCapacity)
    25  
    26  // table is a Swiss table hash table structure.
    27  //
    28  // Each table is a complete hash table implementation.
    29  //
    30  // Map uses one or more tables to store entries. Extendible hashing (hash
    31  // prefix) is used to select the table to use for a specific key. Using
    32  // multiple tables enables incremental growth by growing only one table at a
    33  // time.
    34  type table struct {
    35  	// The number of filled slots (i.e. the number of elements in the table).
    36  	used uint16
    37  
    38  	// The total number of slots (always 2^N). Equal to
    39  	// `(groups.lengthMask+1)*abi.MapGroupSlots`.
    40  	capacity uint16
    41  
    42  	// The number of slots we can still fill without needing to rehash.
    43  	//
    44  	// We rehash when used + tombstones > loadFactor*capacity, including
    45  	// tombstones so the table doesn't overfill with tombstones. This field
    46  	// counts down remaining empty slots before the next rehash.
    47  	growthLeft uint16
    48  
    49  	// The number of bits used by directory lookups above this table. Note
    50  	// that this may be less than globalDepth, if the directory has grown
    51  	// but this table has not yet been split.
    52  	localDepth uint8
    53  
    54  	// Index of this table in the Map directory. This is the index of the
    55  	// _first_ location in the directory. The table may occur in multiple
    56  	// sequential indices.
    57  	//
    58  	// index is -1 if the table is stale (no longer installed in the
    59  	// directory).
    60  	index int
    61  
    62  	// groups is an array of slot groups. Each group holds abi.MapGroupSlots
    63  	// key/elem slots and their control bytes. A table has a fixed size
    64  	// groups array. The table is replaced (in rehash) when more space is
    65  	// required.
    66  	//
    67  	// TODO(prattmic): keys and elements are interleaved to maximize
    68  	// locality, but it comes at the expense of wasted space for some types
    69  	// (consider uint8 key, uint64 element). Consider placing all keys
    70  	// together in these cases to save space.
    71  	groups groupsReference
    72  }
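
        // To make the extendible-hashing description above concrete: a minimal
        // sketch of how a directory index could be derived from a hash prefix,
        // assuming the directory has 1<<globalDepth entries and the top
        // globalDepth bits of the hash select the entry. The helper below is
        // illustrative only and is not part of this package's API.
        func directoryIndexSketch(hash uintptr, globalDepth uint8) uintptr {
        	if globalDepth == 0 {
        		return 0 // single-table map: the directory has one entry
        	}
        	if goarch.PtrSize == 4 {
        		return hash >> (32 - globalDepth)
        	}
        	return hash >> (64 - globalDepth)
        }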
    73  
    74  func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
    75  	if capacity < abi.MapGroupSlots {
    76  		capacity = abi.MapGroupSlots
    77  	}
    78  
    79  	t := &table{
    80  		index:      index,
    81  		localDepth: localDepth,
    82  	}
    83  
    84  	if capacity > maxTableCapacity {
    85  		panic("initial table capacity too large")
    86  	}
    87  
    88  	// N.B. group count must be a power of two for probeSeq to visit every
    89  	// group.
    90  	capacity, overflow := alignUpPow2(capacity)
    91  	if overflow {
    92  		panic("rounded-up capacity overflows uint64")
    93  	}
    94  
    95  	t.reset(typ, uint16(capacity))
    96  
    97  	return t
    98  }
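
        // alignUpPow2 (defined elsewhere in this package) rounds the requested
        // capacity up to a power of two and reports overflow. A minimal sketch
        // of the rounding it performs, with overflow handling omitted
        // (illustrative only):
        //
        //	roundUpPow2Sketch(12) == 16, roundUpPow2Sketch(16) == 16
        func roundUpPow2Sketch(n uint64) uint64 {
        	p := uint64(1)
        	for p < n {
        		p <<= 1 // double until p >= n
        	}
        	return p
        }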
    99  
   100  // reset resets the table with new, empty groups with the specified new total
   101  // capacity.
   102  func (t *table) reset(typ *abi.MapType, capacity uint16) {
   103  	groupCount := uint64(capacity) / abi.MapGroupSlots
   104  	t.groups = newGroups(typ, groupCount)
   105  	t.capacity = capacity
   106  	t.growthLeft = t.maxGrowthLeft()
   107  
   108  	for i := uint64(0); i <= t.groups.lengthMask; i++ {
   109  		g := t.groups.group(typ, i)
   110  		g.ctrls().setEmpty()
   111  	}
   112  }
   113  
   114  // maxGrowthLeft is the number of inserts we can do before
   115  // resizing, starting from an empty table.
   116  func (t *table) maxGrowthLeft() uint16 {
   117  	if t.capacity == 0 {
   118  		// No real reason to support zero capacity table, since an
   119  		// empty Map simply won't have a table.
   120  		panic("table must have positive capacity")
   121  	} else if t.capacity <= abi.MapGroupSlots {
   122  		// If the map fits in a single group then we're able to fill all of
   123  		// the slots except 1 (an empty slot is needed to terminate find
   124  		// operations).
   125  		//
   126  		// TODO(go.dev/issue/54766): With a special case in probing for
   127  		// single-group tables, we could fill all slots.
   128  		return t.capacity - 1
   129  	} else {
   130  		if t.capacity > math.MaxUint16/maxAvgGroupLoad {
   131  			panic("overflow")
   132  		}
   133  		return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
   134  	}
   135  
   136  }
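
        // For example, assuming abi.MapGroupSlots = 8 and maxAvgGroupLoad = 7
        // (i.e. a maximum load factor of 7/8):
        //
        //	capacity 8    -> growthLeft 7   (single group: every slot but one)
        //	capacity 64   -> growthLeft 56  (64 * 7 / 8)
        //	capacity 1024 -> growthLeft 896 (1024 * 7 / 8)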
   137  
   138  func (t *table) Used() uint64 {
   139  	return uint64(t.used)
   140  }
   141  
   142  // Get performs a lookup of the key that key points to. It returns a pointer to
   143  // the element, or false if the key doesn't exist.
   144  func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
   145  	// TODO(prattmic): We could avoid hashing in a variety of special
   146  	// cases.
   147  	//
   148  	// - One entry maps could just directly compare the single entry
   149  	//   without hashing.
   150  	// - String keys could do quick checks of a few bytes before hashing.
   151  	hash := typ.Hasher(key, m.seed)
   152  	_, elem, ok := t.getWithKey(typ, hash, key)
   153  	return elem, ok
   154  }
   155  
   156  // getWithKey performs a lookup of key, returning a pointer to the version of
   157  // the key in the map in addition to the element.
   158  //
   159  // This is relevant when multiple different key values compare equal (e.g.,
   160  // +0.0 and -0.0). When a grow occurs during iteration, iteration performs a
   161  // lookup of keys from the old group in the new tables in order to correctly
   162  // expose updated elements. For NeedKeyUpdate keys, iteration also must return
   163  // the new key value, not the old key value.
   164  // hash must be the hash of the key.
   165  func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
   166  	// To find the location of a key in the table, we compute hash(key). From
   167  	// h1(hash(key)) and the capacity, we construct a probeSeq that visits
   168  	// every group of slots in some interesting order. See [probeSeq].
   169  	//
   170  	// We walk through these indices. At each index, we select the entire
   171  	// group starting with that index and extract potential candidates:
   172  	// occupied slots with a control byte equal to h2(hash(key)). The key
   173  	// at candidate slot i is compared with key; if key == g.slot(i).key
   174  	// we are done and return the slot; if there is an empty slot in the
   175  // group, we stop and report the key as missing; otherwise we continue to the
   176  	// next probe index. Tombstones (ctrlDeleted) effectively behave like
   177  	// full slots that never match the value we're looking for.
   178  	//
   179  	// The h2 bits ensure when we compare a key we are likely to have
   180  	// actually found the object. That is, the chance is low that keys
   181  	// compare false. Thus, when we search for an object, we are unlikely
   182  	// to call Equal many times. This likelihood can be analyzed as follows
   183  	// (assuming that h2 is a random enough hash function).
   184  	//
   185  	// Let's assume that there are k "wrong" objects that must be examined
   186  	// in a probe sequence. For example, when doing a find on an object
   187  	// that is in the table, k is the number of objects between the start
   188  	// of the probe sequence and the final found object (not including the
   189  	// final found object). The expected number of objects with an h2 match
   190  	// is then k/128. Measurements and analysis indicate that even at high
   191  	// load factors, k is less than 32, meaning that the number of false
   192  	// positive comparisons we must perform is less than 1/8 per find.
   193  	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   194  	h2Hash := h2(hash)
   195  	for ; ; seq = seq.next() {
   196  		g := t.groups.group(typ, seq.offset)
   197  
   198  		match := g.ctrls().matchH2(h2Hash)
   199  
   200  		for match != 0 {
   201  			i := match.first()
   202  
   203  			slotKey := g.key(typ, i)
   204  			if typ.IndirectKey() {
   205  				slotKey = *((*unsafe.Pointer)(slotKey))
   206  			}
   207  			if typ.Key.Equal(key, slotKey) {
   208  				slotElem := g.elem(typ, i)
   209  				if typ.IndirectElem() {
   210  					slotElem = *((*unsafe.Pointer)(slotElem))
   211  				}
   212  				return slotKey, slotElem, true
   213  			}
   214  			match = match.removeFirst()
   215  		}
   216  
   217  		match = g.ctrls().matchEmpty()
   218  		if match != 0 {
   219  			// Finding an empty slot means we've reached the end of
   220  			// the probe sequence.
   221  			return nil, nil, false
   222  		}
   223  	}
   224  }
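
        // A minimal sketch of the h1/h2 split assumed by the analysis above:
        // h2 is a 7-bit fingerprint stored in the control byte (hence the
        // k/128 figure), and h1 is the remainder of the hash, which seeds the
        // probe sequence. The exact split lives in this package's control-word
        // code; the helpers below are illustrative only.
        func h1Sketch(hash uintptr) uintptr { return hash >> 7 }
        func h2Sketch(hash uintptr) uintptr { return hash & 0b111_1111 }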
   225  
   226  func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
   227  	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   228  	h2Hash := h2(hash)
   229  	for ; ; seq = seq.next() {
   230  		g := t.groups.group(typ, seq.offset)
   231  
   232  		match := g.ctrls().matchH2(h2Hash)
   233  
   234  		for match != 0 {
   235  			i := match.first()
   236  
   237  			slotKey := g.key(typ, i)
   238  			if typ.IndirectKey() {
   239  				slotKey = *((*unsafe.Pointer)(slotKey))
   240  			}
   241  			if typ.Key.Equal(key, slotKey) {
   242  				slotElem := g.elem(typ, i)
   243  				if typ.IndirectElem() {
   244  					slotElem = *((*unsafe.Pointer)(slotElem))
   245  				}
   246  				return slotElem, true
   247  			}
   248  			match = match.removeFirst()
   249  		}
   250  
   251  		match = g.ctrls().matchEmpty()
   252  		if match != 0 {
   253  			// Finding an empty slot means we've reached the end of
   254  			// the probe sequence.
   255  			return nil, false
   256  		}
   257  	}
   258  }
   259  
   260  // PutSlot returns a pointer to the element slot where an inserted element
   261  // should be written, and ok if it returned a valid slot.
   262  //
   263  // PutSlot returns ok false if the table was split and the Map needs to find
   264  // the new table.
   265  //
   266  // hash must be the hash of key.
   267  func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
   268  	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   269  
   270  	// As we look for a match, keep track of the first deleted slot we
   271  	// find, which we'll use to insert the new entry if necessary.
   272  	var firstDeletedGroup groupReference
   273  	var firstDeletedSlot uintptr
   274  
   275  	h2Hash := h2(hash)
   276  	for ; ; seq = seq.next() {
   277  		g := t.groups.group(typ, seq.offset)
   278  		match := g.ctrls().matchH2(h2Hash)
   279  
   280  		// Look for an existing slot containing this key.
   281  		for match != 0 {
   282  			i := match.first()
   283  
   284  			slotKey := g.key(typ, i)
   285  			if typ.IndirectKey() {
   286  				slotKey = *((*unsafe.Pointer)(slotKey))
   287  			}
   288  			if typ.Key.Equal(key, slotKey) {
   289  				if typ.NeedKeyUpdate() {
   290  					typedmemmove(typ.Key, slotKey, key)
   291  				}
   292  
   293  				slotElem := g.elem(typ, i)
   294  				if typ.IndirectElem() {
   295  					slotElem = *((*unsafe.Pointer)(slotElem))
   296  				}
   297  
   298  				t.checkInvariants(typ, m)
   299  				return slotElem, true
   300  			}
   301  			match = match.removeFirst()
   302  		}
   303  
   304  		// No existing slot for this key in this group. Is this the end
   305  		// of the probe sequence?
   306  		match = g.ctrls().matchEmptyOrDeleted()
   307  		if match == 0 {
   308  			continue // nothing but filled slots. Keep probing.
   309  		}
   310  		i := match.first()
   311  		if g.ctrls().get(i) == ctrlDeleted {
   312  			// There are some deleted slots. Remember
   313  			// the first one, and keep probing.
   314  			if firstDeletedGroup.data == nil {
   315  				firstDeletedGroup = g
   316  				firstDeletedSlot = i
   317  			}
   318  			continue
   319  		}
   320  		// We've found an empty slot, which means we've reached the end of
   321  		// the probe sequence.
   322  
   323  		// If we found a deleted slot along the way, we can
   324  		// replace it without consuming growthLeft.
   325  		if firstDeletedGroup.data != nil {
   326  			g = firstDeletedGroup
   327  			i = firstDeletedSlot
   328  			t.growthLeft++ // will be decremented below to become a no-op.
   329  		}
   330  
   331  		// If we have no space left, first try to remove some tombstones.
   332  		if t.growthLeft == 0 {
   333  			t.pruneTombstones(typ, m)
   334  		}
   335  
   336  		// If there is room left to grow, just insert the new entry.
   337  		if t.growthLeft > 0 {
   338  			slotKey := g.key(typ, i)
   339  			if typ.IndirectKey() {
   340  				kmem := newobject(typ.Key)
   341  				*(*unsafe.Pointer)(slotKey) = kmem
   342  				slotKey = kmem
   343  			}
   344  			typedmemmove(typ.Key, slotKey, key)
   345  
   346  			slotElem := g.elem(typ, i)
   347  			if typ.IndirectElem() {
   348  				emem := newobject(typ.Elem)
   349  				*(*unsafe.Pointer)(slotElem) = emem
   350  				slotElem = emem
   351  			}
   352  
   353  			g.ctrls().set(i, ctrl(h2Hash))
   354  			t.growthLeft--
   355  			t.used++
   356  			m.used++
   357  
   358  			t.checkInvariants(typ, m)
   359  			return slotElem, true
   360  		}
   361  
   362  		t.rehash(typ, m)
   363  		return nil, false
   364  	}
   365  }
   366  
   367  // uncheckedPutSlot inserts an entry known not to be in the table.
   368  // This is used for grow/split where we are making a new table from
   369  // entries in an existing table.
   370  //
   371  // Decrements growthLeft and increments used.
   372  //
   373  // Requires that the entry does not exist in the table, and that the table has
   374  // room for another element without rehashing.
   375  //
   376  // Requires that there are no deleted entries in the table.
   377  //
   378  // For indirect keys and/or elements, the key and elem pointers can be
   379  // put directly into the map; they do not need to be copied. This
   380  // requires the caller to ensure that the referenced memory never
   381  // changes (by sourcing those pointers from another indirect key/elem
   382  // map).
   383  func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
   384  	if t.growthLeft == 0 {
   385  		panic("invariant failed: growthLeft is unexpectedly 0")
   386  	}
   387  
   388  	// Given key and its hash hash(key), to insert it, we construct a
   389  	// probeSeq, and use it to find the first group with an unoccupied (empty
   390  	// or deleted) slot. We place the key/value into the first such slot in
   391  	// the group and mark it as full with key's H2.
   392  	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   393  	for ; ; seq = seq.next() {
   394  		g := t.groups.group(typ, seq.offset)
   395  
   396  		match := g.ctrls().matchEmptyOrDeleted()
   397  		if match != 0 {
   398  			i := match.first()
   399  
   400  			slotKey := g.key(typ, i)
   401  			if typ.IndirectKey() {
   402  				*(*unsafe.Pointer)(slotKey) = key
   403  			} else {
   404  				typedmemmove(typ.Key, slotKey, key)
   405  			}
   406  
   407  			slotElem := g.elem(typ, i)
   408  			if typ.IndirectElem() {
   409  				*(*unsafe.Pointer)(slotElem) = elem
   410  			} else {
   411  				typedmemmove(typ.Elem, slotElem, elem)
   412  			}
   413  
   414  			t.growthLeft--
   415  			t.used++
   416  			g.ctrls().set(i, ctrl(h2(hash)))
   417  			return
   418  		}
   419  	}
   420  }
   421  
   422  // Delete returns true if it put a tombstone in t.
   423  func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
   424  	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   425  	h2Hash := h2(hash)
   426  	for ; ; seq = seq.next() {
   427  		g := t.groups.group(typ, seq.offset)
   428  		match := g.ctrls().matchH2(h2Hash)
   429  
   430  		for match != 0 {
   431  			i := match.first()
   432  
   433  			slotKey := g.key(typ, i)
   434  			origSlotKey := slotKey
   435  			if typ.IndirectKey() {
   436  				slotKey = *((*unsafe.Pointer)(slotKey))
   437  			}
   438  
   439  			if typ.Key.Equal(key, slotKey) {
   440  				t.used--
   441  				m.used--
   442  
   443  				if typ.IndirectKey() {
   444  					// Clearing the pointer is sufficient.
   445  					*(*unsafe.Pointer)(origSlotKey) = nil
   446  				} else if typ.Key.Pointers() {
   447  					// Only bother clearing the key if there
   448  					// are pointers in it.
   449  					typedmemclr(typ.Key, slotKey)
   450  				}
   451  
   452  				slotElem := g.elem(typ, i)
   453  				if typ.IndirectElem() {
   454  					// Clearing the pointer is sufficient.
   455  					*(*unsafe.Pointer)(slotElem) = nil
   456  				} else {
   457  					// Unlike keys, always clear the elem (even if
   458  					// it contains no pointers), as compound
   459  					// assignment operations depend on cleared
   460  					// deleted values. See
   461  					// https://go.dev/issue/25936.
   462  					typedmemclr(typ.Elem, slotElem)
   463  				}
   464  
   465  				// Only a full group can appear in the middle
   466  				// of a probe sequence (a group with at least
   467  				// one empty slot terminates probing). Once a
   468  				// group becomes full, it stays full until
   469  				// rehashing/resizing. So if the group isn't
   470  				// full now, we can simply remove the element.
   471  				// Otherwise, we create a tombstone to mark the
   472  				// slot as deleted.
   473  				var tombstone bool
   474  				if g.ctrls().matchEmpty() != 0 {
   475  					g.ctrls().set(i, ctrlEmpty)
   476  					t.growthLeft++
   477  				} else {
   478  					g.ctrls().set(i, ctrlDeleted)
   479  					tombstone = true
   480  				}
   481  
   482  				t.checkInvariants(typ, m)
   483  				return tombstone
   484  			}
   485  			match = match.removeFirst()
   486  		}
   487  
   488  		match = g.ctrls().matchEmpty()
   489  		if match != 0 {
   490  			// Finding an empty slot means we've reached the end of
   491  			// the probe sequence.
   492  			return false
   493  		}
   494  	}
   495  }
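
        // A worked summary of the deletion rule above (illustrative only):
        //
        //	group has at least one empty slot -> mark the slot ctrlEmpty, growthLeft++
        //	group is completely full          -> mark the slot ctrlDeleted (tombstone)
        //
        // The tombstone keeps later lookups probing past this group, since the
        // key they are looking for may have been displaced into a later group
        // while this one was full.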
   496  
   497  // pruneTombstones goes through the table and tries to remove
   498  // tombstones that are no longer needed. Best effort.
   499  // Note that it only removes tombstones, it does not move elements.
   500  // Moving elements would do a better job but is infeasible due to
   501  // iterator semantics.
   502  //
   503  // Pruning should only succeed if it can remove O(n) tombstones.
   504  // It would be bad if we did O(n) work to find 1 tombstone to remove.
   505  // Then the next insert would spend another O(n) work to find 1 more
   506  // tombstone to remove, etc.
   507  //
   508  // We really need to remove O(n) tombstones so we can pay for the cost
   509  // of finding them. If we can't, then we need to grow (which is also O(n),
   510  // but guarantees O(n) subsequent inserts can happen in constant time).
   511  func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
   512  	if t.tombstones()*10 < t.capacity { // 10% of capacity
   513  		// Not enough tombstones to be worth the effort.
   514  		return
   515  	}
   516  
   517  	// Bit set marking all the groups whose tombstones are needed.
   518  	var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32
   519  
   520  	// Trace the probe sequence of every full entry.
   521  	for i := uint64(0); i <= t.groups.lengthMask; i++ {
   522  		g := t.groups.group(typ, i)
   523  		match := g.ctrls().matchFull()
   524  		for match != 0 {
   525  			j := match.first()
   526  			match = match.removeFirst()
   527  			key := g.key(typ, j)
   528  			if typ.IndirectKey() {
   529  				key = *((*unsafe.Pointer)(key))
   530  			}
   531  			if !typ.Key.Equal(key, key) {
   532  				// Key not equal to itself. We never have to find these
   533  				// keys on lookup (only on iteration), so we can break
   534  				// their probe sequences at will.
   535  				continue
   536  			}
   537  			// Walk probe sequence for this key.
   538  			// Each tombstone group we need to walk past is marked required.
   539  			hash := typ.Hasher(key, m.seed)
   540  			for seq := makeProbeSeq(h1(hash), t.groups.lengthMask); ; seq = seq.next() {
   541  				if seq.offset == i {
   542  					break // reached group of element in probe sequence
   543  				}
   544  				g := t.groups.group(typ, seq.offset)
   545  				m := g.ctrls().matchEmptyOrDeleted()
   546  				if m != 0 { // must be deleted, not empty, as we haven't found our key yet
   547  					// Mark this group's tombstone as required.
   548  					needed[seq.offset/32] |= 1 << (seq.offset % 32)
   549  				}
   550  			}
   551  		}
   552  		if g.ctrls().matchEmpty() != 0 {
   553  			// Also mark non-tombstone-containing groups, so we don't try
   554  			// to remove tombstones from them below.
   555  			needed[i/32] |= 1 << (i % 32)
   556  		}
   557  	}
   558  
   559  	// First, see if we can remove enough tombstones to restore capacity.
   560  	// This function is O(n), so only remove tombstones if we can remove
   561  	// enough of them to justify the O(n) cost.
   562  	cnt := 0
   563  	for i := uint64(0); i <= t.groups.lengthMask; i++ {
   564  		if needed[i/32]>>(i%32)&1 != 0 {
   565  			continue
   566  		}
   567  		g := t.groups.group(typ, i)
   568  		m := g.ctrls().matchEmptyOrDeleted() // must be deleted
   569  		cnt += m.count()
   570  	}
   571  	if cnt*10 < int(t.capacity) { // Can we restore 10% of capacity?
   572  		return // don't bother removing tombstones. Caller will grow instead.
   573  	}
   574  
   575  	// Prune unneeded tombstones.
   576  	for i := uint64(0); i <= t.groups.lengthMask; i++ {
   577  		if needed[i/32]>>(i%32)&1 != 0 {
   578  			continue
   579  		}
   580  		g := t.groups.group(typ, i)
   581  		m := g.ctrls().matchEmptyOrDeleted() // must be deleted
   582  		for m != 0 {
   583  			k := m.first()
   584  			m = m.removeFirst()
   585  			g.ctrls().set(k, ctrlEmpty)
   586  			t.growthLeft++
   587  		}
   588  		// TODO: maybe we could convert all slots at once
   589  		// using some bitvector trickery.
   590  	}
   591  }
   592  
   593  // tombstones returns the number of deleted (tombstone) entries in the table. A
   594  // tombstone is a slot that has been deleted but is still considered occupied
   595  // so as not to violate the probing invariant.
   596  func (t *table) tombstones() uint16 {
   597  	return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
   598  }
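
        // For example, assuming abi.MapGroupSlots = 8 and maxAvgGroupLoad = 7:
        // a table with capacity 64 has a fill limit of 64*7/8 = 56 slots. If
        // used = 40 and growthLeft = 10, then 56 - 40 - 10 = 6 slots currently
        // hold tombstones.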
   599  
   600  // Clear deletes all entries from the map resulting in an empty map.
   601  func (t *table) Clear(typ *abi.MapType) {
   602  	mgl := t.maxGrowthLeft()
   603  	if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones
   604  		return
   605  	}
   606  	// We only want to do the work of clearing slots
   607  	// if they are full. But we also don't want to do too
   608  	// much work to figure out whether a slot is full or not,
   609  	// especially if clearing a slot is cheap.
   610  	//  1) We decide group-by-group instead of slot-by-slot.
   611  	//     If any slot in a group is full, we zero the whole group.
   612  	//  2) If groups are unlikely to be empty, don't bother
   613  	//     testing for it.
   614  	//  3) If groups are 50%/50% likely to be empty, also don't
   615  	//     bother testing, as it confuses the branch predictor. See #75097.
   616  	//  4) But if a group is really large, do the test anyway, as
   617  	//     clearing is expensive.
   618  	fullTest := uint64(t.used)*4 <= t.groups.lengthMask // less than ~0.25 entries per group -> >3/4 empty groups
   619  	if typ.SlotSize > 32 {
   620  		// For large slots, it is always worth doing the test first.
   621  		fullTest = true
   622  	}
   623  	if fullTest {
   624  		for i := uint64(0); i <= t.groups.lengthMask; i++ {
   625  			g := t.groups.group(typ, i)
   626  			if g.ctrls().anyFull() {
   627  				typedmemclr(typ.Group, g.data)
   628  			}
   629  			g.ctrls().setEmpty()
   630  		}
   631  	} else {
   632  		for i := uint64(0); i <= t.groups.lengthMask; i++ {
   633  			g := t.groups.group(typ, i)
   634  			typedmemclr(typ.Group, g.data)
   635  			g.ctrls().setEmpty()
   636  		}
   637  	}
   638  	t.used = 0
   639  	t.growthLeft = mgl
   640  }
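
        // For example, with capacity 1024 and abi.MapGroupSlots = 8 (assumed),
        // the table has 128 groups and groups.lengthMask = 127. With used = 20,
        // 20*4 = 80 <= 127, so most groups are empty and the per-group anyFull
        // test is worth it; with used = 500, 500*4 = 2000 > 127, so Clear zeroes
        // every group without testing (unless SlotSize > 32 forces the test).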
   641  
   642  type Iter struct {
   643  	key  unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
   644  	elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
   645  	typ  *abi.MapType
   646  	m    *Map
   647  
   648  	// Randomize iteration order by starting iteration at a random slot
   649  	// offset. The offset into the directory uses a separate offset, as it
   650  	// must adjust when the directory grows.
   651  	entryOffset uint64
   652  	dirOffset   uint64
   653  
   654  	// Snapshot of Map.clearSeq at iteration initialization time. Used to
   655  	// detect clear during iteration.
   656  	clearSeq uint64
   657  
   658  	// Value of Map.globalDepth during the last call to Next. Used to
   659  	// detect directory grow during iteration.
   660  	globalDepth uint8
   661  
   662  	// dirIdx is the current directory index, prior to adjustment by
   663  	// dirOffset.
   664  	dirIdx int
   665  
   666  	// tab is the table at dirIdx during the previous call to Next.
   667  	tab *table
   668  
   669  	// group is the group at entryIdx during the previous call to Next.
   670  	group groupReference
   671  
   672  	// entryIdx is the current entry index, prior to adjustment by entryOffset.
   673  	// The lower 3 bits of the index are the slot index, and the upper bits
   674  	// are the group index.
   675  	entryIdx uint64
   676  }
   677  
   678  // Init initializes Iter for iteration.
   679  func (it *Iter) Init(typ *abi.MapType, m *Map) {
   680  	it.typ = typ
   681  
   682  	if m == nil || m.used == 0 {
   683  		return
   684  	}
   685  
   686  	dirIdx := 0
   687  	var groupSmall groupReference
   688  	if m.dirLen <= 0 {
   689  		// Use dirIdx == -1 as sentinel for small maps.
   690  		dirIdx = -1
   691  		groupSmall.data = m.dirPtr
   692  	}
   693  
   694  	it.m = m
   695  	it.entryOffset = rand()
   696  	it.dirOffset = rand()
   697  	it.globalDepth = m.globalDepth
   698  	it.dirIdx = dirIdx
   699  	it.group = groupSmall
   700  	it.clearSeq = m.clearSeq
   701  }
   702  
   703  func (it *Iter) Initialized() bool {
   704  	return it.typ != nil
   705  }
   706  
   707  // Map returns the map this iterator is iterating over.
   708  func (it *Iter) Map() *Map {
   709  	return it.m
   710  }
   711  
   712  // Key returns a pointer to the current key. nil indicates end of iteration.
   713  //
   714  // Must not be called prior to Next.
   715  func (it *Iter) Key() unsafe.Pointer {
   716  	return it.key
   717  }
   718  
   719  // Elem returns a pointer to the current element. nil indicates end of
   720  // iteration.
   721  //
   722  // Must not be called prior to Next.
   723  func (it *Iter) Elem() unsafe.Pointer {
   724  	return it.elem
   725  }
   726  
   727  func (it *Iter) nextDirIdx() {
   728  	// Skip other entries in the directory that refer to the same
   729  	// logical table. There are two cases of this:
   730  	//
   731  	// Consider this directory:
   732  	//
   733  	// - 0: *t1
   734  	// - 1: *t1
   735  	// - 2: *t2a
   736  	// - 3: *t2b
   737  	//
   738  	// At some point, the directory grew to accommodate a split of
   739  	// t2. t1 did not split, so entries 0 and 1 both point to t1.
   740  	// t2 did split, so the two halves were installed in entries 2
   741  	// and 3.
   742  	//
   743  	// If dirIdx is 0 and it.tab is t1, then we should skip past
   744  	// entry 1 to avoid repeating t1.
   745  	//
   746  	// If dirIdx is 2 and it.tab is t2 (pre-split), then we should
   747  	// skip past entry 3 because our pre-split t2 already covers
   748  	// all keys from t2a and t2b (except for new insertions, which
   749  	// iteration need not return).
   750  	//
   751  	// We can achieve both of these by using the difference between
   752  	// the directory and table depth to compute how many entries
   753  	// the table covers.
   754  	entries := 1 << (it.m.globalDepth - it.tab.localDepth)
   755  	it.dirIdx += entries
   756  	it.tab = nil
   757  	it.group = groupReference{}
   758  	it.entryIdx = 0
   759  }
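
        // For example, if it.m.globalDepth is 3 (an 8-entry directory) and
        // it.tab has localDepth 1, the table occupies 1<<(3-1) = 4 consecutive
        // directory entries, so dirIdx advances by 4 to skip the duplicates.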
   760  
   761  // Return the appropriate key/elem for key at slotIdx index within it.group, if
   762  // any.
   763  func (it *Iter) grownKeyElem(key unsafe.Pointer, slotIdx uintptr) (unsafe.Pointer, unsafe.Pointer, bool) {
   764  	newKey, newElem, ok := it.m.getWithKey(it.typ, key)
   765  	if !ok {
   766  		// Key has likely been deleted, and
   767  		// should be skipped.
   768  		//
   769  		// One exception is keys that don't
   770  		// compare equal to themselves (e.g.,
   771  		// NaN). These keys cannot be looked
   772  		// up, so getWithKey will fail even if
   773  		// the key exists.
   774  		//
   775  		// However, we are in luck because such
   776  		// keys cannot be updated and they
   777  		// cannot be deleted except with clear.
   778  		// Thus if no clear has occurred, the
   779  		// key/elem must still exist exactly as
   780  		// in the old groups, so we can return
   781  		// them from there.
   782  		//
   783  		// TODO(prattmic): Consider checking
   784  		// clearSeq early. If a clear occurred,
   785  		// Next could always return
   786  		// immediately, as iteration doesn't
   787  		// need to return anything added after
   788  		// clear.
   789  		if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
   790  			elem := it.group.elem(it.typ, slotIdx)
   791  			if it.typ.IndirectElem() {
   792  				elem = *((*unsafe.Pointer)(elem))
   793  			}
   794  			return key, elem, true
   795  		}
   796  
   797  		// This entry doesn't exist anymore.
   798  		return nil, nil, false
   799  	}
   800  
   801  	return newKey, newElem, true
   802  }
   803  
   804  // Next proceeds to the next element in iteration, which can be accessed via
   805  // the Key and Elem methods.
   806  //
   807  // The table can be mutated during iteration, though there is no guarantee that
   808  // the mutations will be visible to the iteration.
   809  //
   810  // Init must be called prior to Next.
   811  func (it *Iter) Next() {
   812  	if it.m == nil {
   813  		// Map was empty at Iter.Init.
   814  		it.key = nil
   815  		it.elem = nil
   816  		return
   817  	}
   818  
   819  	if it.m.writing != 0 {
   820  		fatal("concurrent map iteration and map write")
   821  		return
   822  	}
   823  
   824  	if it.dirIdx < 0 {
   825  		// Map was small at Init.
   826  		for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
   827  			k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots
   828  
   829  			if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
   830  				// Empty or deleted.
   831  				continue
   832  			}
   833  
   834  			key := it.group.key(it.typ, k)
   835  			if it.typ.IndirectKey() {
   836  				key = *((*unsafe.Pointer)(key))
   837  			}
   838  
   839  			// As below, if we have grown to a full map since Init,
   840  			// we continue to use the old group to decide the keys
   841  			// to return, but must look them up again in the new
   842  			// tables.
   843  			grown := it.m.dirLen > 0
   844  			var elem unsafe.Pointer
   845  			if grown {
   846  				var ok bool
   847  				newKey, newElem, ok := it.m.getWithKey(it.typ, key)
   848  				if !ok {
   849  					// See comment below.
   850  					if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
   851  						elem = it.group.elem(it.typ, k)
   852  						if it.typ.IndirectElem() {
   853  							elem = *((*unsafe.Pointer)(elem))
   854  						}
   855  					} else {
   856  						continue
   857  					}
   858  				} else {
   859  					key = newKey
   860  					elem = newElem
   861  				}
   862  			} else {
   863  				elem = it.group.elem(it.typ, k)
   864  				if it.typ.IndirectElem() {
   865  					elem = *((*unsafe.Pointer)(elem))
   866  				}
   867  			}
   868  
   869  			it.entryIdx++
   870  			it.key = key
   871  			it.elem = elem
   872  			return
   873  		}
   874  		it.key = nil
   875  		it.elem = nil
   876  		return
   877  	}
   878  
   879  	if it.globalDepth != it.m.globalDepth {
   880  		// Directory has grown since the last call to Next. Adjust our
   881  		// directory index.
   882  		//
   883  		// Consider:
   884  		//
   885  		// Before:
   886  		// - 0: *t1
   887  		// - 1: *t2  <- dirIdx
   888  		//
   889  		// After:
   890  		// - 0: *t1a (split)
   891  		// - 1: *t1b (split)
   892  		// - 2: *t2  <- dirIdx
   893  		// - 3: *t2
   894  		//
   895  		// That is, we want to double the current index when the
   896  		// directory size doubles (or quadruple when the directory size
   897  		// quadruples, etc).
   898  		//
   899  		// The actual (randomized) dirIdx is computed below as:
   900  		//
   901  		// dirIdx := (it.dirIdx + it.dirOffset) % it.m.dirLen
   902  		//
   903  		// Multiplying by A can be moved inside the modulo operation,
   904  		// A * (B % C) = (A * B) % (A * C),
   905  		// provided that A is positive.
   906  		//
   907  		// Thus we can achieve this by adjusting it.dirIdx,
   908  		// it.dirOffset, and it.m.dirLen individually.
   909  		orders := it.m.globalDepth - it.globalDepth
   910  		it.dirIdx <<= orders
   911  		it.dirOffset <<= orders
   912  		// it.m.dirLen was already adjusted when the directory grew.
   913  
   914  		it.globalDepth = it.m.globalDepth
   915  	}
   916  
   917  	// Continue iteration until we find a full slot.
   918  	for ; it.dirIdx < it.m.dirLen; it.nextDirIdx() {
   919  		// Resolve the table.
   920  		if it.tab == nil {
   921  			dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1))
   922  			newTab := it.m.directoryAt(uintptr(dirIdx))
   923  			if newTab.index != dirIdx {
   924  				// Normally we skip past all duplicates of the
   925  				// same entry in the table (see updates to
   926  				// it.dirIdx at the end of the loop below), so
   927  				// this case wouldn't occur.
   928  				//
   929  				// But on the very first call, we have a
   930  				// completely randomized dirIdx that may refer
   931  				// to a middle of a run of tables in the
   932  				// directory. Do a one-time adjustment of the
   933  				// offset to ensure we start at the first index for
   934  				// newTab.
   935  				diff := dirIdx - newTab.index
   936  				it.dirOffset -= uint64(diff)
   937  				dirIdx = newTab.index
   938  			}
   939  			it.tab = newTab
   940  		}
   941  
   942  		// N.B. Use it.tab, not newTab. It is important to use the old
   943  		// table for key selection if the table has grown. See comment
   944  		// on grown below.
   945  
   946  		entryMask := uint64(it.tab.capacity) - 1
   947  		if it.entryIdx > entryMask {
   948  			// Continue to next table.
   949  			continue
   950  		}
   951  
   952  		// Fast path: skip matching and directly check if entryIdx is a
   953  		// full slot.
   954  		//
   955  		// In the slow path below, we perform an 8-slot match check to
   956  		// look for full slots within the group.
   957  		//
   958  		// However, with a max load factor of 7/8, each slot in a
   959  		// mostly full map has a high probability of being full. Thus
   960  		// it is cheaper to check a single slot than do a full control
   961  		// match.
   962  
   963  		entryIdx := (it.entryIdx + it.entryOffset) & entryMask
   964  		slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
   965  		if slotIdx == 0 || it.group.data == nil {
   966  			// Only compute the group (a) when we switch
   967  			// groups (slotIdx rolls over) and (b) on the
   968  			// first iteration in this table (slotIdx may
   969  			// not be zero due to entryOffset).
   970  			groupIdx := entryIdx >> abi.MapGroupSlotsBits
   971  			it.group = it.tab.groups.group(it.typ, groupIdx)
   972  		}
   973  
   974  		if (it.group.ctrls().get(slotIdx) & ctrlEmpty) == 0 {
   975  			// Slot full.
   976  
   977  			key := it.group.key(it.typ, slotIdx)
   978  			if it.typ.IndirectKey() {
   979  				key = *((*unsafe.Pointer)(key))
   980  			}
   981  
   982  			grown := it.tab.index == -1
   983  			var elem unsafe.Pointer
   984  			if grown {
   985  				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
   986  				if !ok {
   987  					// This entry doesn't exist
   988  					// anymore. Continue to the
   989  					// next one.
   990  					goto next
   991  				} else {
   992  					key = newKey
   993  					elem = newElem
   994  				}
   995  			} else {
   996  				elem = it.group.elem(it.typ, slotIdx)
   997  				if it.typ.IndirectElem() {
   998  					elem = *((*unsafe.Pointer)(elem))
   999  				}
  1000  			}
  1001  
  1002  			it.entryIdx++
  1003  			it.key = key
  1004  			it.elem = elem
  1005  			return
  1006  		}
  1007  
  1008  	next:
  1009  		it.entryIdx++
  1010  
  1011  		// Slow path: use a match on the control word to jump ahead to
  1012  		// the next full slot.
  1013  		//
  1014  		// This is highly effective for maps with particularly low load
  1015  		// (e.g., map allocated with large hint but few insertions).
  1016  		//
  1017  		// For maps with medium load (e.g., 3-4 empty slots per group)
  1018  		// it also tends to work pretty well. Since slots within a
  1019  		// group are filled in order, then if there have been no
  1020  		// deletions, a match will allow skipping past all empty slots
  1021  		// at once.
  1022  		//
  1023  		// Note: it is tempting to cache the group match result in the
  1024  		// iterator to use across Next calls. However, because entries
  1025  		// may be deleted between calls, later calls would still need to
  1026  		// double-check the control value.
  1027  
  1028  		var groupMatch bitset
  1029  		for it.entryIdx <= entryMask {
  1030  			entryIdx := (it.entryIdx + it.entryOffset) & entryMask
  1031  			slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
  1032  
  1033  			if slotIdx == 0 || it.group.data == nil {
  1034  				// Only compute the group (a) when we switch
  1035  				// groups (slotIdx rolls over) and (b) on the
  1036  				// first iteration in this table (slotIdx may
  1037  				// not be zero due to entryOffset).
  1038  				groupIdx := entryIdx >> abi.MapGroupSlotsBits
  1039  				it.group = it.tab.groups.group(it.typ, groupIdx)
  1040  			}
  1041  
  1042  			if groupMatch == 0 {
  1043  				groupMatch = it.group.ctrls().matchFull()
  1044  
  1045  				if slotIdx != 0 {
  1046  					// Starting in the middle of the group.
  1047  					// Ignore earlier slots.
  1048  					groupMatch = groupMatch.removeBelow(slotIdx)
  1049  				}
  1050  
  1051  				// Skip over groups that are composed of only empty or
  1052  				// deleted slots.
  1053  				if groupMatch == 0 {
  1054  					// Jump past remaining slots in this
  1055  					// group.
  1056  					it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
  1057  					continue
  1058  				}
  1059  
  1060  				i := groupMatch.first()
  1061  				it.entryIdx += uint64(i - slotIdx)
  1062  				if it.entryIdx > entryMask {
  1063  					// Past the end of this table's iteration.
  1064  					continue
  1065  				}
  1066  				entryIdx += uint64(i - slotIdx)
  1067  				slotIdx = i
  1068  			}
  1069  
  1070  			key := it.group.key(it.typ, slotIdx)
  1071  			if it.typ.IndirectKey() {
  1072  				key = *((*unsafe.Pointer)(key))
  1073  			}
  1074  
  1075  			// If the table has changed since the last
  1076  			// call, then it has grown or split. In this
  1077  			// case, further mutations (changes to
  1078  			// key->elem or deletions) will not be visible
  1079  			// in our snapshot table. Instead we must
  1080  			// consult the new table by doing a full
  1081  			// lookup.
  1082  			//
  1083  			// We still use our old table to decide which
  1084  			// keys to lookup in order to avoid returning
  1085  			// the same key twice.
  1086  			grown := it.tab.index == -1
  1087  			var elem unsafe.Pointer
  1088  			if grown {
  1089  				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
  1090  				if !ok {
  1091  					// This entry doesn't exist anymore.
  1092  					// Continue to the next one.
  1093  					groupMatch = groupMatch.removeFirst()
  1094  					if groupMatch == 0 {
  1095  						// No more entries in this
  1096  						// group. Continue to next
  1097  						// group.
  1098  						it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
  1099  						continue
  1100  					}
  1101  
  1102  					// Next full slot.
  1103  					i := groupMatch.first()
  1104  					it.entryIdx += uint64(i - slotIdx)
  1105  					continue
  1106  				} else {
  1107  					key = newKey
  1108  					elem = newElem
  1109  				}
  1110  			} else {
  1111  				elem = it.group.elem(it.typ, slotIdx)
  1112  				if it.typ.IndirectElem() {
  1113  					elem = *((*unsafe.Pointer)(elem))
  1114  				}
  1115  			}
  1116  
  1117  			// Jump ahead to the next full slot or next group.
  1118  			groupMatch = groupMatch.removeFirst()
  1119  			if groupMatch == 0 {
  1120  				// No more entries in
  1121  				// this group. Continue
  1122  				// to next group.
  1123  				it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
  1124  			} else {
  1125  				// Next full slot.
  1126  				i := groupMatch.first()
  1127  				it.entryIdx += uint64(i - slotIdx)
  1128  			}
  1129  
  1130  			it.key = key
  1131  			it.elem = elem
  1132  			return
  1133  		}
  1134  
  1135  		// Continue to next table.
  1136  	}
  1137  
  1138  	it.key = nil
  1139  	it.elem = nil
  1140  	return
  1141  }
  1142  
  1143  // rehash replaces the table with one larger table or two split tables to fit
  1144  // more entries. Since the table is replaced, t is now stale and should not be
  1145  // modified.
  1146  func (t *table) rehash(typ *abi.MapType, m *Map) {
  1147  	// TODO(prattmic): SwissTables typically perform a "rehash in place"
  1148  	// operation which recovers capacity consumed by tombstones without growing
  1149  	// the table by reordering slots as necessary to maintain the probe
  1150  	// invariant while eliminating all tombstones.
  1151  	//
  1152  	// However, it is unclear how to make rehash in place work with
  1153  	// iteration. Since iteration simply walks through all slots in order
  1154  	// (with random start offset), reordering the slots would break
  1155  	// iteration.
  1156  	//
  1157  	// As an alternative, we could do a "resize" to new groups allocation
  1158  	// of the same size. This would eliminate the tombstones, but using a
  1159  	// new allocation, so the existing grow support in iteration would
  1160  	// continue to work.
  1161  
  1162  	newCapacity := 2 * t.capacity
  1163  	if newCapacity <= maxTableCapacity {
  1164  		t.grow(typ, m, newCapacity)
  1165  		return
  1166  	}
  1167  
  1168  	t.split(typ, m)
  1169  }
  1170  
  1171  // Bitmask for the last selection bit at this depth.
  1172  func localDepthMask(localDepth uint8) uintptr {
  1173  	if goarch.PtrSize == 4 {
  1174  		return uintptr(1) << (32 - localDepth)
  1175  	}
  1176  	return uintptr(1) << (64 - localDepth)
  1177  }
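
        // For example, on a 64-bit platform a table splitting to localDepth 1
        // uses mask = 1<<63: keys whose hash has a 0 in the top bit go to the
        // left table and keys with a 1 go to the right table. A later split to
        // localDepth 2 decides on the next bit down (1<<62), and so on.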
  1178  
  1179  // split the table into two, installing the new tables in the map directory.
  1180  func (t *table) split(typ *abi.MapType, m *Map) {
  1181  	localDepth := t.localDepth
  1182  	localDepth++
  1183  
  1184  	// TODO: is this the best capacity?
  1185  	left := newTable(typ, maxTableCapacity, -1, localDepth)
  1186  	right := newTable(typ, maxTableCapacity, -1, localDepth)
  1187  
  1188  	// Split in half at the localDepth bit from the top.
  1189  	mask := localDepthMask(localDepth)
  1190  
  1191  	for i := uint64(0); i <= t.groups.lengthMask; i++ {
  1192  		g := t.groups.group(typ, i)
  1193  		for j := uintptr(0); j < abi.MapGroupSlots; j++ {
  1194  			if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
  1195  				// Empty or deleted
  1196  				continue
  1197  			}
  1198  
  1199  			key := g.key(typ, j)
  1200  			if typ.IndirectKey() {
  1201  				key = *((*unsafe.Pointer)(key))
  1202  			}
  1203  
  1204  			elem := g.elem(typ, j)
  1205  			if typ.IndirectElem() {
  1206  				elem = *((*unsafe.Pointer)(elem))
  1207  			}
  1208  
  1209  			hash := typ.Hasher(key, m.seed)
  1210  			var newTable *table
  1211  			if hash&mask == 0 {
  1212  				newTable = left
  1213  			} else {
  1214  				newTable = right
  1215  			}
  1216  			newTable.uncheckedPutSlot(typ, hash, key, elem)
  1217  		}
  1218  	}
  1219  
  1220  	m.installTableSplit(t, left, right)
  1221  	t.index = -1
  1222  }
  1223  
  1224  // grow the capacity of the table by allocating a new table with a bigger array
  1225  // and uncheckedPutting each element of the table into the new table (we know
  1226  // that no insertion here will Put an already-present value), and discarding the
  1227  // old table.
  1228  func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
  1229  	newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)
  1230  
  1231  	if t.capacity > 0 {
  1232  		for i := uint64(0); i <= t.groups.lengthMask; i++ {
  1233  			g := t.groups.group(typ, i)
  1234  			for j := uintptr(0); j < abi.MapGroupSlots; j++ {
  1235  				if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
  1236  					// Empty or deleted
  1237  					continue
  1238  				}
  1239  
  1240  				key := g.key(typ, j)
  1241  				if typ.IndirectKey() {
  1242  					key = *((*unsafe.Pointer)(key))
  1243  				}
  1244  
  1245  				elem := g.elem(typ, j)
  1246  				if typ.IndirectElem() {
  1247  					elem = *((*unsafe.Pointer)(elem))
  1248  				}
  1249  
  1250  				hash := typ.Hasher(key, m.seed)
  1251  
  1252  				newTable.uncheckedPutSlot(typ, hash, key, elem)
  1253  			}
  1254  		}
  1255  	}
  1256  
  1257  	newTable.checkInvariants(typ, m)
  1258  	m.replaceTable(newTable)
  1259  	t.index = -1
  1260  }
  1261  
  1262  // probeSeq maintains the state for a probe sequence that iterates through the
  1263  // groups in a table. The sequence is a triangular progression of the form
  1264  //
  1265  //	p(i) := (i^2 + i)/2 + hash (mod mask+1)
  1266  //
  1267  // The sequence effectively outputs the indexes of *groups*. The group
  1268  // machinery allows us to check an entire group with minimal branching.
  1269  //
  1270  // It turns out that this probe sequence visits every group exactly once if
  1271  // the number of groups is a power of two, since (i^2+i)/2 is a bijection in
  1272  // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
  1273  type probeSeq struct {
  1274  	mask   uint64
  1275  	offset uint64
  1276  	index  uint64
  1277  }
  1278  
  1279  func makeProbeSeq(hash uintptr, mask uint64) probeSeq {
  1280  	return probeSeq{
  1281  		mask:   mask,
  1282  		offset: uint64(hash) & mask,
  1283  		index:  0,
  1284  	}
  1285  }
  1286  
  1287  func (s probeSeq) next() probeSeq {
  1288  	s.index++
  1289  	s.offset = (s.offset + s.index) & s.mask
  1290  	return s
  1291  }
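
        // A minimal sketch verifying the coverage claim above: for a
        // power-of-two number of groups, following the triangular probe
        // sequence for mask+1 steps visits every group exactly once. The helper
        // below is illustrative only and is not part of this package's API.
        func probeSeqCoversAllGroupsSketch(hash uintptr, mask uint64) bool {
        	seen := make([]bool, mask+1)
        	seq := makeProbeSeq(hash, mask)
        	for i := uint64(0); i <= mask; i++ {
        		if seen[seq.offset] {
        			return false // revisited a group before covering all of them
        		}
        		seen[seq.offset] = true
        		seq = seq.next()
        	}
        	return true // mask+1 distinct offsets in mask+1 steps
        }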
  1292  
  1293  func (t *table) clone(typ *abi.MapType) *table {
  1294  	// Shallow copy the table structure.
  1295  	t2 := new(table)
  1296  	*t2 = *t
  1297  	t = t2
  1298  
  1299  	// We just need to deep copy the groups.data field.
  1300  	oldGroups := t.groups
  1301  	newGroups := newGroups(typ, oldGroups.lengthMask+1)
  1302  	for i := uint64(0); i <= oldGroups.lengthMask; i++ {
  1303  		oldGroup := oldGroups.group(typ, i)
  1304  		newGroup := newGroups.group(typ, i)
  1305  		cloneGroup(typ, newGroup, oldGroup)
  1306  	}
  1307  	t.groups = newGroups
  1308  
  1309  	return t
  1310  }
  1311  
