Source file src/internal/runtime/maps/runtime.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package maps
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/asan"
    10  	"internal/msan"
    11  	"internal/race"
    12  	"internal/runtime/sys"
    13  	"unsafe"
    14  )
    15  
// Functions below pushed from runtime.

// fatal reports an unrecoverable error (e.g. concurrent map access) and
// aborts the process. Provided by the runtime.
//
//go:linkname fatal
func fatal(s string)

// rand returns a random uint64 from the runtime's generator.
//
//go:linkname rand
func rand() uint64

// typedmemmove copies a value of type typ from src to dst as a single,
// GC-visible typed write.
//
//go:linkname typedmemmove
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer)

// typedmemclr zeroes the value of type typ at ptr as a GC-visible typed
// write.
//
//go:linkname typedmemclr
func typedmemclr(typ *abi.Type, ptr unsafe.Pointer)

// newarray allocates a zeroed array of n values of type typ.
//
//go:linkname newarray
func newarray(typ *abi.Type, n int) unsafe.Pointer

// newobject allocates a single zeroed value of type typ.
//
//go:linkname newobject
func newobject(typ *abi.Type) unsafe.Pointer

// errNilAssign is the panic value for assignment to a nil map (see
// runtime_mapassign).
//
// Pushed from runtime in order to use runtime.plainError
//
//go:linkname errNilAssign
var errNilAssign error

// Pull from runtime. It is important that this is the exact same copy as the
// runtime because runtime.mapaccess1_fat compares the returned pointer with
// &runtime.zeroVal[0].
// TODO: move zeroVal to internal/abi?
//
//go:linkname zeroVal runtime.zeroVal
var zeroVal [abi.ZeroValSize]byte
    48  
// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	// Sanitizer instrumentation: record the map read and the key read at
	// the caller's PC so reports point at the user's map access.
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled && m != nil {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled && m != nil {
		asan.Read(key, typ.Key.Size_)
	}

	// Nil or empty map: still validate the key so that lookups with an
	// unhashable key panic rather than silently returning the zero value.
	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	// A read during an in-progress write is a fatal race (writing is a
	// flag toggled by writers; see mapassign).
	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	// No directory: small single-group layout, handled by the simpler
	// small-map lookup.
	if m.dirLen <= 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots: those whose control byte matches h2 of the hash.
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			// Keep the slot address for elem offset math even when the
			// key is stored indirectly (as a pointer to the key).
			slotKeyOrig := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				// Elem lives at a fixed offset from the key slot.
				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
   129  
   130  //go:linkname runtime_mapaccess2 runtime.mapaccess2
   131  func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
   132  	if race.Enabled && m != nil {
   133  		callerpc := sys.GetCallerPC()
   134  		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
   135  		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
   136  		race.ReadObjectPC(typ.Key, key, callerpc, pc)
   137  	}
   138  	if msan.Enabled && m != nil {
   139  		msan.Read(key, typ.Key.Size_)
   140  	}
   141  	if asan.Enabled && m != nil {
   142  		asan.Read(key, typ.Key.Size_)
   143  	}
   144  
   145  	if m == nil || m.Used() == 0 {
   146  		if err := mapKeyError(typ, key); err != nil {
   147  			panic(err) // see issue 23734
   148  		}
   149  		return unsafe.Pointer(&zeroVal[0]), false
   150  	}
   151  
   152  	if m.writing != 0 {
   153  		fatal("concurrent map read and map write")
   154  	}
   155  
   156  	hash := typ.Hasher(key, m.seed)
   157  
   158  	if m.dirLen == 0 {
   159  		_, elem, ok := m.getWithKeySmall(typ, hash, key)
   160  		if !ok {
   161  			return unsafe.Pointer(&zeroVal[0]), false
   162  		}
   163  		return elem, true
   164  	}
   165  
   166  	// Select table.
   167  	idx := m.directoryIndex(hash)
   168  	t := m.directoryAt(idx)
   169  
   170  	// Probe table.
   171  	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   172  	h2Hash := h2(hash)
   173  	for ; ; seq = seq.next() {
   174  		g := t.groups.group(typ, seq.offset)
   175  
   176  		match := g.ctrls().matchH2(h2Hash)
   177  
   178  		for match != 0 {
   179  			i := match.first()
   180  
   181  			slotKey := g.key(typ, i)
   182  			slotKeyOrig := slotKey
   183  			if typ.IndirectKey() {
   184  				slotKey = *((*unsafe.Pointer)(slotKey))
   185  			}
   186  			if typ.Key.Equal(key, slotKey) {
   187  				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
   188  				if typ.IndirectElem() {
   189  					slotElem = *((*unsafe.Pointer)(slotElem))
   190  				}
   191  				return slotElem, true
   192  			}
   193  			match = match.removeFirst()
   194  		}
   195  
   196  		match = g.ctrls().matchEmpty()
   197  		if match != 0 {
   198  			// Finding an empty slot means we've reached the end of
   199  			// the probe sequence.
   200  			return unsafe.Pointer(&zeroVal[0]), false
   201  		}
   202  	}
   203  }
   204  
// mapassign returns a pointer to the elem slot for key, inserting the key
// if it is not already present. Assigning to a nil map panics.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
//
//go:linkname runtime_mapassign runtime.mapassign
func runtime_mapassign(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	// Sanitizer instrumentation: record the map write and the key read at
	// the caller's PC so reports point at the user's map assignment.
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled {
		asan.Read(key, typ.Key.Size_)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	// Lazily allocate the initial small-map storage on first insert.
	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	// Small-map fast path: no directory, at most one group of slots.
	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmall(typ, hash, key)

			// Clear the writing flag; if it was already cleared, a
			// concurrent writer raced with us.
			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				// Keep the slot address for elem offset math even when
				// the key is stored indirectly.
				slotKeyOrig := slotKey
				if typ.IndirectKey() {
					slotKey = *((*unsafe.Pointer)(slotKey))
				}
				if typ.Key.Equal(key, slotKey) {
					// Key already present: optionally refresh the stored
					// key, then return its elem slot.
					if typ.NeedKeyUpdate() {
						typedmemmove(typ.Key, slotKey, key)
					}

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						slotElem = *((*unsafe.Pointer)(slotElem))
					}

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmpty()
			if match != 0 {
				// Finding an empty slot means we've reached the end of
				// the probe sequence.

				var i uintptr

				// If we found a deleted slot along the way, we
				// can replace it without consuming growthLeft.
				if firstDeletedGroup.data != nil {
					g = firstDeletedGroup
					i = firstDeletedSlot
					t.growthLeft++ // will be decremented below to become a no-op.
				} else {
					// Otherwise, use the empty slot.
					i = match.first()
				}

				// If there is room left to grow, just insert the new entry.
				if t.growthLeft > 0 {
					slotKey := g.key(typ, i)
					slotKeyOrig := slotKey
					// Indirect keys/elems are stored as pointers to
					// separately allocated storage.
					if typ.IndirectKey() {
						kmem := newobject(typ.Key)
						*(*unsafe.Pointer)(slotKey) = kmem
						slotKey = kmem
					}
					typedmemmove(typ.Key, slotKey, key)

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						emem := newobject(typ.Elem)
						*(*unsafe.Pointer)(slotElem) = emem
						slotElem = emem
					}

					// Publish the slot by writing its control byte, then
					// update the table and map counters.
					g.ctrls().set(i, ctrl(h2Hash))
					t.growthLeft--
					t.used++
					m.used++

					t.checkInvariants(typ, m)
					break outer
				}

				// No room: rehash this table and retry the whole probe
				// (the directory/table layout may have changed).
				t.rehash(typ, m)
				continue outer
			}

			// No empty slots in this group. Check for a deleted
			// slot, which we'll use if we don't find a match later
			// in the probe sequence.
			//
			// We only need to remember a single deleted slot.
			if firstDeletedGroup.data == nil {
				// Since we already checked for empty slots
				// above, matches here must be deleted slots.
				match = g.ctrls().matchEmptyOrDeleted()
				if match != 0 {
					firstDeletedGroup = g
					firstDeletedSlot = match.first()
				}
			}
		}
	}

	// Clear the writing flag; if it was already cleared, a concurrent
	// writer raced with us.
	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   372  

View as plain text