// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
5 package maps
6
7 import (
8 "internal/abi"
9 "internal/goarch"
10 "internal/runtime/math"
11 "unsafe"
12 )
13
14
15
16
17
18
19
// maxTableCapacity is the maximum size of a single table. Above this, the
// map splits into multiple tables managed by the directory (see rehash).
const maxTableCapacity = 1024

// Ensure the compiler rejects a maxTableCapacity that does not fit in the
// uint16 fields (capacity, growthLeft) used throughout table.
var _ = uint16(maxTableCapacity)
25
26
27
28
29
30
31
32
33
// table is a fixed-capacity, open-addressed hash table of groups of slots
// (Swiss-table style). A Map owns one or more tables via its directory.
type table struct {
	// used is the number of filled (live) slots in the table.
	used uint16

	// capacity is the total number of slots; always a power of two,
	// equal to (groups.lengthMask+1) * abi.MapGroupSlots.
	capacity uint16

	// growthLeft is how many more inserts this table can take before it
	// must rehash. It is decremented on insert and only restored when a
	// slot becomes truly empty again (not merely a tombstone), so it
	// accounts for deleted slots as well as live ones.
	growthLeft uint16

	// localDepth is the number of high hash bits this table's directory
	// entries share (extendible hashing); compared against the map's
	// globalDepth when splitting.
	localDepth uint8

	// index is this table's position in the Map directory, or -1 once
	// the table has been replaced by grow/split (iterators use index ==
	// -1 to detect a stale table snapshot).
	//
	// NOTE(review): when globalDepth > localDepth the table appears to
	// occupy multiple consecutive directory entries (see nextDirIdx);
	// presumably index is the first of them — confirm against Map code.
	index int

	// groups holds the slot groups: each group is abi.MapGroupSlots
	// key/elem slots plus a control word of per-slot metadata.
	groups groupsReference
}
73
74 func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
75 if capacity < abi.MapGroupSlots {
76 capacity = abi.MapGroupSlots
77 }
78
79 t := &table{
80 index: index,
81 localDepth: localDepth,
82 }
83
84 if capacity > maxTableCapacity {
85 panic("initial table capacity too large")
86 }
87
88
89
90 capacity, overflow := alignUpPow2(capacity)
91 if overflow {
92 panic("rounded-up capacity overflows uint64")
93 }
94
95 t.reset(typ, uint16(capacity))
96
97 return t
98 }
99
100
101
102 func (t *table) reset(typ *abi.MapType, capacity uint16) {
103 groupCount := uint64(capacity) / abi.MapGroupSlots
104 t.groups = newGroups(typ, groupCount)
105 t.capacity = capacity
106 t.growthLeft = t.maxGrowthLeft()
107
108 for i := uint64(0); i <= t.groups.lengthMask; i++ {
109 g := t.groups.group(typ, i)
110 g.ctrls().setEmpty()
111 }
112 }
113
114
115
116 func (t *table) maxGrowthLeft() uint16 {
117 if t.capacity == 0 {
118
119
120 panic("table must have positive capacity")
121 } else if t.capacity <= abi.MapGroupSlots {
122
123
124
125
126
127
128 return t.capacity - 1
129 } else {
130 if t.capacity > math.MaxUint16/maxAvgGroupLoad {
131 panic("overflow")
132 }
133 return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
134 }
135
136 }
137
138 func (t *table) Used() uint64 {
139 return uint64(t.used)
140 }
141
142
143
144 func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
145
146
147
148
149
150
151 hash := typ.Hasher(key, m.seed)
152 _, elem, ok := t.getWithKey(typ, hash, key)
153 return elem, ok
154 }
155
156
157
158
159
160
161
162
163
164
// getWithKey looks up key, returning pointers to the stored key and elem
// slots and whether the key was found.
//
// Lookup walks the probe sequence of groups derived from h1(hash). In each
// group, matchH2 selects candidate slots whose control byte matches the
// 7-bit h2 of the hash; candidates are confirmed with a full key
// comparison. A group containing an empty slot terminates the probe
// sequence: the key cannot live beyond it.
func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots whose control byte matches h2(hash).
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Key stored indirectly; follow the pointer.
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotKey, slotElem, true
			}
			// h2 collision with a different key; try the next
			// candidate in this group.
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// An empty slot means the end of this key's probe
			// sequence: the key is not present.
			return nil, nil, false
		}
	}
}
225
// getWithoutKey is getWithKey minus the stored-key return: it looks up key
// and returns only the elem slot pointer and whether the key was found.
// See getWithKey for the probe-sequence details.
func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots whose control byte matches h2(hash).
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Empty slot terminates the probe sequence; key absent.
			return nil, false
		}
	}
}
259
260
261
262
263
264
265
266
// PutSlot returns a pointer to the elem slot where the value for key
// should be written, plus true on success. The caller performs the actual
// element write.
//
// If the table had to rehash (grow or split), PutSlot returns (nil, false)
// and the caller must retry the insert against the map, which now refers
// to different table(s).
func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

	// While searching, remember the first deleted (tombstone) slot seen:
	// if the key turns out to be absent, inserting into that slot reuses
	// the tombstone and keeps probe chains short.
	var firstDeletedGroup groupReference
	var firstDeletedSlot uintptr

	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		// Look for an existing slot containing this key.
		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				// Key already present. Refresh the stored key if
				// the key type requires it (keys that compare
				// equal but are not bit-identical).
				if typ.NeedKeyUpdate() {
					typedmemmove(typ.Key, slotKey, key)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}

				t.checkInvariants(typ, m)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		// Key not in this group. Check whether this group ends the
		// probe sequence (has an empty slot) or merely has tombstones.
		match = g.ctrls().matchEmptyOrDeleted()
		if match == 0 {
			continue // group is completely full; keep probing.
		}
		i := match.first()
		if g.ctrls().get(i) == ctrlDeleted {
			// Tombstone: the key may still exist further along the
			// probe sequence, so keep searching, but remember this
			// slot as an insertion candidate.
			if firstDeletedGroup.data == nil {
				firstDeletedGroup = g
				firstDeletedSlot = i
			}
			continue
		}

		// Empty slot found: the key is definitely absent; insert it.

		// Prefer a tombstone found earlier in the probe sequence.
		// Reusing it does not consume growth budget, so pre-increment
		// growthLeft to cancel the decrement below.
		if firstDeletedGroup.data != nil {
			g = firstDeletedGroup
			i = firstDeletedSlot
			t.growthLeft++
		}

		// Out of growth budget: try to reclaim tombstones first.
		if t.growthLeft == 0 {
			t.pruneTombstones(typ, m)
		}

		// If there is room, write the key and claim the slot.
		if t.growthLeft > 0 {
			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Allocate the indirect key storage.
				kmem := newobject(typ.Key)
				*(*unsafe.Pointer)(slotKey) = kmem
				slotKey = kmem
			}
			typedmemmove(typ.Key, slotKey, key)

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				// Allocate the indirect elem storage.
				emem := newobject(typ.Elem)
				*(*unsafe.Pointer)(slotElem) = emem
				slotElem = emem
			}

			// Publish the slot as full with this key's h2.
			g.ctrls().set(i, ctrl(h2Hash))
			t.growthLeft--
			t.used++
			m.used++

			t.checkInvariants(typ, m)
			return slotElem, true
		}

		// Table is full even after pruning: rehash (grow or split) and
		// tell the caller to retry against the new table(s).
		t.rehash(typ, m)
		return nil, false
	}
}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
// uncheckedPutSlot inserts an entry known to be absent from the table,
// writing both key and elem directly.
//
// Used while repopulating a fresh table (grow, split, clone paths): unlike
// PutSlot it performs no duplicate-key search, never rehashes, and takes
// the first empty-or-deleted slot on the probe sequence. growthLeft must
// be positive on entry.
//
// For indirect keys/elems, key and elem are stored as the indirection
// pointers themselves (the storage was allocated by the original insert).
func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
	if t.growthLeft == 0 {
		panic("invariant failed: growthLeft is unexpectedly 0")
	}

	// Walk the probe sequence and take the first available slot.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchEmptyOrDeleted()
		if match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Store the existing indirection pointer.
				*(*unsafe.Pointer)(slotKey) = key
			} else {
				typedmemmove(typ.Key, slotKey, key)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				*(*unsafe.Pointer)(slotElem) = elem
			} else {
				typedmemmove(typ.Elem, slotElem, elem)
			}

			t.growthLeft--
			t.used++
			// Publish the slot as full last.
			g.ctrls().set(i, ctrl(h2(hash)))
			return
		}
	}
}
421
422
// Delete removes key from the table if present. It reports whether the
// deletion left a tombstone (ctrlDeleted) behind, as opposed to restoring
// a truly empty slot (or finding nothing to delete).
func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			// Keep the slot address: for indirect keys we clear the
			// pointer stored in the slot, not the pointed-to key.
			origSlotKey := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}

			if typ.Key.Equal(key, slotKey) {
				t.used--
				m.used--

				if typ.IndirectKey() {
					// Clearing the indirection pointer releases
					// the key allocation to the GC.
					*(*unsafe.Pointer)(origSlotKey) = nil
				} else if typ.Key.Pointers() {
					// Clear the key only when it contains
					// pointers, so the GC can reclaim what they
					// reference; a stale pointer-free key is
					// harmless.
					typedmemclr(typ.Key, slotKey)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					*(*unsafe.Pointer)(slotElem) = nil
				} else {
					// Unlike the key, the elem is always cleared,
					// even without pointers: later code depends on
					// deleted elem slots being zeroed.
					typedmemclr(typ.Elem, slotElem)
				}

				// Choose the new control state. If this group has an
				// empty slot, no probe sequence can pass through it,
				// so the slot may become empty again and growth
				// budget is returned. Otherwise the group is full and
				// may sit mid-probe-chain for other keys, so a
				// tombstone must be left to keep those chains intact.
				var tombstone bool
				if g.ctrls().matchEmpty() != 0 {
					g.ctrls().set(i, ctrlEmpty)
					t.growthLeft++
				} else {
					g.ctrls().set(i, ctrlDeleted)
					tombstone = true
				}

				t.checkInvariants(typ, m)
				return tombstone
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Empty slot terminates the probe sequence: the key is
			// not in the table.
			return false
		}
	}
}
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
// pruneTombstones scans the table and converts tombstones (deleted slots)
// that are not load-bearing back into empty slots, restoring growth
// budget without a full rehash.
//
// A group's tombstones are "needed" when some live key's probe sequence
// passes through that group on the way to the key's actual group; turning
// them into empty slots would terminate that probe early and lose the key.
//
// The scan is O(capacity) and hashes every live key, so it bails out early
// unless enough reclaimable tombstones exist to be worthwhile.
func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
	if t.tombstones()*10 < t.capacity { // fewer than 10% of slots are tombstones
		// Not enough tombstones to be worth the effort.
		return
	}

	// Bit set over group indices: groups whose tombstones must be kept.
	var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32

	// Trace the probe sequence of every live key up to (but excluding)
	// the group where it resides.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		match := g.ctrls().matchFull()
		for match != 0 {
			j := match.first()
			match = match.removeFirst()
			key := g.key(typ, j)
			if typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}
			if !typ.Key.Equal(key, key) {
				// Irreflexive key (e.g. NaN): lookups can never
				// find it anyway, so its probe chain needs no
				// preservation.
				continue
			}

			hash := typ.Hasher(key, m.seed)
			for seq := makeProbeSeq(h1(hash), t.groups.lengthMask); ; seq = seq.next() {
				if seq.offset == i {
					// Reached the key's own group.
					break
				}
				g := t.groups.group(typ, seq.offset)
				m := g.ctrls().matchEmptyOrDeleted() // shadows param m deliberately
				if m != 0 {
					// Lookups for this key would stop at this
					// earlier group if its deleted slots became
					// empty; keep its tombstones.
					needed[seq.offset/32] |= 1 << (seq.offset % 32)
				}
			}
		}
		if g.ctrls().matchEmpty() != 0 {
			// Mark any group that already contains an empty slot as
			// needed, so the passes below skip it. This guarantees
			// that in unmarked groups every empty-or-deleted slot is
			// a tombstone, which the counting and pruning below rely
			// on.
			needed[i/32] |= 1 << (i % 32)
		}
	}

	// Count reclaimable tombstones (all empty-or-deleted slots in
	// unmarked groups are tombstones, per the invariant above).
	cnt := 0
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted()
		cnt += m.count()
	}
	if cnt*10 < int(t.capacity) { // recover at least 10% of capacity, or don't bother
		return
	}

	// Convert every reclaimable tombstone back to empty, returning its
	// growth budget.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted()
		for m != 0 {
			k := m.first()
			m = m.removeFirst()
			g.ctrls().set(k, ctrlEmpty)
			t.growthLeft++
		}
	}
}
592
593
594
595
596 func (t *table) tombstones() uint16 {
597 return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
598 }
599
600
// Clear deletes all entries from the table, keeping its allocated groups.
func (t *table) Clear(typ *abi.MapType) {
	mgl := t.maxGrowthLeft()
	if t.used == 0 && t.growthLeft == mgl {
		// Table is already empty with no tombstones; nothing to do.
		return
	}

	// Two ways to zero the slot memory:
	//  - test each group's control word and typedmemclr only groups
	//    that contain a full slot (wins when few groups are occupied,
	//    or when slots are large and clearing is expensive), or
	//  - unconditionally typedmemclr every group (wins when most groups
	//    are occupied, skipping the per-group test).
	fullTest := uint64(t.used)*4 <= t.groups.lengthMask // sparse: ≤1 entry per 4 groups on average
	if typ.SlotSize > 32 {
		// Large slots: avoid clearing memory we don't have to.
		fullTest = true
	}
	if fullTest {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			if g.ctrls().anyFull() {
				typedmemclr(typ.Group, g.data)
			}
			g.ctrls().setEmpty()
		}
	} else {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			typedmemclr(typ.Group, g.data)
			g.ctrls().setEmpty()
		}
	}
	t.used = 0
	t.growthLeft = mgl
}
641
// Iter is the iteration state for a Map. Iteration order is randomized
// per-iterator via entryOffset/dirOffset, and tolerates concurrent (but
// not racing) map growth, splits, and clears.
//
// NOTE(review): key/elem appear to be intentionally first — Next writes
// nil to key to signal completion, which range lowering likely relies on.
// Confirm before reordering fields.
type Iter struct {
	key  unsafe.Pointer // current key; nil when iteration is complete
	elem unsafe.Pointer // current elem; nil when iteration is complete
	typ  *abi.MapType
	m    *Map

	// Random offsets added to entry and directory indices so each
	// iterator visits entries in a different order.
	entryOffset uint64
	dirOffset   uint64

	// Snapshot of m.clearSeq at Init; compared against the live value to
	// detect that the map was cleared mid-iteration (see grownKeyElem).
	clearSeq uint64

	// Snapshot of m.globalDepth at the last directory access; a change
	// means the directory grew and indices must be rescaled (see Next).
	globalDepth uint8

	// Current directory index; -1 means the map was small at Init and
	// the iterator walks the single group directly.
	dirIdx int

	// Table being iterated (snapshot; may be stale, detected via
	// tab.index == -1).
	tab *table

	// Group currently being iterated within tab (or the small map's
	// only group).
	group groupReference

	// Entry index within tab, prior to adjustment by entryOffset. The
	// low bits select the slot within a group; the high bits select the
	// group.
	entryIdx uint64
}
677
678
679 func (it *Iter) Init(typ *abi.MapType, m *Map) {
680 it.typ = typ
681
682 if m == nil || m.used == 0 {
683 return
684 }
685
686 dirIdx := 0
687 var groupSmall groupReference
688 if m.dirLen <= 0 {
689
690 dirIdx = -1
691 groupSmall.data = m.dirPtr
692 }
693
694 it.m = m
695 it.entryOffset = rand()
696 it.dirOffset = rand()
697 it.globalDepth = m.globalDepth
698 it.dirIdx = dirIdx
699 it.group = groupSmall
700 it.clearSeq = m.clearSeq
701 }
702
703 func (it *Iter) Initialized() bool {
704 return it.typ != nil
705 }
706
707
708 func (it *Iter) Map() *Map {
709 return it.m
710 }
711
712
713
714
715 func (it *Iter) Key() unsafe.Pointer {
716 return it.key
717 }
718
719
720
721
722
723 func (it *Iter) Elem() unsafe.Pointer {
724 return it.elem
725 }
726
// nextDirIdx advances the iterator past the table it just finished and
// resets the per-table iteration state.
//
// A single table can occupy several consecutive directory entries when
// the directory has grown beyond the table's local depth. For example:
//
//	0: *t1
//	1: *t1
//	2: *t2a
//	3: *t2b
//
// Here the directory grew to accommodate the split of t2, so both entries
// 0 and 1 point at the unsplit t1. To avoid visiting t1 twice, skip all
// 2^(globalDepth-localDepth) entries it occupies.
func (it *Iter) nextDirIdx() {
	entries := 1 << (it.m.globalDepth - it.tab.localDepth)
	it.dirIdx += entries
	// Drop the per-table state; Next re-resolves the table and group.
	it.tab = nil
	it.group = groupReference{}
	it.entryIdx = 0
}
760
761
762
// grownKeyElem fetches the authoritative key and elem for key from the
// live map, for use when the iterator's table snapshot is stale (the map
// grew or split after iteration began): the entry may have moved, been
// updated, or been deleted.
//
// Returns ok=false when the entry no longer exists and should be skipped.
func (it *Iter) grownKeyElem(key unsafe.Pointer, slotIdx uintptr) (unsafe.Pointer, unsafe.Pointer, bool) {
	newKey, newElem, ok := it.m.getWithKey(it.typ, key)
	if !ok {
		// The key was most likely deleted.
		//
		// One exception: irreflexive keys (!Equal(key, key), e.g. NaN
		// float keys) can never be found by lookup even though they
		// still exist in the map. Provided the map has not been
		// cleared since iteration began, yield the snapshot slot's
		// value for such keys.
		if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
			elem := it.group.elem(it.typ, slotIdx)
			if it.typ.IndirectElem() {
				elem = *((*unsafe.Pointer)(elem))
			}
			return key, elem, true
		}

		// Deleted entry: skip it.
		return nil, nil, false
	}

	return newKey, newElem, true
}
803
804
805
806
807
808
809
810
// Next advances to the next entry, which becomes available via Key and
// Elem. Completion is signaled by Key returning nil.
//
// The map may be mutated during iteration; mutations are not guaranteed
// to be visible. Init must have been called first.
func (it *Iter) Next() {
	if it.m == nil {
		// Map was nil or empty at Init: iteration is trivially done.
		it.key = nil
		it.elem = nil
		return
	}

	if it.m.writing != 0 {
		fatal("concurrent map iteration and map write")
		return
	}

	if it.dirIdx < 0 {
		// Small map (single group, no directory): scan the group's
		// slots in a randomized rotation.
		for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
			k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots

			if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
				// Slot is empty or deleted; skip.
				continue
			}

			key := it.group.key(it.typ, k)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// If the map grew into a full (directory-based) map since
			// Init, the snapshot group only tells us which keys to
			// yield; the authoritative entries live in the new
			// tables and must be looked up.
			grown := it.m.dirLen > 0
			var elem unsafe.Pointer
			if grown {
				var ok bool
				newKey, newElem, ok := it.m.getWithKey(it.typ, key)
				if !ok {
					// Entry is gone — unless the key is
					// irreflexive (e.g. NaN) and the map wasn't
					// cleared, in which case lookups can't find
					// it and the snapshot value is yielded (see
					// grownKeyElem for the same logic).
					if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
						elem = it.group.elem(it.typ, k)
						if it.typ.IndirectElem() {
							elem = *((*unsafe.Pointer)(elem))
						}
					} else {
						continue
					}
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, k)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}
		// Group exhausted; iteration complete.
		it.key = nil
		it.elem = nil
		return
	}

	if it.globalDepth != it.m.globalDepth {
		// The directory grew since we last looked: every old entry now
		// corresponds to 2^orders consecutive new entries. Rescale our
		// position and random offset to the new directory size. The
		// directory only ever grows, so no information is lost.
		orders := it.m.globalDepth - it.globalDepth
		it.dirIdx <<= orders
		it.dirOffset <<= orders

		// it.tab is a table snapshot and stays valid as-is.
		it.globalDepth = it.m.globalDepth
	}

	// Walk directory entries until a full slot is found.
	for ; it.dirIdx < it.m.dirLen; it.nextDirIdx() {
		// Resolve the table for the (randomized) directory index.
		if it.tab == nil {
			dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1))
			newTab := it.m.directoryAt(uintptr(dirIdx))
			if newTab.index != dirIdx {
				// A table can span several consecutive directory
				// entries, and our randomized start may have
				// landed mid-run. Realign to the run's first
				// entry (newTab.index) by folding the difference
				// into dirOffset, so nextDirIdx's skip count
				// stays consistent.
				diff := dirIdx - newTab.index
				it.dirOffset -= uint64(diff)
				dirIdx = newTab.index
			}
			it.tab = newTab
		}

		// entryIdx ranges over the snapshot table's slots.
		entryMask := uint64(it.tab.capacity) - 1
		if it.entryIdx > entryMask {
			// Table exhausted; continue to the next table.
			continue
		}

		// Fast path: probe the single slot at the current index before
		// falling back to the control-word-driven scan below.
		entryIdx := (it.entryIdx + it.entryOffset) & entryMask
		slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
		if slotIdx == 0 || it.group.data == nil {
			// Crossed into a new group (or no group cached yet):
			// refresh the cached group reference.
			groupIdx := entryIdx >> abi.MapGroupSlotsBits
			it.group = it.tab.groups.group(it.typ, groupIdx)
		}

		if (it.group.ctrls().get(slotIdx) & ctrlEmpty) == 0 {
			// Slot is full.
			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// index == -1 marks a table replaced by grow/split: the
			// snapshot chooses which keys to yield, but values must
			// come from the live map.
			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// Entry no longer exists; move on.
					goto next
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}

	next:
		it.entryIdx++

		// Slow path: use the group control word (matchFull) to skip
		// runs of empty/deleted slots in bulk instead of testing one
		// slot at a time. groupMatch caches the set of remaining full
		// slots in the current group across loop iterations.
		var groupMatch bitset
		for it.entryIdx <= entryMask {
			entryIdx := (it.entryIdx + it.entryOffset) & entryMask
			slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))

			if slotIdx == 0 || it.group.data == nil {
				// Crossed into a new group; refresh the cache.
				groupIdx := entryIdx >> abi.MapGroupSlotsBits
				it.group = it.tab.groups.group(it.typ, groupIdx)
			}

			if groupMatch == 0 {
				groupMatch = it.group.ctrls().matchFull()

				if slotIdx != 0 {
					// Starting mid-group: ignore slots before
					// our current position.
					groupMatch = groupMatch.removeBelow(slotIdx)
				}

				// Skip groups that contain no full slots at all.
				if groupMatch == 0 {
					// Jump past the remainder of this group.
					it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
					continue
				}

				// Jump directly to the first full slot.
				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
				if it.entryIdx > entryMask {
					// That slot is past the end of the table's
					// entry range; move to the next table.
					continue
				}
				entryIdx += uint64(i - slotIdx)
				slotIdx = i
			}

			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// Same stale-table handling as the fast path above.
			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// Entry no longer exists: drop it from the
					// match set and advance to the next full slot
					// in this group, or to the next group.
					groupMatch = groupMatch.removeFirst()
					if groupMatch == 0 {
						// No more full slots in this group.
						it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
						continue
					}

					// Next full slot.
					i := groupMatch.first()
					it.entryIdx += uint64(i - slotIdx)
					continue
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			// Position entryIdx at the next candidate for the
			// following call to Next.
			groupMatch = groupMatch.removeFirst()
			if groupMatch == 0 {
				// No more full slots in this group; the next call
				// starts at the next group.
				it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
			} else {
				// Next full slot in this group.
				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
			}

			it.key = key
			it.elem = elem
			return
		}

		// Table exhausted; nextDirIdx skips its remaining directory
		// entries.
	}

	// Directory exhausted; iteration complete.
	it.key = nil
	it.elem = nil
	return
}
1142
1143
1144
1145
1146 func (t *table) rehash(typ *abi.MapType, m *Map) {
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162 newCapacity := 2 * t.capacity
1163 if newCapacity <= maxTableCapacity {
1164 t.grow(typ, m, newCapacity)
1165 return
1166 }
1167
1168 t.split(typ, m)
1169 }
1170
1171
1172 func localDepthMask(localDepth uint8) uintptr {
1173 if goarch.PtrSize == 4 {
1174 return uintptr(1) << (32 - localDepth)
1175 }
1176 return uintptr(1) << (64 - localDepth)
1177 }
1178
1179
// split divides the table into two full-capacity tables at one additional
// bit of local depth and installs them in the map directory. Entries are
// distributed between the halves by the hash bit selected by
// localDepthMask.
func (t *table) split(typ *abi.MapType, m *Map) {
	localDepth := t.localDepth
	localDepth++

	// TODO: is this the best capacity?
	// Both halves start at full capacity; their directory indexes are
	// assigned by installTableSplit below.
	left := newTable(typ, maxTableCapacity, -1, localDepth)
	right := newTable(typ, maxTableCapacity, -1, localDepth)

	// The hash bit that decides left vs right.
	mask := localDepthMask(localDepth)

	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		for j := uintptr(0); j < abi.MapGroupSlots; j++ {
			if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
				// Empty or deleted slot; nothing to move.
				continue
			}

			key := g.key(typ, j)
			if typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			elem := g.elem(typ, j)
			if typ.IndirectElem() {
				elem = *((*unsafe.Pointer)(elem))
			}

			hash := typ.Hasher(key, m.seed)
			var newTable *table
			if hash&mask == 0 {
				newTable = left
			} else {
				newTable = right
			}
			// Keys are known distinct and the halves have room, so
			// the unchecked insert suffices.
			newTable.uncheckedPutSlot(typ, hash, key, elem)
		}
	}

	m.installTableSplit(t, left, right)
	// Mark the old table stale; iterators holding it detect index == -1
	// and consult the live map for entries.
	t.index = -1
}
1223
1224
1225
1226
1227
// grow replaces the table with a new one of newCapacity slots, rehashing
// every entry into it and installing it in the map directory.
//
// The caller must ensure newCapacity does not exceed maxTableCapacity
// (otherwise the table must split instead; see rehash).
func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
	newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)

	if t.capacity > 0 {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			for j := uintptr(0); j < abi.MapGroupSlots; j++ {
				if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
					// Empty or deleted slot; nothing to move.
					continue
				}

				key := g.key(typ, j)
				if typ.IndirectKey() {
					key = *((*unsafe.Pointer)(key))
				}

				elem := g.elem(typ, j)
				if typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}

				hash := typ.Hasher(key, m.seed)

				// Keys are known distinct and the new table has
				// room for all of them.
				newTable.uncheckedPutSlot(typ, hash, key, elem)
			}
		}
	}

	newTable.checkInvariants(typ, m)
	m.replaceTable(newTable)
	// Mark the old table stale for outstanding iterators.
	t.index = -1
}
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
// probeSeq maintains the state of a probe sequence over the groups of a
// table. The sequence is quadratic (triangular increments): step i visits
//
//	p(i) = (i² + i)/2 + h1(hash)  (mod mask+1)
//
// which, because the group count mask+1 is a power of two, visits every
// group exactly once in the first mask+1 steps (see next()).
type probeSeq struct {
	mask   uint64 // group count - 1 (group count is a power of two)
	offset uint64 // current group index
	index  uint64 // current step number; also the next increment
}
1278
1279 func makeProbeSeq(hash uintptr, mask uint64) probeSeq {
1280 return probeSeq{
1281 mask: mask,
1282 offset: uint64(hash) & mask,
1283 index: 0,
1284 }
1285 }
1286
1287 func (s probeSeq) next() probeSeq {
1288 s.index++
1289 s.offset = (s.offset + s.index) & s.mask
1290 return s
1291 }
1292
1293 func (t *table) clone(typ *abi.MapType) *table {
1294
1295 t2 := new(table)
1296 *t2 = *t
1297 t = t2
1298
1299
1300 oldGroups := t.groups
1301 newGroups := newGroups(typ, oldGroups.lengthMask+1)
1302 for i := uint64(0); i <= oldGroups.lengthMask; i++ {
1303 oldGroup := oldGroups.group(typ, i)
1304 newGroup := newGroups.group(typ, i)
1305 cloneGroup(typ, newGroup, oldGroup)
1306 }
1307 t.groups = newGroups
1308
1309 return t
1310 }
1311