Source file: src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/exithook"
15 "internal/runtime/sys"
16 "internal/strconv"
17 "internal/stringslite"
18 "unsafe"
19 )
20
21
22 var modinfo string
23
118 var (
119 m0 m
120 g0 g
121 mcache0 *mcache
122 raceprocctx0 uintptr
123 raceFiniLock mutex
124 )
125
126
127
128 var runtime_inittasks []*initTask
129
130
131
132
133
134 var main_init_done chan bool
135
136
137 func main_main()
138
139
140 var mainStarted bool
141
142
143 var runtimeInitTime int64
144
145
146 var initSigmask sigset
147
148
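// main is the function run by the main goroutine (runtime.main). It sets the
// maximum goroutine stack size, starts sysmon, runs the runtime and package
// init functions, and then calls main.main. On return it waits for any
// in-flight panics and deferred panic handlers, runs exit hooks, and calls
// exit(0).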
149 func main() {
150 mp := getg().m
151
152
153
154 mp.g0.racectx = 0
155
156
157
158
159 if goarch.PtrSize == 8 {
160 maxstacksize = 1000000000
161 } else {
162 maxstacksize = 250000000
163 }
164
165
166
167
168 maxstackceiling = 2 * maxstacksize
169
170
171 mainStarted = true
172
173 if haveSysmon {
174 systemstack(func() {
175 newm(sysmon, nil, -1)
176 })
177 }
178
179
180
181
182
183
184
185 lockOSThread()
186
187 if mp != &m0 {
188 throw("runtime.main not on m0")
189 }
190
191
192
193 runtimeInitTime = nanotime()
194 if runtimeInitTime == 0 {
195 throw("nanotime returning zero")
196 }
197
198 if debug.inittrace != 0 {
199 inittrace.id = getg().goid
200 inittrace.active = true
201 }
202
203 doInit(runtime_inittasks)
204
205
206 needUnlock := true
207 defer func() {
208 if needUnlock {
209 unlockOSThread()
210 }
211 }()
212
213 gcenable()
214 defaultGOMAXPROCSUpdateEnable()
215
216 main_init_done = make(chan bool)
217 if iscgo {
218 if _cgo_pthread_key_created == nil {
219 throw("_cgo_pthread_key_created missing")
220 }
221
222 if _cgo_thread_start == nil {
223 throw("_cgo_thread_start missing")
224 }
225 if GOOS != "windows" {
226 if _cgo_setenv == nil {
227 throw("_cgo_setenv missing")
228 }
229 if _cgo_unsetenv == nil {
230 throw("_cgo_unsetenv missing")
231 }
232 }
233 if _cgo_notify_runtime_init_done == nil {
234 throw("_cgo_notify_runtime_init_done missing")
235 }
236
237
238 if set_crosscall2 == nil {
239 throw("set_crosscall2 missing")
240 }
241 set_crosscall2()
242
243
244
245 startTemplateThread()
246 cgocall(_cgo_notify_runtime_init_done, nil)
247 }
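// Run the package init tasks recorded by the linker for every loaded module.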
256 last := lastmoduledatap
257 for m := &firstmoduledata; true; m = m.next {
258 doInit(m.inittasks)
259 if m == last {
260 break
261 }
262 }
263
264
265
266 inittrace.active = false
267
268 close(main_init_done)
269
270 needUnlock = false
271 unlockOSThread()
272
273 if isarchive || islibrary {
274
275
276 if GOARCH == "wasm" {
277
278
279
280
281
282
283
284 pause(sys.GetCallerSP() - 16)
285 panic("unreachable")
286 }
287 return
288 }
289 fn := main_main
290 fn()
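// When ASAN is enabled and the program made cgo calls or is built as a
// library/archive, run the exit hooks and the LSan leak check now, before
// waiting on any in-flight panics below.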
299 exitHooksRun := false
300 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
301 runExitHooks(0)
302 exitHooksRun = true
303 lsandoleakcheck()
304 }
305
306
307
308
309
310 if runningPanicDefers.Load() != 0 {
311
312 for c := 0; c < 1000; c++ {
313 if runningPanicDefers.Load() == 0 {
314 break
315 }
316 Gosched()
317 }
318 }
319 if panicking.Load() != 0 {
320 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
321 }
322 if !exitHooksRun {
323 runExitHooks(0)
324 }
325 if raceenabled {
326 racefini()
327 }
328
329 exit(0)
330 for {
331 var x *int32
332 *x = 0
333 }
334 }
335
336
337
338
339 func os_beforeExit(exitCode int) {
340 runExitHooks(exitCode)
341 if exitCode == 0 && raceenabled {
342 racefini()
343 }
344
345
346 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
347 lsandoleakcheck()
348 }
349 }
350
351 func init() {
352 exithook.Gosched = Gosched
353 exithook.Goid = func() uint64 { return getg().goid }
354 exithook.Throw = throw
355 }
356
357 func runExitHooks(code int) {
358 exithook.Run(code)
359 }
360
361
362 func init() {
363 go forcegchelper()
364 }
365
366 func forcegchelper() {
367 forcegc.g = getg()
368 lockInit(&forcegc.lock, lockRankForcegc)
369 for {
370 lock(&forcegc.lock)
371 if forcegc.idle.Load() {
372 throw("forcegc: phase error")
373 }
374 forcegc.idle.Store(true)
375 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
376
377 if debug.gctrace > 0 {
378 println("GC forced")
379 }
380
381 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
382 }
383 }
384
385
386
387
388
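// Gosched yields the processor, allowing other goroutines to run. It does
// not suspend the current goroutine, so execution resumes automatically.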
389 func Gosched() {
390 checkTimeouts()
391 mcall(gosched_m)
392 }
393
394
395
396
397
398 func goschedguarded() {
399 mcall(goschedguarded_m)
400 }
401
402
403
404
405
406
407 func goschedIfBusy() {
408 gp := getg()
409
410
411 if !gp.preempt && sched.npidle.Load() > 0 {
412 return
413 }
414 mcall(gosched_m)
415 }
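// gopark parks the current goroutine in a waiting state with the given wait
// reason. The unlockf function, if non-nil, is called (with the goroutine
// and the supplied lock pointer) once the goroutine is committed to waiting;
// if it returns false the goroutine is made runnable again immediately.
// Otherwise the goroutine stays parked until some other goroutine calls
// goready on it.
//
// Illustrative sketch (not part of this file) of the usual park/ready
// pairing; the waiter queue, mutex, and reason/trace constants are
// placeholders:
//
//	lock(&mu)
//	waiters.enqueue(getg())
//	goparkunlock(&mu, waitReasonExample, traceBlockExample, 1) // sleeps here
//
//	// elsewhere, the waker:
//	lock(&mu)
//	gp := waiters.dequeue()
//	unlock(&mu)
//	goready(gp, 1) // makes gp runnable again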
445 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
446 if reason != waitReasonSleep {
447 checkTimeouts()
448 }
449 mp := acquirem()
450 gp := mp.curg
451 status := readgstatus(gp)
452 if status != _Grunning && status != _Gscanrunning {
453 throw("gopark: bad g status")
454 }
455 mp.waitlock = lock
456 mp.waitunlockf = unlockf
457 gp.waitreason = reason
458 mp.waitTraceBlockReason = traceReason
459 mp.waitTraceSkip = traceskip
460 releasem(mp)
461
462 mcall(park_m)
463 }
464
465
466
467 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
468 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
469 }
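// goready marks gp runnable from a waiting state and places it in the
// current P's run queue as the next goroutine to run, waking another P if
// needed. The work is done in ready on the system stack.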
481 func goready(gp *g, traceskip int) {
482 systemstack(func() {
483 ready(gp, traceskip, true)
484 })
485 }
486
487
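// acquireSudog returns a sudog for the current goroutine's use, taking it
// from the per-P cache and refilling that cache from the global
// sched.sudogcache (or by allocating) when it is empty.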
488 func acquireSudog() *sudog {
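// The acquirem/releasem pair keeps this goroutine on its current P while the
// per-P cache is manipulated; new(sudog) below may allocate and trigger a
// GC, which itself uses sudogs via semaphores.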
497 mp := acquirem()
498 pp := mp.p.ptr()
499 if len(pp.sudogcache) == 0 {
500 lock(&sched.sudoglock)
501
502 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
503 s := sched.sudogcache
504 sched.sudogcache = s.next
505 s.next = nil
506 pp.sudogcache = append(pp.sudogcache, s)
507 }
508 unlock(&sched.sudoglock)
509
510 if len(pp.sudogcache) == 0 {
511 pp.sudogcache = append(pp.sudogcache, new(sudog))
512 }
513 }
514 n := len(pp.sudogcache)
515 s := pp.sudogcache[n-1]
516 pp.sudogcache[n-1] = nil
517 pp.sudogcache = pp.sudogcache[:n-1]
518 if s.elem.get() != nil {
519 throw("acquireSudog: found s.elem != nil in cache")
520 }
521 releasem(mp)
522 return s
523 }
524
525
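// releaseSudog returns s to the per-P sudog cache after checking that it has
// been fully reset. When the local cache is full, roughly half of it is
// transferred to the global sched.sudogcache first.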
526 func releaseSudog(s *sudog) {
527 if s.elem.get() != nil {
528 throw("runtime: sudog with non-nil elem")
529 }
530 if s.isSelect {
531 throw("runtime: sudog with non-false isSelect")
532 }
533 if s.next != nil {
534 throw("runtime: sudog with non-nil next")
535 }
536 if s.prev != nil {
537 throw("runtime: sudog with non-nil prev")
538 }
539 if s.waitlink != nil {
540 throw("runtime: sudog with non-nil waitlink")
541 }
542 if s.c.get() != nil {
543 throw("runtime: sudog with non-nil c")
544 }
545 gp := getg()
546 if gp.param != nil {
547 throw("runtime: releaseSudog with non-nil gp.param")
548 }
549 mp := acquirem()
550 pp := mp.p.ptr()
551 if len(pp.sudogcache) == cap(pp.sudogcache) {
552
553 var first, last *sudog
554 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
555 n := len(pp.sudogcache)
556 p := pp.sudogcache[n-1]
557 pp.sudogcache[n-1] = nil
558 pp.sudogcache = pp.sudogcache[:n-1]
559 if first == nil {
560 first = p
561 } else {
562 last.next = p
563 }
564 last = p
565 }
566 lock(&sched.sudoglock)
567 last.next = sched.sudogcache
568 sched.sudogcache = first
569 unlock(&sched.sudoglock)
570 }
571 pp.sudogcache = append(pp.sudogcache, s)
572 releasem(mp)
573 }
574
575
576 func badmcall(fn func(*g)) {
577 throw("runtime: mcall called on m->g0 stack")
578 }
579
580 func badmcall2(fn func(*g)) {
581 throw("runtime: mcall function returned")
582 }
583
584 func badreflectcall() {
585 panic(plainError("arg size to reflect.call more than 1GB"))
586 }
587
588
589
590 func badmorestackg0() {
591 if !crashStackImplemented {
592 writeErrStr("fatal: morestack on g0\n")
593 return
594 }
595
596 g := getg()
597 switchToCrashStack(func() {
598 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
599 g.m.traceback = 2
600 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
601 print("\n")
602
603 throw("morestack on g0")
604 })
605 }
606
607
608
609 func badmorestackgsignal() {
610 writeErrStr("fatal: morestack on gsignal\n")
611 }
612
613
614 func badctxt() {
615 throw("ctxt != 0")
616 }
617
618
619
620 var gcrash g
621
622 var crashingG atomic.Pointer[g]
623
624
625
626
627
628
629
630
631
632 func switchToCrashStack(fn func()) {
633 me := getg()
634 if crashingG.CompareAndSwapNoWB(nil, me) {
635 switchToCrashStack0(fn)
636 abort()
637 }
638 if crashingG.Load() == me {
639
640 writeErrStr("fatal: recursive switchToCrashStack\n")
641 abort()
642 }
643
644 usleep_no_g(100)
645 writeErrStr("fatal: concurrent switchToCrashStack\n")
646 abort()
647 }
648
649
650
651
652 const crashStackImplemented = GOOS != "windows"
653
654
655 func switchToCrashStack0(fn func())
656
657 func lockedOSThread() bool {
658 gp := getg()
659 return gp.lockedm != 0 && gp.m.lockedg != 0
660 }
661
662 var (
663
664
665
666
667
668
669 allglock mutex
670 allgs []*g
671
672
673
674
675
676
677
678
679
680
681
682
683
684 allglen uintptr
685 allgptr **g
686 )
687
688 func allgadd(gp *g) {
689 if readgstatus(gp) == _Gidle {
690 throw("allgadd: bad status Gidle")
691 }
692
693 lock(&allglock)
694 allgs = append(allgs, gp)
695 if &allgs[0] != allgptr {
696 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
697 }
698 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
699 unlock(&allglock)
700 }
701
702
703
704
705 func allGsSnapshot() []*g {
706 assertWorldStoppedOrLockHeld(&allglock)
707
708
709
710
711
712
713 return allgs[:len(allgs):len(allgs)]
714 }
715
716
717 func atomicAllG() (**g, uintptr) {
718 length := atomic.Loaduintptr(&allglen)
719 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
720 return ptr, length
721 }
722
723
724 func atomicAllGIndex(ptr **g, i uintptr) *g {
725 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
726 }
727
728
729
730
731 func forEachG(fn func(gp *g)) {
732 lock(&allglock)
733 for _, gp := range allgs {
734 fn(gp)
735 }
736 unlock(&allglock)
737 }
738
739
740
741
742
743 func forEachGRace(fn func(gp *g)) {
744 ptr, length := atomicAllG()
745 for i := uintptr(0); i < length; i++ {
746 gp := atomicAllGIndex(ptr, i)
747 fn(gp)
748 }
749 return
750 }
751
752 const (
753
754
755 _GoidCacheBatch = 16
756 )
757
758
759
760 func cpuinit(env string) {
761 cpu.Initialize(env)
762
763
764
765 switch GOARCH {
766 case "386", "amd64":
767 x86HasAVX = cpu.X86.HasAVX
768 x86HasFMA = cpu.X86.HasFMA
769 x86HasPOPCNT = cpu.X86.HasPOPCNT
770 x86HasSSE41 = cpu.X86.HasSSE41
771
772 case "arm":
773 armHasVFPv4 = cpu.ARM.HasVFPv4
774
775 case "arm64":
776 arm64HasATOMICS = cpu.ARM64.HasATOMICS
777
778 case "loong64":
779 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
780 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
781 loong64HasLSX = cpu.Loong64.HasLSX
782
783 case "riscv64":
784 riscv64HasZbb = cpu.RISCV64.HasZbb
785 }
786 }
787
788
789
790
791
792
793 func getGodebugEarly() (string, bool) {
794 const prefix = "GODEBUG="
795 var env string
796 switch GOOS {
797 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
798
799
800
801 n := int32(0)
802 for argv_index(argv, argc+1+n) != nil {
803 n++
804 }
805
806 for i := int32(0); i < n; i++ {
807 p := argv_index(argv, argc+1+i)
808 s := unsafe.String(p, findnull(p))
809
810 if stringslite.HasPrefix(s, prefix) {
811 env = gostringnocopy(p)[len(prefix):]
812 break
813 }
814 }
815 break
816
817 default:
818 return "", false
819 }
820 return env, true
821 }
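// schedinit performs the bulk of runtime initialization on g0 during
// bootstrap (after osinit, before the main goroutine runs): lock-rank setup,
// stack/malloc/GC/module initialization, GODEBUG parsing, capture of the
// initial signal mask, and creation of GOMAXPROCS Ps via procresize. The
// world is considered stopped for the duration.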
831 func schedinit() {
832 lockInit(&sched.lock, lockRankSched)
833 lockInit(&sched.sysmonlock, lockRankSysmon)
834 lockInit(&sched.deferlock, lockRankDefer)
835 lockInit(&sched.sudoglock, lockRankSudog)
836 lockInit(&deadlock, lockRankDeadlock)
837 lockInit(&paniclk, lockRankPanic)
838 lockInit(&allglock, lockRankAllg)
839 lockInit(&allpLock, lockRankAllp)
840 lockInit(&reflectOffs.lock, lockRankReflectOffs)
841 lockInit(&finlock, lockRankFin)
842 lockInit(&cpuprof.lock, lockRankCpuprof)
843 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
844 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
845 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
846 traceLockInit()
847
848
849
850 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
851
852 lockVerifyMSize()
853
854 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
855
856
857
858 gp := getg()
859 if raceenabled {
860 gp.racectx, raceprocctx0 = raceinit()
861 }
862
863 sched.maxmcount = 10000
864 crashFD.Store(^uintptr(0))
865
866
867 worldStopped()
868
869 godebug, parsedGodebug := getGodebugEarly()
870 if parsedGodebug {
871 parseRuntimeDebugVars(godebug)
872 }
873 ticks.init()
874 moduledataverify()
875 stackinit()
876 randinit()
877 mallocinit()
878 cpuinit(godebug)
879 alginit()
880 mcommoninit(gp.m, -1)
881 modulesinit()
882 typelinksinit()
883 itabsinit()
884 stkobjinit()
885
886 sigsave(&gp.m.sigmask)
887 initSigmask = gp.m.sigmask
888
889 goargs()
890 goenvs()
891 secure()
892 checkfds()
893 if !parsedGodebug {
894
895
896 parseRuntimeDebugVars(gogetenv("GODEBUG"))
897 }
898 finishDebugVarsSetup()
899 gcinit()
900
901
902
903 gcrash.stack = stackalloc(16384)
904 gcrash.stackguard0 = gcrash.stack.lo + 1000
905 gcrash.stackguard1 = gcrash.stack.lo + 1000
906
907
908
909
910
911 if disableMemoryProfiling {
912 MemProfileRate = 0
913 }
914
915
916 mProfStackInit(gp.m)
917 defaultGOMAXPROCSInit()
918
919 lock(&sched.lock)
920 sched.lastpoll.Store(nanotime())
921 var procs int32
922 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
923 procs = int32(n)
924 sched.customGOMAXPROCS = true
925 } else {
926
927
928
929
930
931
932
933
934 procs = defaultGOMAXPROCS(numCPUStartup)
935 }
936 if procresize(procs) != nil {
937 throw("unknown runnable goroutine during bootstrap")
938 }
939 unlock(&sched.lock)
940
941
942 worldStarted()
943
944 if buildVersion == "" {
945
946
947 buildVersion = "unknown"
948 }
949 if len(modinfo) == 1 {
950
951
952 modinfo = ""
953 }
954 }
955
956 func dumpgstatus(gp *g) {
957 thisg := getg()
958 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
959 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
960 }
961
962
963 func checkmcount() {
964 assertLockHeld(&sched.lock)
965
966
967
968
969
970
971
972
973
974 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
975 if count > sched.maxmcount {
976 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
977 throw("thread exhaustion")
978 }
979 }
980
981
982
983
984
985 func mReserveID() int64 {
986 assertLockHeld(&sched.lock)
987
988 if sched.mnext+1 < sched.mnext {
989 throw("runtime: thread ID overflow")
990 }
991 id := sched.mnext
992 sched.mnext++
993 checkmcount()
994 return id
995 }
996
997
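// mcommoninit performs initialization common to all Ms: assigning an id,
// seeding the M's random state, preparing its signal-handling state, and
// linking it into the allm list.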
998 func mcommoninit(mp *m, id int64) {
999 gp := getg()
1000
1001
1002 if gp != gp.m.g0 {
1003 callers(1, mp.createstack[:])
1004 }
1005
1006 lock(&sched.lock)
1007
1008 if id >= 0 {
1009 mp.id = id
1010 } else {
1011 mp.id = mReserveID()
1012 }
1013
1014 mp.self = newMWeakPointer(mp)
1015
1016 mrandinit(mp)
1017
1018 mpreinit(mp)
1019 if mp.gsignal != nil {
1020 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1021 }
1022
1023
1024
1025 mp.alllink = allm
1026
1027
1028
1029 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1030 unlock(&sched.lock)
1031
1032
1033 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1034 mp.cgoCallers = new(cgoCallers)
1035 }
1036 mProfStackInit(mp)
1037 }
1038
1039
1040
1041
1042
1043 func mProfStackInit(mp *m) {
1044 if debug.profstackdepth == 0 {
1045
1046
1047 return
1048 }
1049 mp.profStack = makeProfStackFP()
1050 mp.mLockProfile.stack = makeProfStackFP()
1051 }
1052
1053
1054
1055
1056 func makeProfStackFP() []uintptr {
1057
1058
1059
1060
1061
1062
1063 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1064 }
1065
1066
1067
1068 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1069
1070
1071 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1072
1073 func (mp *m) becomeSpinning() {
1074 mp.spinning = true
1075 sched.nmspinning.Add(1)
1076 sched.needspinning.Store(0)
1077 }
1078
1079
1080
1081
1082
1083
1084
1085
1086 func (mp *m) snapshotAllp() []*p {
1087 mp.allpSnapshot = allp
1088 return mp.allpSnapshot
1089 }
1090
1091
1092
1093
1094
1095
1096
1097 func (mp *m) clearAllpSnapshot() {
1098 mp.allpSnapshot = nil
1099 }
1100
1101 func (mp *m) hasCgoOnStack() bool {
1102 return mp.ncgo > 0 || mp.isextra
1103 }
1104
1105 const (
1106
1107
1108 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1109
1110
1111
1112 osHasLowResClockInt = goos.IsWindows
1113
1114
1115
1116 osHasLowResClock = osHasLowResClockInt > 0
1117 )
1118
1119
1120 func ready(gp *g, traceskip int, next bool) {
1121 status := readgstatus(gp)
1122
1123
1124 mp := acquirem()
1125 if status&^_Gscan != _Gwaiting {
1126 dumpgstatus(gp)
1127 throw("bad g->status in ready")
1128 }
1129
1130
1131 trace := traceAcquire()
1132 casgstatus(gp, _Gwaiting, _Grunnable)
1133 if trace.ok() {
1134 trace.GoUnpark(gp, traceskip)
1135 traceRelease(trace)
1136 }
1137 runqput(mp.p.ptr(), gp, next)
1138 wakep()
1139 releasem(mp)
1140 }
1141
1142
1143
1144 const freezeStopWait = 0x7fffffff
1145
1146
1147
1148 var freezing atomic.Bool
1149
1150
1151
1152
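// freezetheworld is a best-effort stop of the world used on the fatal-crash
// path, where waiting is not an option: it repeatedly asks all goroutines to
// preempt and sleeps briefly, so that tracebacks see a world that is as
// quiescent as possible.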
1153 func freezetheworld() {
1154 freezing.Store(true)
1155 if debug.dontfreezetheworld > 0 {
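// With GODEBUG=dontfreezetheworld set, skip the freeze: sleep briefly so
// goroutines that are stopping anyway can settle, then return with the
// world still running.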
1180 usleep(1000)
1181 return
1182 }
1183
1184
1185
1186
1187 for i := 0; i < 5; i++ {
1188
1189 sched.stopwait = freezeStopWait
1190 sched.gcwaiting.Store(true)
1191
1192 if !preemptall() {
1193 break
1194 }
1195 usleep(1000)
1196 }
1197
1198 usleep(1000)
1199 preemptall()
1200 usleep(1000)
1201 }
1202
1203
1204
1205
1206
1207 func readgstatus(gp *g) uint32 {
1208 return gp.atomicstatus.Load()
1209 }
1210
1211
1212
1213
1214
1215 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1216 success := false
1217
1218
1219 switch oldval {
1220 default:
1221 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1222 dumpgstatus(gp)
1223 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1224 case _Gscanrunnable,
1225 _Gscanwaiting,
1226 _Gscanrunning,
1227 _Gscansyscall,
1228 _Gscanleaked,
1229 _Gscanpreempted,
1230 _Gscandeadextra:
1231 if newval == oldval&^_Gscan {
1232 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1233 }
1234 }
1235 if !success {
1236 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1237 dumpgstatus(gp)
1238 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1239 }
1240 releaseLockRankAndM(lockRankGscan)
1241 }
1242
1243
1244
1245 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1246 switch oldval {
1247 case _Grunnable,
1248 _Grunning,
1249 _Gwaiting,
1250 _Gleaked,
1251 _Gsyscall,
1252 _Gdeadextra:
1253 if newval == oldval|_Gscan {
1254 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1255 if r {
1256 acquireLockRankAndM(lockRankGscan)
1257 }
1258 return r
1259
1260 }
1261 }
1262 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1263 throw("bad oldval passed to castogscanstatus")
1264 return false
1265 }
1266
1267
1268
1269 var casgstatusAlwaysTrack = false
1270
1271
1272
1273
1274
1275
1276
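// casgstatus transitions gp's status from oldval to newval, spinning until
// the CAS succeeds (the status may be held transiently in a _Gscan variant
// by the garbage collector). It also feeds the goroutine scheduling-latency
// and mutex-wait tracking (sched.timeToRun, sched.totalMutexWaitTime).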
1277 func casgstatus(gp *g, oldval, newval uint32) {
1278 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1279 systemstack(func() {
1280
1281
1282 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1283 throw("casgstatus: bad incoming values")
1284 })
1285 }
1286
1287 lockWithRankMayAcquire(nil, lockRankGscan)
1288
1289
1290 const yieldDelay = 5 * 1000
1291 var nextYield int64
1292
1293
1294
1295 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1296 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1297 systemstack(func() {
1298
1299
1300 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1301 })
1302 }
1303 if i == 0 {
1304 nextYield = nanotime() + yieldDelay
1305 }
1306 if nanotime() < nextYield {
1307 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1308 procyield(1)
1309 }
1310 } else {
1311 osyield()
1312 nextYield = nanotime() + yieldDelay/2
1313 }
1314 }
1315
1316 if gp.bubble != nil {
1317 systemstack(func() {
1318 gp.bubble.changegstatus(gp, oldval, newval)
1319 })
1320 }
1321
1322 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1323
1324
1325 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1326 gp.tracking = true
1327 }
1328 gp.trackingSeq++
1329 }
1330 if !gp.tracking {
1331 return
1332 }
1333
1334
1335
1336
1337
1338
1339 switch oldval {
1340 case _Grunnable:
1341
1342
1343
1344 now := nanotime()
1345 gp.runnableTime += now - gp.trackingStamp
1346 gp.trackingStamp = 0
1347 case _Gwaiting:
1348 if !gp.waitreason.isMutexWait() {
1349
1350 break
1351 }
1352
1353
1354
1355
1356
1357 now := nanotime()
1358 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1359 gp.trackingStamp = 0
1360 }
1361 switch newval {
1362 case _Gwaiting:
1363 if !gp.waitreason.isMutexWait() {
1364
1365 break
1366 }
1367
1368 now := nanotime()
1369 gp.trackingStamp = now
1370 case _Grunnable:
1371
1372
1373 now := nanotime()
1374 gp.trackingStamp = now
1375 case _Grunning:
1376
1377
1378
1379 gp.tracking = false
1380 sched.timeToRun.record(gp.runnableTime)
1381 gp.runnableTime = 0
1382 }
1383 }
1384
1385
1386
1387
1388 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1389
1390 gp.waitreason = reason
1391 casgstatus(gp, old, _Gwaiting)
1392 }
1393
1394
1395
1396
1397
1398
1399
1400
1401 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1402 if !reason.isWaitingForSuspendG() {
1403 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1404 }
1405 casGToWaiting(gp, old, reason)
1406 }
1407
1408
1409
1410
1411
1412 func casGToPreemptScan(gp *g, old, new uint32) {
1413 if old != _Grunning || new != _Gscan|_Gpreempted {
1414 throw("bad g transition")
1415 }
1416 acquireLockRankAndM(lockRankGscan)
1417 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1418 }
1419
1420
1421
1422
1423
1424
1425 }
1426
1427
1428
1429
1430 func casGFromPreempted(gp *g, old, new uint32) bool {
1431 if old != _Gpreempted || new != _Gwaiting {
1432 throw("bad g transition")
1433 }
1434 gp.waitreason = waitReasonPreempted
1435 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1436 return false
1437 }
1438 if bubble := gp.bubble; bubble != nil {
1439 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1440 }
1441 return true
1442 }
1443
1444
1445 type stwReason uint8
1446
1447
1448
1449
1450 const (
1451 stwUnknown stwReason = iota
1452 stwGCMarkTerm
1453 stwGCSweepTerm
1454 stwWriteHeapDump
1455 stwGoroutineProfile
1456 stwGoroutineProfileCleanup
1457 stwAllGoroutinesStack
1458 stwReadMemStats
1459 stwAllThreadsSyscall
1460 stwGOMAXPROCS
1461 stwStartTrace
1462 stwStopTrace
1463 stwForTestCountPagesInUse
1464 stwForTestReadMetricsSlow
1465 stwForTestReadMemStatsSlow
1466 stwForTestPageCachePagesLeaked
1467 stwForTestResetDebugLog
1468 )
1469
1470 func (r stwReason) String() string {
1471 return stwReasonStrings[r]
1472 }
1473
1474 func (r stwReason) isGC() bool {
1475 return r == stwGCMarkTerm || r == stwGCSweepTerm
1476 }
1477
1478
1479
1480
1481 var stwReasonStrings = [...]string{
1482 stwUnknown: "unknown",
1483 stwGCMarkTerm: "GC mark termination",
1484 stwGCSweepTerm: "GC sweep termination",
1485 stwWriteHeapDump: "write heap dump",
1486 stwGoroutineProfile: "goroutine profile",
1487 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1488 stwAllGoroutinesStack: "all goroutines stack trace",
1489 stwReadMemStats: "read mem stats",
1490 stwAllThreadsSyscall: "AllThreadsSyscall",
1491 stwGOMAXPROCS: "GOMAXPROCS",
1492 stwStartTrace: "start trace",
1493 stwStopTrace: "stop trace",
1494 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1495 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1496 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1497 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1498 stwForTestResetDebugLog: "ResetDebugLog (test)",
1499 }
1500
1501
1502
1503 type worldStop struct {
1504 reason stwReason
1505 startedStopping int64
1506 finishedStopping int64
1507 stoppingCPUTime int64
1508 }
1509
1510
1511
1512
1513 var stopTheWorldContext worldStop
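// stopTheWorld stops all Ps from executing goroutines and returns once the
// world is stopped, recording reason for tracing and metrics. It acquires
// worldsema and disables preemption on the calling M, so the caller must
// pair it with startTheWorld to release both. Typical use (illustrative
// sketch, not from this file):
//
//	w := stopTheWorld(stwReadMemStats)
//	// ... inspect or mutate global runtime state ...
//	startTheWorld(w)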
1532 func stopTheWorld(reason stwReason) worldStop {
1533 semacquire(&worldsema)
1534 gp := getg()
1535 gp.m.preemptoff = reason.String()
1536 systemstack(func() {
1537 stopTheWorldContext = stopTheWorldWithSema(reason)
1538 })
1539 return stopTheWorldContext
1540 }
1541
1542
1543
1544
1545 func startTheWorld(w worldStop) {
1546 systemstack(func() { startTheWorldWithSema(0, w) })
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563 mp := acquirem()
1564 mp.preemptoff = ""
1565 semrelease1(&worldsema, true, 0)
1566 releasem(mp)
1567 }
1568
1569
1570
1571
1572 func stopTheWorldGC(reason stwReason) worldStop {
1573 semacquire(&gcsema)
1574 return stopTheWorld(reason)
1575 }
1576
1577
1578
1579
1580 func startTheWorldGC(w worldStop) {
1581 startTheWorld(w)
1582 semrelease(&gcsema)
1583 }
1584
1585
1586 var worldsema uint32 = 1
1587
1588
1589
1590
1591
1592
1593
1594 var gcsema uint32 = 1
1595
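// stopTheWorldWithSema is the core of stopTheWorld and must run on the
// system stack with worldsema held. It sets gcwaiting, preempts all running
// goroutines, claims idle Ps and Ps blocked in syscalls, and waits on
// stopnote until sched.stopwait reaches zero. It returns timing information
// describing how long the stop took.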
1628 func stopTheWorldWithSema(reason stwReason) worldStop {
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1642
1643 trace := traceAcquire()
1644 if trace.ok() {
1645 trace.STWStart(reason)
1646 traceRelease(trace)
1647 }
1648 gp := getg()
1649
1650
1651
1652 if gp.m.locks > 0 {
1653 throw("stopTheWorld: holding locks")
1654 }
1655
1656 lock(&sched.lock)
1657 start := nanotime()
1658 sched.stopwait = gomaxprocs
1659 sched.gcwaiting.Store(true)
1660 preemptall()
1661
1662
1663 gp.m.p.ptr().status = _Pgcstop
1664 gp.m.p.ptr().gcStopTime = start
1665 sched.stopwait--
1666
1667
1668 for _, pp := range allp {
1669 if thread, ok := setBlockOnExitSyscall(pp); ok {
1670 thread.gcstopP()
1671 thread.resume()
1672 }
1673 }
1674
1675
1676 now := nanotime()
1677 for {
1678 pp, _ := pidleget(now)
1679 if pp == nil {
1680 break
1681 }
1682 pp.status = _Pgcstop
1683 pp.gcStopTime = nanotime()
1684 sched.stopwait--
1685 }
1686 wait := sched.stopwait > 0
1687 unlock(&sched.lock)
1688
1689
1690 if wait {
1691 for {
1692
1693 if notetsleep(&sched.stopnote, 100*1000) {
1694 noteclear(&sched.stopnote)
1695 break
1696 }
1697 preemptall()
1698 }
1699 }
1700
1701 finish := nanotime()
1702 startTime := finish - start
1703 if reason.isGC() {
1704 sched.stwStoppingTimeGC.record(startTime)
1705 } else {
1706 sched.stwStoppingTimeOther.record(startTime)
1707 }
1708
1709
1710
1711
1712
1713 stoppingCPUTime := int64(0)
1714 bad := ""
1715 if sched.stopwait != 0 {
1716 bad = "stopTheWorld: not stopped (stopwait != 0)"
1717 } else {
1718 for _, pp := range allp {
1719 if pp.status != _Pgcstop {
1720 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1721 }
1722 if pp.gcStopTime == 0 && bad == "" {
1723 bad = "stopTheWorld: broken CPU time accounting"
1724 }
1725 stoppingCPUTime += finish - pp.gcStopTime
1726 pp.gcStopTime = 0
1727 }
1728 }
1729 if freezing.Load() {
1730
1731
1732
1733
1734 lock(&deadlock)
1735 lock(&deadlock)
1736 }
1737 if bad != "" {
1738 throw(bad)
1739 }
1740
1741 worldStopped()
1742
1743
1744 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1745
1746 return worldStop{
1747 reason: reason,
1748 startedStopping: start,
1749 finishedStopping: finish,
1750 stoppingCPUTime: stoppingCPUTime,
1751 }
1752 }
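// startTheWorldWithSema undoes stopTheWorldWithSema: it resizes the P set if
// newprocs changed, clears gcwaiting, hands each runnable P back to an M
// (creating Ms as needed), records stop-the-world timing, and wakes an
// additional P in case there is now more work than running Ms.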
1760 func startTheWorldWithSema(now int64, w worldStop) int64 {
1761 assertWorldStopped()
1762
1763 mp := acquirem()
1764 if netpollinited() {
1765 list, delta := netpoll(0)
1766 injectglist(&list)
1767 netpollAdjustWaiters(delta)
1768 }
1769 lock(&sched.lock)
1770
1771 procs := gomaxprocs
1772 if newprocs != 0 {
1773 procs = newprocs
1774 newprocs = 0
1775 }
1776 p1 := procresize(procs)
1777 sched.gcwaiting.Store(false)
1778 if sched.sysmonwait.Load() {
1779 sched.sysmonwait.Store(false)
1780 notewakeup(&sched.sysmonnote)
1781 }
1782 unlock(&sched.lock)
1783
1784 worldStarted()
1785
1786 for p1 != nil {
1787 p := p1
1788 p1 = p1.link.ptr()
1789 if p.m != 0 {
1790 mp := p.m.ptr()
1791 p.m = 0
1792 if mp.nextp != 0 {
1793 throw("startTheWorld: inconsistent mp->nextp")
1794 }
1795 mp.nextp.set(p)
1796 notewakeup(&mp.park)
1797 } else {
1798
1799 newm(nil, p, -1)
1800 }
1801 }
1802
1803
1804 if now == 0 {
1805 now = nanotime()
1806 }
1807 totalTime := now - w.startedStopping
1808 if w.reason.isGC() {
1809 sched.stwTotalTimeGC.record(totalTime)
1810 } else {
1811 sched.stwTotalTimeOther.record(totalTime)
1812 }
1813 trace := traceAcquire()
1814 if trace.ok() {
1815 trace.STWDone()
1816 traceRelease(trace)
1817 }
1818
1819
1820
1821
1822 wakep()
1823
1824 releasem(mp)
1825
1826 return now
1827 }
1828
1829
1830
1831 func usesLibcall() bool {
1832 switch GOOS {
1833 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1834 return true
1835 }
1836 return false
1837 }
1838
1839
1840
1841 func mStackIsSystemAllocated() bool {
1842 switch GOOS {
1843 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1844 return true
1845 }
1846 return false
1847 }
1848
1849
1850
1851 func mstart()
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862 func mstart0() {
1863 gp := getg()
1864
1865 osStack := gp.stack.lo == 0
1866 if osStack {
1867
1868
1869
1870
1871
1872
1873
1874
1875 size := gp.stack.hi
1876 if size == 0 {
1877 size = 16384 * sys.StackGuardMultiplier
1878 }
1879 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1880 gp.stack.lo = gp.stack.hi - size + 1024
1881 }
1882
1883
1884 gp.stackguard0 = gp.stack.lo + stackGuard
1885
1886
1887 gp.stackguard1 = gp.stackguard0
1888 mstart1()
1889
1890
1891 if mStackIsSystemAllocated() {
1892
1893
1894
1895 osStack = true
1896 }
1897 mexit(osStack)
1898 }
1899
1900
1901
1902
1903
1904 func mstart1() {
1905 gp := getg()
1906
1907 if gp != gp.m.g0 {
1908 throw("bad runtime·mstart")
1909 }
1910
1911
1912
1913
1914
1915
1916
1917 gp.sched.g = guintptr(unsafe.Pointer(gp))
1918 gp.sched.pc = sys.GetCallerPC()
1919 gp.sched.sp = sys.GetCallerSP()
1920
1921 asminit()
1922 minit()
1923
1924
1925
1926 if gp.m == &m0 {
1927 mstartm0()
1928 }
1929
1930 if debug.dataindependenttiming == 1 {
1931 sys.EnableDIT()
1932 }
1933
1934 if fn := gp.m.mstartfn; fn != nil {
1935 fn()
1936 }
1937
1938 if gp.m != &m0 {
1939 acquirep(gp.m.nextp.ptr())
1940 gp.m.nextp = 0
1941 }
1942 schedule()
1943 }
1944
1945
1946
1947
1948
1949
1950
1951 func mstartm0() {
1952
1953
1954
1955 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1956 cgoHasExtraM = true
1957 newextram()
1958 }
1959 initsig(false)
1960 }
1961
1962
1963
1964
1965 func mPark() {
1966 gp := getg()
1967 notesleep(&gp.m.park)
1968 noteclear(&gp.m.park)
1969 }
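// mexit tears down and exits the current thread. m0 (the main thread) cannot
// be exited; it hands off its P and parks forever instead. For other Ms the
// signal stack and vgetrandom state are freed, the M is unlinked from allm,
// and it is queued on sched.freem so its g0 stack can be freed once the OS
// thread has truly exited.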
1981 func mexit(osStack bool) {
1982 mp := getg().m
1983
1984 if mp == &m0 {
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996 handoffp(releasep())
1997 lock(&sched.lock)
1998 sched.nmfreed++
1999 checkdead()
2000 unlock(&sched.lock)
2001 mPark()
2002 throw("locked m0 woke up")
2003 }
2004
2005 sigblock(true)
2006 unminit()
2007
2008
2009 if mp.gsignal != nil {
2010 stackfree(mp.gsignal.stack)
2011 if valgrindenabled {
2012 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2013 mp.gsignal.valgrindStackID = 0
2014 }
2015
2016
2017
2018
2019 mp.gsignal = nil
2020 }
2021
2022
2023 vgetrandomDestroy(mp)
2024
2025
2026
2027 mp.self.clear()
2028
2029
2030 lock(&sched.lock)
2031 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2032 if *pprev == mp {
2033 *pprev = mp.alllink
2034 goto found
2035 }
2036 }
2037 throw("m not found in allm")
2038 found:
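// The thread is still running on this M's g0 stack, so the M cannot be freed
// here. Queue it on sched.freem; allocm reaps the list later, freeing the g0
// stack once freeWait shows the OS thread has exited.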
2053 mp.freeWait.Store(freeMWait)
2054 mp.freelink = sched.freem
2055 sched.freem = mp
2056 unlock(&sched.lock)
2057
2058 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2059 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2060
2061
2062 handoffp(releasep())
2063
2064
2065
2066
2067
2068 lock(&sched.lock)
2069 sched.nmfreed++
2070 checkdead()
2071 unlock(&sched.lock)
2072
2073 if GOOS == "darwin" || GOOS == "ios" {
2074
2075
2076 if mp.signalPending.Load() != 0 {
2077 pendingPreemptSignals.Add(-1)
2078 }
2079 }
2080
2081
2082
2083 mdestroy(mp)
2084
2085 if osStack {
2086
2087 mp.freeWait.Store(freeMRef)
2088
2089
2090
2091 return
2092 }
2093
2094
2095
2096
2097
2098 exitThread(&mp.freeWait)
2099 }
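// forEachP calls fn(p) for every P, running each call at a GC safe point
// while the caller's goroutine is parked in a waiting state. It returns once
// fn has run on every P.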
2111 func forEachP(reason waitReason, fn func(*p)) {
2112 systemstack(func() {
2113 gp := getg().m.curg
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125 casGToWaitingForSuspendG(gp, _Grunning, reason)
2126 forEachPInternal(fn)
2127 casgstatus(gp, _Gwaiting, _Grunning)
2128 })
2129 }
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140 func forEachPInternal(fn func(*p)) {
2141 mp := acquirem()
2142 pp := getg().m.p.ptr()
2143
2144 lock(&sched.lock)
2145 if sched.safePointWait != 0 {
2146 throw("forEachP: sched.safePointWait != 0")
2147 }
2148 sched.safePointWait = gomaxprocs - 1
2149 sched.safePointFn = fn
2150
2151
2152 for _, p2 := range allp {
2153 if p2 != pp {
2154 atomic.Store(&p2.runSafePointFn, 1)
2155 }
2156 }
2157 preemptall()
2158
2159
2160
2161
2162
2163
2164
2165 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2166 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2167 fn(p)
2168 sched.safePointWait--
2169 }
2170 }
2171
2172 wait := sched.safePointWait > 0
2173 unlock(&sched.lock)
2174
2175
2176 fn(pp)
2177
2178
2179
2180 for _, p2 := range allp {
2181 if atomic.Load(&p2.runSafePointFn) != 1 {
2182
2183 continue
2184 }
2185 if thread, ok := setBlockOnExitSyscall(p2); ok {
2186 thread.takeP()
2187 thread.resume()
2188 handoffp(p2)
2189 }
2190 }
2191
2192
2193 if wait {
2194 for {
2195
2196
2197
2198
2199 if notetsleep(&sched.safePointNote, 100*1000) {
2200 noteclear(&sched.safePointNote)
2201 break
2202 }
2203 preemptall()
2204 }
2205 }
2206 if sched.safePointWait != 0 {
2207 throw("forEachP: not done")
2208 }
2209 for _, p2 := range allp {
2210 if p2.runSafePointFn != 0 {
2211 throw("forEachP: P did not run fn")
2212 }
2213 }
2214
2215 lock(&sched.lock)
2216 sched.safePointFn = nil
2217 unlock(&sched.lock)
2218 releasem(mp)
2219 }
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232 func runSafePointFn() {
2233 p := getg().m.p.ptr()
2234
2235
2236
2237 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2238 return
2239 }
2240 sched.safePointFn(p)
2241 lock(&sched.lock)
2242 sched.safePointWait--
2243 if sched.safePointWait == 0 {
2244 notewakeup(&sched.safePointNote)
2245 }
2246 unlock(&sched.lock)
2247 }
2248
2249
2250
2251
2252 var cgoThreadStart unsafe.Pointer
2253
2254 type cgothreadstart struct {
2255 g guintptr
2256 tls *uint64
2257 fn unsafe.Pointer
2258 }
2259
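// allocm allocates a new m not yet associated with an OS thread. fn is
// recorded as the new m's startup function and id, if non-negative, becomes
// its id. It may temporarily borrow pp in order to allocate, and it also
// reaps sched.freem, freeing the g0 stacks of Ms that have finished exiting.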
2269 func allocm(pp *p, fn func(), id int64) *m {
2270 allocmLock.rlock()
2271
2272
2273
2274
2275 acquirem()
2276
2277 gp := getg()
2278 if gp.m.p == 0 {
2279 acquirep(pp)
2280 }
2281
2282
2283
2284 if sched.freem != nil {
2285 lock(&sched.lock)
2286 var newList *m
2287 for freem := sched.freem; freem != nil; {
2288
2289 wait := freem.freeWait.Load()
2290 if wait == freeMWait {
2291 next := freem.freelink
2292 freem.freelink = newList
2293 newList = freem
2294 freem = next
2295 continue
2296 }
2297
2298
2299
2300 if traceEnabled() || traceShuttingDown() {
2301 traceThreadDestroy(freem)
2302 }
2303
2304
2305
2306 if wait == freeMStack {
2307
2308
2309
2310 systemstack(func() {
2311 stackfree(freem.g0.stack)
2312 if valgrindenabled {
2313 valgrindDeregisterStack(freem.g0.valgrindStackID)
2314 freem.g0.valgrindStackID = 0
2315 }
2316 })
2317 }
2318 freem = freem.freelink
2319 }
2320 sched.freem = newList
2321 unlock(&sched.lock)
2322 }
2323
2324 mp := &new(mPadded).m
2325 mp.mstartfn = fn
2326 mcommoninit(mp, id)
2327
2328
2329
2330 if iscgo || mStackIsSystemAllocated() {
2331 mp.g0 = malg(-1)
2332 } else {
2333 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2334 }
2335 mp.g0.m = mp
2336
2337 if pp == gp.m.p.ptr() {
2338 releasep()
2339 }
2340
2341 releasem(gp.m)
2342 allocmLock.runlock()
2343 return mp
2344 }
2345
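// needm is called on a thread with no Go m (a C thread entering Go via a cgo
// callback or a signal) to acquire an extra m and its g0 so Go code can run.
// The thread holds the m only for the duration of the callback; dropm
// returns it.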
2385 func needm(signal bool) {
2386 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2387
2388
2389
2390
2391
2392
2393 writeErrStr("fatal error: cgo callback before cgo call\n")
2394 exit(1)
2395 }
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405 var sigmask sigset
2406 sigsave(&sigmask)
2407 sigblock(false)
2408
2409
2410
2411
2412 mp, last := getExtraM()
2413
2414
2415
2416
2417
2418
2419
2420
2421 mp.needextram = last
2422
2423
2424 mp.sigmask = sigmask
2425
2426
2427
2428 osSetupTLS(mp)
2429
2430
2431
2432 setg(mp.g0)
2433 sp := sys.GetCallerSP()
2434 callbackUpdateSystemStack(mp, sp, signal)
2435
2436
2437
2438
2439 mp.isExtraInC = false
2440
2441
2442 asminit()
2443 minit()
2444
2445
2446
2447
2448
2449
2450 var trace traceLocker
2451 if !signal {
2452 trace = traceAcquire()
2453 }
2454
2455
2456 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2457 sched.ngsys.Add(-1)
2458
2459
2460
2461 if !signal {
2462 if trace.ok() {
2463 trace.GoCreateSyscall(mp.curg)
2464 traceRelease(trace)
2465 }
2466 }
2467 mp.isExtraInSig = signal
2468 }
2469
2470
2471
2472
2473 func needAndBindM() {
2474 needm(false)
2475
2476 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2477 cgoBindM()
2478 }
2479 }
2480
2481
2482
2483
2484 func newextram() {
2485 c := extraMWaiters.Swap(0)
2486 if c > 0 {
2487 for i := uint32(0); i < c; i++ {
2488 oneNewExtraM()
2489 }
2490 } else if extraMLength.Load() == 0 {
2491
2492 oneNewExtraM()
2493 }
2494 }
2495
2496
2497 func oneNewExtraM() {
2498
2499
2500
2501
2502
2503 mp := allocm(nil, nil, -1)
2504 gp := malg(4096)
2505 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2506 gp.sched.sp = gp.stack.hi
2507 gp.sched.sp -= 4 * goarch.PtrSize
2508 gp.sched.lr = 0
2509 gp.sched.g = guintptr(unsafe.Pointer(gp))
2510 gp.syscallpc = gp.sched.pc
2511 gp.syscallsp = gp.sched.sp
2512 gp.stktopsp = gp.sched.sp
2513
2514
2515
2516 casgstatus(gp, _Gidle, _Gdeadextra)
2517 gp.m = mp
2518 mp.curg = gp
2519 mp.isextra = true
2520
2521 mp.isExtraInC = true
2522 mp.lockedInt++
2523 mp.lockedg.set(gp)
2524 gp.lockedm.set(mp)
2525 gp.goid = sched.goidgen.Add(1)
2526 if raceenabled {
2527 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2528 }
2529
2530 allgadd(gp)
2531
2532
2533
2534
2535
2536 sched.ngsys.Add(1)
2537
2538
2539 addExtraM(mp)
2540 }
2541
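// dropm is the counterpart of needm: after the cgo callback returns it puts
// the extra m back on the list of available extra ms, restores the thread's
// original signal mask, and clears the g0 stack bounds recorded by needm.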
2575 func dropm() {
2576
2577
2578
2579 mp := getg().m
2580
2581
2582
2583
2584
2585 var trace traceLocker
2586 if !mp.isExtraInSig {
2587 trace = traceAcquire()
2588 }
2589
2590
2591 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2592 mp.curg.preemptStop = false
2593 sched.ngsys.Add(1)
2594 decGSyscallNoP(mp)
2595
2596 if !mp.isExtraInSig {
2597 if trace.ok() {
2598 trace.GoDestroySyscall()
2599 traceRelease(trace)
2600 }
2601 }
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616 mp.syscalltick--
2617
2618
2619
2620 mp.curg.trace.reset()
2621
2622
2623
2624
2625 if traceEnabled() || traceShuttingDown() {
2626
2627
2628
2629
2630
2631
2632
2633 lock(&sched.lock)
2634 traceThreadDestroy(mp)
2635 unlock(&sched.lock)
2636 }
2637 mp.isExtraInSig = false
2638
2639
2640
2641
2642
2643 sigmask := mp.sigmask
2644 sigblock(false)
2645 unminit()
2646
2647 setg(nil)
2648
2649
2650
2651 g0 := mp.g0
2652 g0.stack.hi = 0
2653 g0.stack.lo = 0
2654 g0.stackguard0 = 0
2655 g0.stackguard1 = 0
2656 mp.g0StackAccurate = false
2657
2658 putExtraM(mp)
2659
2660 msigrestore(sigmask)
2661 }
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683 func cgoBindM() {
2684 if GOOS == "windows" || GOOS == "plan9" {
2685 fatal("bindm in unexpected GOOS")
2686 }
2687 g := getg()
2688 if g.m.g0 != g {
2689 fatal("the current g is not g0")
2690 }
2691 if _cgo_bindm != nil {
2692 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2693 }
2694 }
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707 func getm() uintptr {
2708 return uintptr(unsafe.Pointer(getg().m))
2709 }
2710
2711 var (
2712
2713
2714
2715
2716
2717
2718 extraM atomic.Uintptr
2719
2720 extraMLength atomic.Uint32
2721
2722 extraMWaiters atomic.Uint32
2723
2724
2725 extraMInUse atomic.Uint32
2726 )
2727
2728
2729
2730
2731
2732
2733
2734
2735 func lockextra(nilokay bool) *m {
2736 const locked = 1
2737
2738 incr := false
2739 for {
2740 old := extraM.Load()
2741 if old == locked {
2742 osyield_no_g()
2743 continue
2744 }
2745 if old == 0 && !nilokay {
2746 if !incr {
2747
2748
2749
2750 extraMWaiters.Add(1)
2751 incr = true
2752 }
2753 usleep_no_g(1)
2754 continue
2755 }
2756 if extraM.CompareAndSwap(old, locked) {
2757 return (*m)(unsafe.Pointer(old))
2758 }
2759 osyield_no_g()
2760 continue
2761 }
2762 }
2763
2764
2765 func unlockextra(mp *m, delta int32) {
2766 extraMLength.Add(delta)
2767 extraM.Store(uintptr(unsafe.Pointer(mp)))
2768 }
2769
2770
2771
2772
2773
2774
2775
2776
2777 func getExtraM() (mp *m, last bool) {
2778 mp = lockextra(false)
2779 extraMInUse.Add(1)
2780 unlockextra(mp.schedlink.ptr(), -1)
2781 return mp, mp.schedlink.ptr() == nil
2782 }
2783
2784
2785
2786
2787
2788 func putExtraM(mp *m) {
2789 extraMInUse.Add(-1)
2790 addExtraM(mp)
2791 }
2792
2793
2794
2795
2796 func addExtraM(mp *m) {
2797 mnext := lockextra(true)
2798 mp.schedlink.set(mnext)
2799 unlockextra(mp, 1)
2800 }
2801
2802 var (
2803
2804
2805
2806 allocmLock rwmutex
2807
2808
2809
2810
2811 execLock rwmutex
2812 )
2813
2814
2815
2816 const (
2817 failthreadcreate = "runtime: failed to create new OS thread\n"
2818 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2819 )
2820
2821
2822
2823
2824 var newmHandoff struct {
2825 lock mutex
2826
2827
2828
2829 newm muintptr
2830
2831
2832
2833 waiting bool
2834 wake note
2835
2836
2837
2838
2839 haveTemplateThread uint32
2840 }
2841
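// newm creates a new m that will start by running fn, pre-assigned to pp
// (which may be nil). If the calling thread is locked to a goroutine or is a
// cgo thread, thread creation is handed off to the template thread so the
// new OS thread starts from a known-clean state.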
2849 func newm(fn func(), pp *p, id int64) {
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860 acquirem()
2861
2862 mp := allocm(pp, fn, id)
2863 mp.nextp.set(pp)
2864 mp.sigmask = initSigmask
2865 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877 lock(&newmHandoff.lock)
2878 if newmHandoff.haveTemplateThread == 0 {
2879 throw("on a locked thread with no template thread")
2880 }
2881 mp.schedlink = newmHandoff.newm
2882 newmHandoff.newm.set(mp)
2883 if newmHandoff.waiting {
2884 newmHandoff.waiting = false
2885 notewakeup(&newmHandoff.wake)
2886 }
2887 unlock(&newmHandoff.lock)
2888
2889
2890
2891 releasem(getg().m)
2892 return
2893 }
2894 newm1(mp)
2895 releasem(getg().m)
2896 }
2897
2898 func newm1(mp *m) {
2899 if iscgo {
2900 var ts cgothreadstart
2901 if _cgo_thread_start == nil {
2902 throw("_cgo_thread_start missing")
2903 }
2904 ts.g.set(mp.g0)
2905 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2906 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2907 if msanenabled {
2908 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2909 }
2910 if asanenabled {
2911 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2912 }
2913 execLock.rlock()
2914 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2915 execLock.runlock()
2916 return
2917 }
2918 execLock.rlock()
2919 newosproc(mp)
2920 execLock.runlock()
2921 }
2922
2923
2924
2925
2926
2927 func startTemplateThread() {
2928 if GOARCH == "wasm" {
2929 return
2930 }
2931
2932
2933
2934 mp := acquirem()
2935 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2936 releasem(mp)
2937 return
2938 }
2939 newm(templateThread, nil, -1)
2940 releasem(mp)
2941 }
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955 func templateThread() {
2956 lock(&sched.lock)
2957 sched.nmsys++
2958 checkdead()
2959 unlock(&sched.lock)
2960
2961 for {
2962 lock(&newmHandoff.lock)
2963 for newmHandoff.newm != 0 {
2964 newm := newmHandoff.newm.ptr()
2965 newmHandoff.newm = 0
2966 unlock(&newmHandoff.lock)
2967 for newm != nil {
2968 next := newm.schedlink.ptr()
2969 newm.schedlink = 0
2970 newm1(newm)
2971 newm = next
2972 }
2973 lock(&newmHandoff.lock)
2974 }
2975 newmHandoff.waiting = true
2976 noteclear(&newmHandoff.wake)
2977 unlock(&newmHandoff.lock)
2978 notesleep(&newmHandoff.wake)
2979 }
2980 }
2981
2982
2983
2984 func stopm() {
2985 gp := getg()
2986
2987 if gp.m.locks != 0 {
2988 throw("stopm holding locks")
2989 }
2990 if gp.m.p != 0 {
2991 throw("stopm holding p")
2992 }
2993 if gp.m.spinning {
2994 throw("stopm spinning")
2995 }
2996
2997 lock(&sched.lock)
2998 mput(gp.m)
2999 unlock(&sched.lock)
3000 mPark()
3001 acquirep(gp.m.nextp.ptr())
3002 gp.m.nextp = 0
3003 }
3004
3005 func mspinning() {
3006
3007 getg().m.spinning = true
3008 }
3009
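// startm schedules some m to run pp, creating an m if necessary. If pp is
// nil, it tries to take an idle P; if none is available it does nothing.
// spinning indicates the caller has incremented nmspinning and the chosen m
// should start out spinning. lockheld indicates the caller already holds
// sched.lock.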
3027 func startm(pp *p, spinning, lockheld bool) {
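// Disable preemption: once a P is picked up below (from the caller or from
// pidleget), nothing else protects it until it has been handed to an m.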
3044 mp := acquirem()
3045 if !lockheld {
3046 lock(&sched.lock)
3047 }
3048 if pp == nil {
3049 if spinning {
3050
3051
3052
3053 throw("startm: P required for spinning=true")
3054 }
3055 pp, _ = pidleget(0)
3056 if pp == nil {
3057 if !lockheld {
3058 unlock(&sched.lock)
3059 }
3060 releasem(mp)
3061 return
3062 }
3063 }
3064 nmp := mget()
3065 if nmp == nil {
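// No idle m was found. Reserve an id while still holding sched.lock, then
// release the lock before newm, which may block creating the thread or hand
// the request to the template thread.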
3080 id := mReserveID()
3081 unlock(&sched.lock)
3082
3083 var fn func()
3084 if spinning {
3085
3086 fn = mspinning
3087 }
3088 newm(fn, pp, id)
3089
3090 if lockheld {
3091 lock(&sched.lock)
3092 }
3093
3094
3095 releasem(mp)
3096 return
3097 }
3098 if !lockheld {
3099 unlock(&sched.lock)
3100 }
3101 if nmp.spinning {
3102 throw("startm: m is spinning")
3103 }
3104 if nmp.nextp != 0 {
3105 throw("startm: m has p")
3106 }
3107 if spinning && !runqempty(pp) {
3108 throw("startm: p has runnable gs")
3109 }
3110
3111 nmp.spinning = spinning
3112 nmp.nextp.set(pp)
3113 notewakeup(&nmp.park)
3114
3115
3116 releasem(mp)
3117 }
3118
3119
3120
3121
3122
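// handoffp hands off ownership of pp from an m leaving the scheduler (a
// syscall or a locked m). It starts a new m to run pp if there is local or
// global work, if tracing or GC needs it, or if no other m is spinning;
// otherwise pp is put on the idle list and the netpoller is woken if pp owns
// the earliest timer.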
3123 func handoffp(pp *p) {
3124
3125
3126
3127
3128 if !runqempty(pp) || !sched.runq.empty() {
3129 startm(pp, false, false)
3130 return
3131 }
3132
3133 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3134 startm(pp, false, false)
3135 return
3136 }
3137
3138 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3139 startm(pp, false, false)
3140 return
3141 }
3142
3143
3144 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3145 sched.needspinning.Store(0)
3146 startm(pp, true, false)
3147 return
3148 }
3149 lock(&sched.lock)
3150 if sched.gcwaiting.Load() {
3151 pp.status = _Pgcstop
3152 pp.gcStopTime = nanotime()
3153 sched.stopwait--
3154 if sched.stopwait == 0 {
3155 notewakeup(&sched.stopnote)
3156 }
3157 unlock(&sched.lock)
3158 return
3159 }
3160 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3161 sched.safePointFn(pp)
3162 sched.safePointWait--
3163 if sched.safePointWait == 0 {
3164 notewakeup(&sched.safePointNote)
3165 }
3166 }
3167 if !sched.runq.empty() {
3168 unlock(&sched.lock)
3169 startm(pp, false, false)
3170 return
3171 }
3172
3173
3174 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3175 unlock(&sched.lock)
3176 startm(pp, false, false)
3177 return
3178 }
3179
3180
3181
3182 when := pp.timers.wakeTime()
3183 pidleput(pp, 0)
3184 unlock(&sched.lock)
3185
3186 if when != 0 {
3187 wakeNetPoller(when)
3188 }
3189 }
3190
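// wakep tries to add one more spinning m to handle newly runnable work. It
// is a no-op if another m is already spinning; otherwise it takes an idle P
// and starts a spinning m for it.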
3204 func wakep() {
3205
3206
3207 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3208 return
3209 }
3210
3211
3212
3213
3214
3215
3216 mp := acquirem()
3217
3218 var pp *p
3219 lock(&sched.lock)
3220 pp, _ = pidlegetSpinning(0)
3221 if pp == nil {
3222 if sched.nmspinning.Add(-1) < 0 {
3223 throw("wakep: negative nmspinning")
3224 }
3225 unlock(&sched.lock)
3226 releasem(mp)
3227 return
3228 }
3229
3230
3231
3232
3233 unlock(&sched.lock)
3234
3235 startm(pp, true, false)
3236
3237 releasem(mp)
3238 }
3239
3240
3241
3242 func stoplockedm() {
3243 gp := getg()
3244
3245 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3246 throw("stoplockedm: inconsistent locking")
3247 }
3248 if gp.m.p != 0 {
3249
3250 pp := releasep()
3251 handoffp(pp)
3252 }
3253 incidlelocked(1)
3254
3255 mPark()
3256 status := readgstatus(gp.m.lockedg.ptr())
3257 if status&^_Gscan != _Grunnable {
3258 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3259 dumpgstatus(gp.m.lockedg.ptr())
3260 throw("stoplockedm: not runnable")
3261 }
3262 acquirep(gp.m.nextp.ptr())
3263 gp.m.nextp = 0
3264 }
3265
3266
3267
3268
3269
3270 func startlockedm(gp *g) {
3271 mp := gp.lockedm.ptr()
3272 if mp == getg().m {
3273 throw("startlockedm: locked to me")
3274 }
3275 if mp.nextp != 0 {
3276 throw("startlockedm: m has p")
3277 }
3278
3279 incidlelocked(-1)
3280 pp := releasep()
3281 mp.nextp.set(pp)
3282 notewakeup(&mp.park)
3283 stopm()
3284 }
3285
3286
3287
3288 func gcstopm() {
3289 gp := getg()
3290
3291 if !sched.gcwaiting.Load() {
3292 throw("gcstopm: not waiting for gc")
3293 }
3294 if gp.m.spinning {
3295 gp.m.spinning = false
3296
3297
3298 if sched.nmspinning.Add(-1) < 0 {
3299 throw("gcstopm: negative nmspinning")
3300 }
3301 }
3302 pp := releasep()
3303 lock(&sched.lock)
3304 pp.status = _Pgcstop
3305 pp.gcStopTime = nanotime()
3306 sched.stopwait--
3307 if sched.stopwait == 0 {
3308 notewakeup(&sched.stopnote)
3309 }
3310 unlock(&sched.lock)
3311 stopm()
3312 }
3313
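// execute schedules gp to run on the current m and P and never returns. It
// marks gp _Grunning, applies per-goroutine DIT and CPU-profiling settings,
// and then gogo's into gp's saved context. inheritTime is true if gp
// inherits the remaining time slice, in which case schedtick is not
// advanced.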
3323 func execute(gp *g, inheritTime bool) {
3324 mp := getg().m
3325
3326 if goroutineProfile.active {
3327
3328
3329
3330 tryRecordGoroutineProfile(gp, nil, osyield)
3331 }
3332
3333
3334 mp.curg = gp
3335 gp.m = mp
3336 gp.syncSafePoint = false
3337 casgstatus(gp, _Grunnable, _Grunning)
3338 gp.waitsince = 0
3339 gp.preempt = false
3340 gp.stackguard0 = gp.stack.lo + stackGuard
3341 if !inheritTime {
3342 mp.p.ptr().schedtick++
3343 }
3344
3345 if sys.DITSupported && debug.dataindependenttiming != 1 {
3346 if gp.ditWanted && !mp.ditEnabled {
3347
3348
3349 sys.EnableDIT()
3350 mp.ditEnabled = true
3351 } else if !gp.ditWanted && mp.ditEnabled {
3352
3353
3354
3355
3356
3357 sys.DisableDIT()
3358 mp.ditEnabled = false
3359 }
3360 }
3361
3362
3363 hz := sched.profilehz
3364 if mp.profilehz != hz {
3365 setThreadCPUProfiler(hz)
3366 }
3367
3368 trace := traceAcquire()
3369 if trace.ok() {
3370 trace.GoStart()
3371 traceRelease(trace)
3372 }
3373
3374 gogo(&gp.sched)
3375 }
3376
3377
3378
3379
3380
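// findRunnable blocks until it finds a goroutine to run, checking in rough
// order: the trace reader, GC mark workers, the global run queue
// (occasionally, for fairness), the local run queue, the global run queue
// again, a non-blocking netpoll, and finally work stealing from other Ps.
// If nothing is found the P is released and the m parks (possibly in a
// blocking netpoll).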
3381 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3382 mp := getg().m
3383
3384
3385
3386
3387
3388 top:
3389
3390
3391
3392 mp.clearAllpSnapshot()
3393
3394 pp := mp.p.ptr()
3395 if sched.gcwaiting.Load() {
3396 gcstopm()
3397 goto top
3398 }
3399 if pp.runSafePointFn != 0 {
3400 runSafePointFn()
3401 }
3402
3403
3404
3405
3406
3407 now, pollUntil, _ := pp.timers.check(0, nil)
3408
3409
3410 if traceEnabled() || traceShuttingDown() {
3411 gp := traceReader()
3412 if gp != nil {
3413 trace := traceAcquire()
3414 casgstatus(gp, _Gwaiting, _Grunnable)
3415 if trace.ok() {
3416 trace.GoUnpark(gp, 0)
3417 traceRelease(trace)
3418 }
3419 return gp, false, true
3420 }
3421 }
3422
3423
3424 if gcBlackenEnabled != 0 {
3425 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3426 if gp != nil {
3427 return gp, false, true
3428 }
3429 now = tnow
3430 }
3431
3432
3433
3434
3435 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3436 lock(&sched.lock)
3437 gp := globrunqget()
3438 unlock(&sched.lock)
3439 if gp != nil {
3440 return gp, false, false
3441 }
3442 }
3443
3444
3445 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3446 if gp := wakefing(); gp != nil {
3447 ready(gp, 0, true)
3448 }
3449 }
3450
3451
3452 if gcCleanups.needsWake() {
3453 gcCleanups.wake()
3454 }
3455
3456 if *cgo_yield != nil {
3457 asmcgocall(*cgo_yield, nil)
3458 }
3459
3460
3461 if gp, inheritTime := runqget(pp); gp != nil {
3462 return gp, inheritTime, false
3463 }
3464
3465
3466 if !sched.runq.empty() {
3467 lock(&sched.lock)
3468 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3469 unlock(&sched.lock)
3470 if gp != nil {
3471 if runqputbatch(pp, &q); !q.empty() {
3472 throw("Couldn't put Gs into empty local runq")
3473 }
3474 return gp, false, false
3475 }
3476 }
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3488 list, delta := netpoll(0)
3489 sched.pollingNet.Store(0)
3490 if !list.empty() {
3491 gp := list.pop()
3492 injectglist(&list)
3493 netpollAdjustWaiters(delta)
3494 trace := traceAcquire()
3495 casgstatus(gp, _Gwaiting, _Grunnable)
3496 if trace.ok() {
3497 trace.GoUnpark(gp, 0)
3498 traceRelease(trace)
3499 }
3500 return gp, false, false
3501 }
3502 }
3503
3504
3505
3506
3507
3508
3509 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3510 if !mp.spinning {
3511 mp.becomeSpinning()
3512 }
3513
3514 gp, inheritTime, tnow, w, newWork := stealWork(now)
3515 if gp != nil {
3516
3517 return gp, inheritTime, false
3518 }
3519 if newWork {
3520
3521
3522 goto top
3523 }
3524
3525 now = tnow
3526 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3527
3528 pollUntil = w
3529 }
3530 }
3531
3532
3533
3534
3535
3536 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3537 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3538 if node != nil {
3539 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3540 gp := node.gp.ptr()
3541
3542 trace := traceAcquire()
3543 casgstatus(gp, _Gwaiting, _Grunnable)
3544 if trace.ok() {
3545 trace.GoUnpark(gp, 0)
3546 traceRelease(trace)
3547 }
3548 return gp, false, false
3549 }
3550 gcController.removeIdleMarkWorker()
3551 }
3552
3553
3554
3555
3556
3557 gp, otherReady := beforeIdle(now, pollUntil)
3558 if gp != nil {
3559 trace := traceAcquire()
3560 casgstatus(gp, _Gwaiting, _Grunnable)
3561 if trace.ok() {
3562 trace.GoUnpark(gp, 0)
3563 traceRelease(trace)
3564 }
3565 return gp, false, false
3566 }
3567 if otherReady {
3568 goto top
3569 }
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579 allpSnapshot := mp.snapshotAllp()
3580
3581
3582 idlepMaskSnapshot := idlepMask
3583 timerpMaskSnapshot := timerpMask
3584
3585
3586 lock(&sched.lock)
3587 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3588 unlock(&sched.lock)
3589 goto top
3590 }
3591 if !sched.runq.empty() {
3592 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3593 unlock(&sched.lock)
3594 if gp == nil {
3595 throw("global runq empty with non-zero runqsize")
3596 }
3597 if runqputbatch(pp, &q); !q.empty() {
3598 throw("Couldn't put Gs into empty local runq")
3599 }
3600 return gp, false, false
3601 }
3602 if !mp.spinning && sched.needspinning.Load() == 1 {
3603
3604 mp.becomeSpinning()
3605 unlock(&sched.lock)
3606 goto top
3607 }
3608 if releasep() != pp {
3609 throw("findRunnable: wrong p")
3610 }
3611 now = pidleput(pp, now)
3612 unlock(&sched.lock)
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650 wasSpinning := mp.spinning
3651 if mp.spinning {
3652 mp.spinning = false
3653 if sched.nmspinning.Add(-1) < 0 {
3654 throw("findRunnable: negative nmspinning")
3655 }
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668 lock(&sched.lock)
3669 if !sched.runq.empty() {
3670 pp, _ := pidlegetSpinning(0)
3671 if pp != nil {
3672 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3673 unlock(&sched.lock)
3674 if gp == nil {
3675 throw("global runq empty with non-zero runqsize")
3676 }
3677 if runqputbatch(pp, &q); !q.empty() {
3678 throw("Couldn't put Gs into empty local runq")
3679 }
3680 acquirep(pp)
3681 mp.becomeSpinning()
3682 return gp, false, false
3683 }
3684 }
3685 unlock(&sched.lock)
3686
3687 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3688 if pp != nil {
3689 acquirep(pp)
3690 mp.becomeSpinning()
3691 goto top
3692 }
3693
3694
3695 pp, gp := checkIdleGCNoP()
3696 if pp != nil {
3697 acquirep(pp)
3698 mp.becomeSpinning()
3699
3700
3701 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3702 trace := traceAcquire()
3703 casgstatus(gp, _Gwaiting, _Grunnable)
3704 if trace.ok() {
3705 trace.GoUnpark(gp, 0)
3706 traceRelease(trace)
3707 }
3708 return gp, false, false
3709 }
3710
3711
3712
3713
3714
3715
3716
3717 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3718 }
3719
3720
3721
3722
3723
3724 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3725 sched.pollUntil.Store(pollUntil)
3726 if mp.p != 0 {
3727 throw("findRunnable: netpoll with p")
3728 }
3729 if mp.spinning {
3730 throw("findRunnable: netpoll with spinning")
3731 }
3732 delay := int64(-1)
3733 if pollUntil != 0 {
3734 if now == 0 {
3735 now = nanotime()
3736 }
3737 delay = pollUntil - now
3738 if delay < 0 {
3739 delay = 0
3740 }
3741 }
3742 if faketime != 0 {
3743
3744 delay = 0
3745 }
3746 list, delta := netpoll(delay)
3747
3748 now = nanotime()
3749 sched.pollUntil.Store(0)
3750 sched.lastpoll.Store(now)
3751 if faketime != 0 && list.empty() {
3752
3753
3754 stopm()
3755 goto top
3756 }
3757 lock(&sched.lock)
3758 pp, _ := pidleget(now)
3759 unlock(&sched.lock)
3760 if pp == nil {
3761 injectglist(&list)
3762 netpollAdjustWaiters(delta)
3763 } else {
3764 acquirep(pp)
3765 if !list.empty() {
3766 gp := list.pop()
3767 injectglist(&list)
3768 netpollAdjustWaiters(delta)
3769 trace := traceAcquire()
3770 casgstatus(gp, _Gwaiting, _Grunnable)
3771 if trace.ok() {
3772 trace.GoUnpark(gp, 0)
3773 traceRelease(trace)
3774 }
3775 return gp, false, false
3776 }
3777 if wasSpinning {
3778 mp.becomeSpinning()
3779 }
3780 goto top
3781 }
3782 } else if pollUntil != 0 && netpollinited() {
3783 pollerPollUntil := sched.pollUntil.Load()
3784 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3785 netpollBreak()
3786 }
3787 }
3788 stopm()
3789 goto top
3790 }
3791
3792
3793
3794
3795
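// pollWork reports whether there is non-background work this P could
// be doing. This is a fairly lightweight check to be used for
// background work loops, like idle GC. It checks a subset of the
// conditions checked by the actual scheduler.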
3796 func pollWork() bool {
3797 if !sched.runq.empty() {
3798 return true
3799 }
3800 p := getg().m.p.ptr()
3801 if !runqempty(p) {
3802 return true
3803 }
3804 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3805 if list, delta := netpoll(0); !list.empty() {
3806 injectglist(&list)
3807 netpollAdjustWaiters(delta)
3808 return true
3809 }
3810 }
3811 return false
3812 }
3813
3814
3815
3816
3817
3818
3819
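// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed time
// or the current time if now was passed as 0.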
3820 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3821 pp := getg().m.p.ptr()
3822
3823 ranTimer := false
3824
3825 const stealTries = 4
3826 for i := 0; i < stealTries; i++ {
3827 stealTimersOrRunNextG := i == stealTries-1
3828
3829 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3830 if sched.gcwaiting.Load() {
3831
3832 return nil, false, now, pollUntil, true
3833 }
3834 p2 := allp[enum.position()]
3835 if pp == p2 {
3836 continue
3837 }
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3853 tnow, w, ran := p2.timers.check(now, nil)
3854 now = tnow
3855 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3856 pollUntil = w
3857 }
3858 if ran {
3859
3860
3861
3862
3863
3864
3865
3866
3867 if gp, inheritTime := runqget(pp); gp != nil {
3868 return gp, inheritTime, now, pollUntil, ranTimer
3869 }
3870 ranTimer = true
3871 }
3872 }
3873
3874
3875 if !idlepMask.read(enum.position()) {
3876 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3877 return gp, false, now, pollUntil, ranTimer
3878 }
3879 }
3880 }
3881 }
3882
3883
3884
3885
3886 return nil, false, now, pollUntil, ranTimer
3887 }
3888
3889
3890
3891
3892
3893
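// Check all Ps for a runnable G to steal.
//
// On entry we have no P. If a G is available to steal and a P is available,
// the P is returned which the caller should acquire and attempt to steal the
// work to.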
3894 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3895 for id, p2 := range allpSnapshot {
3896 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3897 lock(&sched.lock)
3898 pp, _ := pidlegetSpinning(0)
3899 if pp == nil {
3900
3901 unlock(&sched.lock)
3902 return nil
3903 }
3904 unlock(&sched.lock)
3905 return pp
3906 }
3907 }
3908
3909
3910 return nil
3911 }
3912
3913
3914
3915
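// Check all Ps for a timer expiring sooner than pollUntil.
//
// Returns updated pollUntil value.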
3916 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3917 for id, p2 := range allpSnapshot {
3918 if timerpMaskSnapshot.read(uint32(id)) {
3919 w := p2.timers.wakeTime()
3920 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3921 pollUntil = w
3922 }
3923 }
3924 }
3925
3926 return pollUntil
3927 }
3928
3929
3930
3931
3932
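// Check for idle-priority GC work, without a P on entry.
//
// If some GC work, a P, and a worker G are all available, the P and G will
// be returned. The returned P has not been wired yet.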
3933 func checkIdleGCNoP() (*p, *g) {
3934
3935
3936
3937
3938
3939
3940 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3941 return nil, nil
3942 }
3943 if !gcShouldScheduleWorker(nil) {
3944 return nil, nil
3945 }
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964 lock(&sched.lock)
3965 pp, now := pidlegetSpinning(0)
3966 if pp == nil {
3967 unlock(&sched.lock)
3968 return nil, nil
3969 }
3970
3971
3972 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3973 pidleput(pp, now)
3974 unlock(&sched.lock)
3975 return nil, nil
3976 }
3977
3978 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3979 if node == nil {
3980 pidleput(pp, now)
3981 unlock(&sched.lock)
3982 gcController.removeIdleMarkWorker()
3983 return nil, nil
3984 }
3985
3986 unlock(&sched.lock)
3987
3988 return pp, node.gp.ptr()
3989 }
3990
3991
3992
3993
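// wakeNetPoller wakes up the thread sleeping in the network poller if it
// isn't going to wake up before the when argument; or it wakes an idle P to
// service timers and the network poller if there isn't one already.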
3994 func wakeNetPoller(when int64) {
3995 if sched.lastpoll.Load() == 0 {
3996
3997
3998
3999
4000 pollerPollUntil := sched.pollUntil.Load()
4001 if pollerPollUntil == 0 || pollerPollUntil > when {
4002 netpollBreak()
4003 }
4004 } else {
4005
4006
4007 if GOOS != "plan9" {
4008 wakep()
4009 }
4010 }
4011 }
4012
4013 func resetspinning() {
4014 gp := getg()
4015 if !gp.m.spinning {
4016 throw("resetspinning: not a spinning m")
4017 }
4018 gp.m.spinning = false
4019 nmspinning := sched.nmspinning.Add(-1)
4020 if nmspinning < 0 {
4021 throw("findRunnable: negative nmspinning")
4022 }
4023
4024
4025
4026 wakep()
4027 }
4028
4029
4030
4031
4032
4033
4034
4035
4036
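// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to npidle M's are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.
// Can run concurrently with GC.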
4037 func injectglist(glist *gList) {
4038 if glist.empty() {
4039 return
4040 }
4041
4042
4043
4044 var tail *g
4045 trace := traceAcquire()
4046 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4047 tail = gp
4048 casgstatus(gp, _Gwaiting, _Grunnable)
4049 if trace.ok() {
4050 trace.GoUnpark(gp, 0)
4051 }
4052 }
4053 if trace.ok() {
4054 traceRelease(trace)
4055 }
4056
4057
4058 q := gQueue{glist.head, tail.guintptr(), glist.size}
4059 *glist = gList{}
4060
4061 startIdle := func(n int32) {
4062 for ; n > 0; n-- {
4063 mp := acquirem()
4064 lock(&sched.lock)
4065
4066 pp, _ := pidlegetSpinning(0)
4067 if pp == nil {
4068 unlock(&sched.lock)
4069 releasem(mp)
4070 break
4071 }
4072
4073 startm(pp, false, true)
4074 unlock(&sched.lock)
4075 releasem(mp)
4076 }
4077 }
4078
4079 pp := getg().m.p.ptr()
4080 if pp == nil {
4081 n := q.size
4082 lock(&sched.lock)
4083 globrunqputbatch(&q)
4084 unlock(&sched.lock)
4085 startIdle(n)
4086 return
4087 }
4088
4089 var globq gQueue
4090 npidle := sched.npidle.Load()
4091 for ; npidle > 0 && !q.empty(); npidle-- {
4092 g := q.pop()
4093 globq.pushBack(g)
4094 }
4095 if !globq.empty() {
4096 n := globq.size
4097 lock(&sched.lock)
4098 globrunqputbatch(&globq)
4099 unlock(&sched.lock)
4100 startIdle(n)
4101 }
4102
4103 if runqputbatch(pp, &q); !q.empty() {
4104 lock(&sched.lock)
4105 globrunqputbatch(&q)
4106 unlock(&sched.lock)
4107 }
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122 wakep()
4123 }
4124
4125
4126
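// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.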
4127 func schedule() {
4128 mp := getg().m
4129
4130 if mp.locks != 0 {
4131 throw("schedule: holding locks")
4132 }
4133
4134 if mp.lockedg != 0 {
4135 stoplockedm()
4136 execute(mp.lockedg.ptr(), false)
4137 }
4138
4139
4140
4141 if mp.incgo {
4142 throw("schedule: in cgo")
4143 }
4144
4145 top:
4146 pp := mp.p.ptr()
4147 pp.preempt = false
4148
4149
4150
4151
4152 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4153 throw("schedule: spinning with local work")
4154 }
4155
4156 gp, inheritTime, tryWakeP := findRunnable()
4157
4158
4159 pp = mp.p.ptr()
4160
4161
4162
4163
4164 mp.clearAllpSnapshot()
4165
4166
4167
4168
4169
4170
4171
4172
4173 gcController.releaseNextGCMarkWorker(pp)
4174
4175 if debug.dontfreezetheworld > 0 && freezing.Load() {
4176
4177
4178
4179
4180
4181
4182
4183 lock(&deadlock)
4184 lock(&deadlock)
4185 }
4186
4187
4188
4189
4190 if mp.spinning {
4191 resetspinning()
4192 }
4193
4194 if sched.disable.user && !schedEnabled(gp) {
4195
4196
4197
4198 lock(&sched.lock)
4199 if schedEnabled(gp) {
4200
4201
4202 unlock(&sched.lock)
4203 } else {
4204 sched.disable.runnable.pushBack(gp)
4205 unlock(&sched.lock)
4206 goto top
4207 }
4208 }
4209
4210
4211
4212 if tryWakeP {
4213 wakep()
4214 }
4215 if gp.lockedm != 0 {
4216
4217
4218 startlockedm(gp)
4219 goto top
4220 }
4221
4222 execute(gp, inheritTime)
4223 }
4224
4225
4226
4227
4228
4229
4230
4231
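// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time.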
4232 func dropg() {
4233 gp := getg()
4234
4235 setMNoWB(&gp.m.curg.m, nil)
4236 setGNoWB(&gp.m.curg, nil)
4237 }
4238
4239 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4240 unlock((*mutex)(lock))
4241 return true
4242 }
4243
4244
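// park continuation on g0.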
4245 func park_m(gp *g) {
4246 mp := getg().m
4247
4248 trace := traceAcquire()
4249
4250
4251
4252
4253
4254 bubble := gp.bubble
4255 if bubble != nil {
4256 bubble.incActive()
4257 }
4258
4259 if trace.ok() {
4260
4261
4262
4263 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4264 }
4265
4266
4267 casgstatus(gp, _Grunning, _Gwaiting)
4268 if trace.ok() {
4269 traceRelease(trace)
4270 }
4271
4272 dropg()
4273
4274 if fn := mp.waitunlockf; fn != nil {
4275 ok := fn(gp, mp.waitlock)
4276 mp.waitunlockf = nil
4277 mp.waitlock = nil
4278 if !ok {
4279 trace := traceAcquire()
4280 casgstatus(gp, _Gwaiting, _Grunnable)
4281 if bubble != nil {
4282 bubble.decActive()
4283 }
4284 if trace.ok() {
4285 trace.GoUnpark(gp, 2)
4286 traceRelease(trace)
4287 }
4288 execute(gp, true)
4289 }
4290 }
4291
4292 if bubble != nil {
4293 bubble.decActive()
4294 }
4295
4296 schedule()
4297 }
4298
4299 func goschedImpl(gp *g, preempted bool) {
4300 pp := gp.m.p.ptr()
4301 trace := traceAcquire()
4302 status := readgstatus(gp)
4303 if status&^_Gscan != _Grunning {
4304 dumpgstatus(gp)
4305 throw("bad g status")
4306 }
4307 if trace.ok() {
4308
4309
4310
4311 if preempted {
4312 trace.GoPreempt()
4313 } else {
4314 trace.GoSched()
4315 }
4316 }
4317 casgstatus(gp, _Grunning, _Grunnable)
4318 if trace.ok() {
4319 traceRelease(trace)
4320 }
4321
4322 dropg()
4323 if preempted && sched.gcwaiting.Load() {
4324
4325
4326 runqput(pp, gp, true)
4327 } else {
4328 lock(&sched.lock)
4329 globrunqput(gp)
4330 unlock(&sched.lock)
4331 }
4332
4333 if mainStarted {
4334 wakep()
4335 }
4336
4337 schedule()
4338 }
4339
4340
4341 func gosched_m(gp *g) {
4342 goschedImpl(gp, false)
4343 }
4344
4345
4346 func goschedguarded_m(gp *g) {
4347 if !canPreemptM(gp.m) {
4348 gogo(&gp.sched)
4349 }
4350 goschedImpl(gp, false)
4351 }
4352
4353 func gopreempt_m(gp *g) {
4354 goschedImpl(gp, true)
4355 }
4356
4357
4358
4359
4360 func preemptPark(gp *g) {
4361 status := readgstatus(gp)
4362 if status&^_Gscan != _Grunning {
4363 dumpgstatus(gp)
4364 throw("bad g status")
4365 }
4366
4367 if gp.asyncSafePoint {
4368
4369
4370
4371 f := findfunc(gp.sched.pc)
4372 if !f.valid() {
4373 throw("preempt at unknown pc")
4374 }
4375 if f.flag&abi.FuncFlagSPWrite != 0 {
4376 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4377 throw("preempt SPWRITE")
4378 }
4379 }
4380
4381
4382
4383
4384
4385
4386
4387 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409 trace := traceAcquire()
4410 if trace.ok() {
4411 trace.GoPark(traceBlockPreempted, 0)
4412 }
4413
4414
4415
4416
4417 dropg()
4418
4419
4420 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4421 if trace.ok() {
4422 traceRelease(trace)
4423 }
4424
4425
4426 schedule()
4427 }
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
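// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq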
4443 func goyield() {
4444 checkTimeouts()
4445 mcall(goyield_m)
4446 }
4447
4448 func goyield_m(gp *g) {
4449 trace := traceAcquire()
4450 pp := gp.m.p.ptr()
4451 if trace.ok() {
4452
4453
4454
4455 trace.GoPreempt()
4456 }
4457 casgstatus(gp, _Grunning, _Grunnable)
4458 if trace.ok() {
4459 traceRelease(trace)
4460 }
4461 dropg()
4462 runqput(pp, gp, false)
4463 schedule()
4464 }
4465
4466
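// Finishes execution of the current goroutine.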
4467 func goexit1() {
4468 if raceenabled {
4469 if gp := getg(); gp.bubble != nil {
4470 racereleasemergeg(gp, gp.bubble.raceaddr())
4471 }
4472 racegoend()
4473 }
4474 trace := traceAcquire()
4475 if trace.ok() {
4476 trace.GoEnd()
4477 traceRelease(trace)
4478 }
4479 mcall(goexit0)
4480 }
4481
4482
4483 func goexit0(gp *g) {
4484 if goexperiment.RuntimeSecret && gp.secret > 0 {
4485
4486
4487 memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4488
4489
4490 }
4491 gdestroy(gp)
4492 schedule()
4493 }
4494
4495 func gdestroy(gp *g) {
4496 mp := getg().m
4497 pp := mp.p.ptr()
4498
4499 casgstatus(gp, _Grunning, _Gdead)
4500 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4501 if isSystemGoroutine(gp, false) {
4502 sched.ngsys.Add(-1)
4503 }
4504 gp.m = nil
4505 locked := gp.lockedm != 0
4506 gp.lockedm = 0
4507 mp.lockedg = 0
4508 gp.preemptStop = false
4509 gp.paniconfault = false
4510 gp._defer = nil
4511 gp._panic = nil
4512 gp.writebuf = nil
4513 gp.waitreason = waitReasonZero
4514 gp.param = nil
4515 gp.labels = nil
4516 gp.timer = nil
4517 gp.bubble = nil
4518 gp.fipsOnlyBypass = false
4519 gp.secret = 0
4520
4521 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4522
4523
4524
4525 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4526 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4527 gcController.bgScanCredit.Add(scanCredit)
4528 gp.gcAssistBytes = 0
4529 }
4530
4531 dropg()
4532
4533 if GOARCH == "wasm" {
4534 gfput(pp, gp)
4535 return
4536 }
4537
4538 if locked && mp.lockedInt != 0 {
4539 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4540 if mp.isextra {
4541 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4542 }
4543 throw("exited a goroutine internally locked to the OS thread")
4544 }
4545 gfput(pp, gp)
4546 if locked {
4547
4548
4549
4550
4551
4552
4553 if GOOS != "plan9" {
4554 gogo(&mp.g0.sched)
4555 } else {
4556
4557
4558 mp.lockedExt = 0
4559 }
4560 }
4561 }
4562
4563
4564
4565
4566
4567
4568
4569
4570
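// save updates getg().sched to refer to pc, sp, and bp so that a following
// gogo will restore them.
//
// save must not have write barriers because invoking a write barrier
// can clobber getg().sched.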
4571 func save(pc, sp, bp uintptr) {
4572 gp := getg()
4573
4574 if gp == gp.m.g0 || gp == gp.m.gsignal {
4575
4576
4577
4578
4579
4580 throw("save on system g not allowed")
4581 }
4582
4583 gp.sched.pc = pc
4584 gp.sched.sp = sp
4585 gp.sched.lr = 0
4586 gp.sched.bp = bp
4587
4588
4589
4590 if gp.sched.ctxt != nil {
4591 badctxt()
4592 }
4593 }
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
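// The goroutine gp is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Stack growth is not allowed here (throwsplit is set), and save is
// re-run after any call that may have clobbered gp.sched.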
4619 func reentersyscall(pc, sp, bp uintptr) {
4620 gp := getg()
4621
4622
4623
4624 gp.m.locks++
4625
4626
4627
4628
4629
4630 gp.stackguard0 = stackPreempt
4631 gp.throwsplit = true
4632
4633
4634 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4635
4636 pp := gp.m.p.ptr()
4637 if pp.runSafePointFn != 0 {
4638
4639 systemstack(runSafePointFn)
4640 }
4641 gp.m.oldp.set(pp)
4642
4643
4644 save(pc, sp, bp)
4645 gp.syscallsp = sp
4646 gp.syscallpc = pc
4647 gp.syscallbp = bp
4648
4649
4650 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4651 systemstack(func() {
4652 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4653 throw("entersyscall")
4654 })
4655 }
4656 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4657 systemstack(func() {
4658 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4659 throw("entersyscall")
4660 })
4661 }
4662 trace := traceAcquire()
4663 if trace.ok() {
4664
4665
4666
4667
4668 systemstack(func() {
4669 trace.GoSysCall()
4670 })
4671
4672 save(pc, sp, bp)
4673 }
4674 if sched.gcwaiting.Load() {
4675
4676
4677
4678 systemstack(func() {
4679 entersyscallHandleGCWait(trace)
4680 })
4681
4682 save(pc, sp, bp)
4683 }
4684
4685
4686
4687
4688
4689 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4690 casgstatus(gp, _Grunning, _Gsyscall)
4691 }
4692 if staticLockRanking {
4693
4694 save(pc, sp, bp)
4695 }
4696 if trace.ok() {
4697
4698
4699
4700 traceRelease(trace)
4701 }
4702 if sched.sysmonwait.Load() {
4703 systemstack(entersyscallWakeSysmon)
4704
4705 save(pc, sp, bp)
4706 }
4707 gp.m.locks--
4708 }
4709
4710
4711
4712
4713 const debugExtendGrunningNoP = false
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729 func entersyscall() {
4730
4731
4732
4733
4734 fp := getcallerfp()
4735 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4736 }
4737
4738 func entersyscallWakeSysmon() {
4739 lock(&sched.lock)
4740 if sched.sysmonwait.Load() {
4741 sched.sysmonwait.Store(false)
4742 notewakeup(&sched.sysmonnote)
4743 }
4744 unlock(&sched.lock)
4745 }
4746
4747 func entersyscallHandleGCWait(trace traceLocker) {
4748 gp := getg()
4749
4750 lock(&sched.lock)
4751 if sched.stopwait > 0 {
4752
4753 pp := gp.m.p.ptr()
4754 pp.m = 0
4755 gp.m.p = 0
4756 atomic.Store(&pp.status, _Pgcstop)
4757
4758 if trace.ok() {
4759 trace.ProcStop(pp)
4760 }
4761 addGSyscallNoP(gp.m)
4762 pp.gcStopTime = nanotime()
4763 pp.syscalltick++
4764 if sched.stopwait--; sched.stopwait == 0 {
4765 notewakeup(&sched.stopnote)
4766 }
4767 }
4768 unlock(&sched.lock)
4769 }
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
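// The same as entersyscall(), but with a hint that the syscall is blocking.
// The P is handed off immediately (handoffp) instead of waiting for sysmon
// to retake it.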
4783 func entersyscallblock() {
4784 gp := getg()
4785
4786 gp.m.locks++
4787 gp.throwsplit = true
4788 gp.stackguard0 = stackPreempt
4789 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4790 gp.m.p.ptr().syscalltick++
4791
4792 addGSyscallNoP(gp.m)
4793
4794
4795 pc := sys.GetCallerPC()
4796 sp := sys.GetCallerSP()
4797 bp := getcallerfp()
4798 save(pc, sp, bp)
4799 gp.syscallsp = gp.sched.sp
4800 gp.syscallpc = gp.sched.pc
4801 gp.syscallbp = gp.sched.bp
4802 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4803 sp1 := sp
4804 sp2 := gp.sched.sp
4805 sp3 := gp.syscallsp
4806 systemstack(func() {
4807 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4808 throw("entersyscallblock")
4809 })
4810 }
4811
4812
4813
4814
4815
4816
4817 trace := traceAcquire()
4818 systemstack(func() {
4819 if trace.ok() {
4820 trace.GoSysCall()
4821 }
4822 handoffp(releasep())
4823 })
4824
4825
4826
4827 if debugExtendGrunningNoP {
4828 usleep(10)
4829 }
4830 casgstatus(gp, _Grunning, _Gsyscall)
4831 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4832 systemstack(func() {
4833 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4834 throw("entersyscallblock")
4835 })
4836 }
4837 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4838 systemstack(func() {
4839 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4840 throw("entersyscallblock")
4841 })
4842 }
4843 if trace.ok() {
4844 systemstack(func() {
4845 traceRelease(trace)
4846 })
4847 }
4848
4849
4850 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4851
4852 gp.m.locks--
4853 }
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
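// The goroutine gp exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.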
4875 func exitsyscall() {
4876 gp := getg()
4877
4878 gp.m.locks++
4879 if sys.GetCallerSP() > gp.syscallsp {
4880 throw("exitsyscall: syscall frame is no longer valid")
4881 }
4882 gp.waitsince = 0
4883
4884 if sched.stopwait == freezeStopWait {
4885
4886
4887
4888 systemstack(func() {
4889 lock(&deadlock)
4890 lock(&deadlock)
4891 })
4892 }
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4906 casgstatus(gp, _Gsyscall, _Grunning)
4907 }
4908
4909
4910
4911
4912 if debugExtendGrunningNoP {
4913 usleep(10)
4914 }
4915
4916
4917 oldp := gp.m.oldp.ptr()
4918 gp.m.oldp.set(nil)
4919
4920
4921 pp := gp.m.p.ptr()
4922 if pp != nil {
4923
4924 if trace := traceAcquire(); trace.ok() {
4925 systemstack(func() {
4926
4927
4928
4929
4930
4931
4932
4933
4934 if pp.syscalltick == gp.m.syscalltick {
4935 trace.GoSysExit(false)
4936 } else {
4937
4938
4939
4940
4941 trace.ProcSteal(pp)
4942 trace.ProcStart()
4943 trace.GoSysExit(true)
4944 trace.GoStart()
4945 }
4946 traceRelease(trace)
4947 })
4948 }
4949 } else {
4950
4951 systemstack(func() {
4952
4953 if pp := exitsyscallTryGetP(oldp); pp != nil {
4954
4955 acquirepNoTrace(pp)
4956
4957
4958 if trace := traceAcquire(); trace.ok() {
4959 trace.ProcStart()
4960 trace.GoSysExit(true)
4961 trace.GoStart()
4962 traceRelease(trace)
4963 }
4964 }
4965 })
4966 pp = gp.m.p.ptr()
4967 }
4968
4969
4970 if pp != nil {
4971 if goroutineProfile.active {
4972
4973
4974
4975 systemstack(func() {
4976 tryRecordGoroutineProfileWB(gp)
4977 })
4978 }
4979
4980
4981 pp.syscalltick++
4982
4983
4984
4985 gp.syscallsp = 0
4986 gp.m.locks--
4987 if gp.preempt {
4988
4989 gp.stackguard0 = stackPreempt
4990 } else {
4991
4992 gp.stackguard0 = gp.stack.lo + stackGuard
4993 }
4994 gp.throwsplit = false
4995
4996 if sched.disable.user && !schedEnabled(gp) {
4997
4998 Gosched()
4999 }
5000 return
5001 }
5002
5003 gp.m.locks--
5004
5005
5006 mcall(exitsyscallNoP)
5007
5008
5009
5010
5011
5012
5013
5014 gp.syscallsp = 0
5015 gp.m.p.ptr().syscalltick++
5016 gp.throwsplit = false
5017 }
5018
5019
5020
5021
5022
5023
5024
5025 func exitsyscallTryGetP(oldp *p) *p {
5026
5027 if oldp != nil {
5028 if thread, ok := setBlockOnExitSyscall(oldp); ok {
5029 thread.takeP()
5030 addGSyscallNoP(thread.mp)
5031 thread.resume()
5032 return oldp
5033 }
5034 }
5035
5036
5037 if sched.pidle != 0 {
5038 lock(&sched.lock)
5039 pp, _ := pidleget(0)
5040 if pp != nil && sched.sysmonwait.Load() {
5041 sched.sysmonwait.Store(false)
5042 notewakeup(&sched.sysmonnote)
5043 }
5044 unlock(&sched.lock)
5045 if pp != nil {
5046 decGSyscallNoP(getg().m)
5047 return pp
5048 }
5049 }
5050 return nil
5051 }
5052
5053
5054
5055
5056
5057
5058
5059 func exitsyscallNoP(gp *g) {
5060 traceExitingSyscall()
5061 trace := traceAcquire()
5062 casgstatus(gp, _Grunning, _Grunnable)
5063 traceExitedSyscall()
5064 if trace.ok() {
5065
5066
5067
5068
5069 trace.GoSysExit(true)
5070 traceRelease(trace)
5071 }
5072 decGSyscallNoP(getg().m)
5073 dropg()
5074 lock(&sched.lock)
5075 var pp *p
5076 if schedEnabled(gp) {
5077 pp, _ = pidleget(0)
5078 }
5079 var locked bool
5080 if pp == nil {
5081 globrunqput(gp)
5082
5083
5084
5085
5086
5087
5088 locked = gp.lockedm != 0
5089 } else if sched.sysmonwait.Load() {
5090 sched.sysmonwait.Store(false)
5091 notewakeup(&sched.sysmonnote)
5092 }
5093 unlock(&sched.lock)
5094 if pp != nil {
5095 acquirep(pp)
5096 execute(gp, false)
5097 }
5098 if locked {
5099
5100
5101
5102
5103 stoplockedm()
5104 execute(gp, false)
5105 }
5106 stopm()
5107 schedule()
5108 }
5109
5110
5111
5112
5113
5114
5115
5116 func addGSyscallNoP(mp *m) {
5117
5118
5119
5120 if !mp.isExtraInC {
5121
5122
5123
5124
5125
5126 sched.nGsyscallNoP.Add(1)
5127 }
5128 }
5129
5130
5131
5132
5133
5134
5135
5136 func decGSyscallNoP(mp *m) {
5137
5138
5139
5140 if !mp.isExtraInC {
5141 sched.nGsyscallNoP.Add(-1)
5142 }
5143 }
5144
5145
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157 func syscall_runtime_BeforeFork() {
5158 gp := getg().m.curg
5159
5160
5161
5162
5163 gp.m.locks++
5164 sigsave(&gp.m.sigmask)
5165 sigblock(false)
5166
5167
5168
5169
5170
5171 gp.stackguard0 = stackFork
5172 }
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186 func syscall_runtime_AfterFork() {
5187 gp := getg().m.curg
5188
5189
5190 gp.stackguard0 = gp.stack.lo + stackGuard
5191
5192 msigrestore(gp.m.sigmask)
5193
5194 gp.m.locks--
5195 }
5196
5197
5198
5199 var inForkedChild bool
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220 func syscall_runtime_AfterForkInChild() {
5221
5222
5223
5224
5225 inForkedChild = true
5226
5227 clearSignalHandlers()
5228
5229
5230
5231 msigrestore(getg().m.sigmask)
5232
5233 inForkedChild = false
5234 }
5235
5236
5237
5238
5239 var pendingPreemptSignals atomic.Int32
5240
5241
5242
5243
5244 func syscall_runtime_BeforeExec() {
5245
5246 execLock.lock()
5247
5248
5249
5250 if GOOS == "darwin" || GOOS == "ios" {
5251 for pendingPreemptSignals.Load() > 0 {
5252 osyield()
5253 }
5254 }
5255 }
5256
5257
5258
5259
5260 func syscall_runtime_AfterExec() {
5261 execLock.unlock()
5262 }
5263
5264
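// Allocate a new g, with a stack big enough for stacksize bytes.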
5265 func malg(stacksize int32) *g {
5266 newg := new(g)
5267 if stacksize >= 0 {
5268 stacksize = round2(stackSystem + stacksize)
5269 systemstack(func() {
5270 newg.stack = stackalloc(uint32(stacksize))
5271 if valgrindenabled {
5272 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5273 }
5274 })
5275 newg.stackguard0 = newg.stack.lo + stackGuard
5276 newg.stackguard1 = ^uintptr(0)
5277
5278
5279 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5280 }
5281 return newg
5282 }
5283
5284
5285
5286
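// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.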
5287 func newproc(fn *funcval) {
5288 gp := getg()
5289 pc := sys.GetCallerPC()
5290 systemstack(func() {
5291 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5292
5293 pp := getg().m.p.ptr()
5294 runqput(pp, newg, true)
5295
5296 if mainStarted {
5297 wakep()
5298 }
5299 })
5300 }
5301
5302
5303
5304
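// Create a new g in state _Grunnable (or _Gwaiting if parked is true),
// starting at fn. callerpc is the address of the go statement that created
// this. The caller is responsible for adding the new g to the scheduler.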
5305 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5306 if fn == nil {
5307 fatal("go of nil func value")
5308 }
5309
5310 mp := acquirem()
5311 pp := mp.p.ptr()
5312 newg := gfget(pp)
5313 if newg == nil {
5314 newg = malg(stackMin)
5315 casgstatus(newg, _Gidle, _Gdead)
5316 allgadd(newg)
5317 }
5318 if newg.stack.hi == 0 {
5319 throw("newproc1: newg missing stack")
5320 }
5321
5322 if readgstatus(newg) != _Gdead {
5323 throw("newproc1: new g is not Gdead")
5324 }
5325
5326 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5327 totalSize = alignUp(totalSize, sys.StackAlign)
5328 sp := newg.stack.hi - totalSize
5329 if usesLR {
5330
5331 *(*uintptr)(unsafe.Pointer(sp)) = 0
5332 prepGoExitFrame(sp)
5333 }
5334 if GOARCH == "arm64" {
5335
5336 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5337 }
5338
5339 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5340 newg.sched.sp = sp
5341 newg.stktopsp = sp
5342 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5343 newg.sched.g = guintptr(unsafe.Pointer(newg))
5344 gostartcallfn(&newg.sched, fn)
5345 newg.parentGoid = callergp.goid
5346 newg.gopc = callerpc
5347 newg.ancestors = saveAncestors(callergp)
5348 newg.startpc = fn.fn
5349 newg.runningCleanups.Store(false)
5350 if isSystemGoroutine(newg, false) {
5351 sched.ngsys.Add(1)
5352 } else {
5353
5354 newg.bubble = callergp.bubble
5355 if mp.curg != nil {
5356 newg.labels = mp.curg.labels
5357 }
5358 if goroutineProfile.active {
5359
5360
5361
5362
5363
5364 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5365 }
5366 }
5367
5368 newg.trackingSeq = uint8(cheaprand())
5369 if newg.trackingSeq%gTrackingPeriod == 0 {
5370 newg.tracking = true
5371 }
5372 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5373
5374
5375
5376 trace := traceAcquire()
5377 var status uint32 = _Grunnable
5378 if parked {
5379 status = _Gwaiting
5380 newg.waitreason = waitreason
5381 }
5382 if pp.goidcache == pp.goidcacheend {
5383
5384
5385
5386 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5387 pp.goidcache -= _GoidCacheBatch - 1
5388 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5389 }
5390 newg.goid = pp.goidcache
5391 casgstatus(newg, _Gdead, status)
5392 pp.goidcache++
5393 newg.trace.reset()
5394 if trace.ok() {
5395 trace.GoCreate(newg, newg.startpc, parked)
5396 traceRelease(trace)
5397 }
5398
5399
5400 newg.fipsOnlyBypass = callergp.fipsOnlyBypass
5401
5402
5403 newg.ditWanted = callergp.ditWanted
5404
5405
5406 if raceenabled {
5407 newg.racectx = racegostart(callerpc)
5408 newg.raceignore = 0
5409 if newg.labels != nil {
5410
5411
5412 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5413 }
5414 }
5415 pp.goroutinesCreated++
5416 releasem(mp)
5417
5418 return newg
5419 }
5420
5421
5422
5423
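// saveAncestors copies previous ancestors of the given caller g and
// includes info for the current caller into a new set of tracebacks for
// a g being created.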
5424 func saveAncestors(callergp *g) *[]ancestorInfo {
5425
5426 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5427 return nil
5428 }
5429 var callerAncestors []ancestorInfo
5430 if callergp.ancestors != nil {
5431 callerAncestors = *callergp.ancestors
5432 }
5433 n := int32(len(callerAncestors)) + 1
5434 if n > debug.tracebackancestors {
5435 n = debug.tracebackancestors
5436 }
5437 ancestors := make([]ancestorInfo, n)
5438 copy(ancestors[1:], callerAncestors)
5439
5440 var pcs [tracebackInnerFrames]uintptr
5441 npcs := gcallers(callergp, 0, pcs[:])
5442 ipcs := make([]uintptr, npcs)
5443 copy(ipcs, pcs[:])
5444 ancestors[0] = ancestorInfo{
5445 pcs: ipcs,
5446 goid: callergp.goid,
5447 gopc: callergp.gopc,
5448 }
5449
5450 ancestorsp := new([]ancestorInfo)
5451 *ancestorsp = ancestors
5452 return ancestorsp
5453 }
5454
5455
5456
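// Put on gfree list.
// If local list is too long, transfer a batch to the global list.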
5457 func gfput(pp *p, gp *g) {
5458 if readgstatus(gp) != _Gdead {
5459 throw("gfput: bad status (not Gdead)")
5460 }
5461
5462 stksize := gp.stack.hi - gp.stack.lo
5463
5464 if stksize != uintptr(startingStackSize) {
5465
5466 stackfree(gp.stack)
5467 gp.stack.lo = 0
5468 gp.stack.hi = 0
5469 gp.stackguard0 = 0
5470 if valgrindenabled {
5471 valgrindDeregisterStack(gp.valgrindStackID)
5472 gp.valgrindStackID = 0
5473 }
5474 }
5475
5476 pp.gFree.push(gp)
5477 if pp.gFree.size >= 64 {
5478 var (
5479 stackQ gQueue
5480 noStackQ gQueue
5481 )
5482 for pp.gFree.size >= 32 {
5483 gp := pp.gFree.pop()
5484 if gp.stack.lo == 0 {
5485 noStackQ.push(gp)
5486 } else {
5487 stackQ.push(gp)
5488 }
5489 }
5490 lock(&sched.gFree.lock)
5491 sched.gFree.noStack.pushAll(noStackQ)
5492 sched.gFree.stack.pushAll(stackQ)
5493 unlock(&sched.gFree.lock)
5494 }
5495 }
5496
5497
5498
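// Get from gfree list.
// If local list is empty, grab a batch from global list.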
5499 func gfget(pp *p) *g {
5500 retry:
5501 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5502 lock(&sched.gFree.lock)
5503
5504 for pp.gFree.size < 32 {
5505
5506 gp := sched.gFree.stack.pop()
5507 if gp == nil {
5508 gp = sched.gFree.noStack.pop()
5509 if gp == nil {
5510 break
5511 }
5512 }
5513 pp.gFree.push(gp)
5514 }
5515 unlock(&sched.gFree.lock)
5516 goto retry
5517 }
5518 gp := pp.gFree.pop()
5519 if gp == nil {
5520 return nil
5521 }
5522 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5523
5524
5525
5526 systemstack(func() {
5527 stackfree(gp.stack)
5528 gp.stack.lo = 0
5529 gp.stack.hi = 0
5530 gp.stackguard0 = 0
5531 if valgrindenabled {
5532 valgrindDeregisterStack(gp.valgrindStackID)
5533 gp.valgrindStackID = 0
5534 }
5535 })
5536 }
5537 if gp.stack.lo == 0 {
5538
5539 systemstack(func() {
5540 gp.stack = stackalloc(startingStackSize)
5541 if valgrindenabled {
5542 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5543 }
5544 })
5545 gp.stackguard0 = gp.stack.lo + stackGuard
5546 } else {
5547 if raceenabled {
5548 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5549 }
5550 if msanenabled {
5551 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5552 }
5553 if asanenabled {
5554 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5555 }
5556 }
5557 return gp
5558 }
5559
5560
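// Purge all cached G's from gfree list to the global list.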
5561 func gfpurge(pp *p) {
5562 var (
5563 stackQ gQueue
5564 noStackQ gQueue
5565 )
5566 for !pp.gFree.empty() {
5567 gp := pp.gFree.pop()
5568 if gp.stack.lo == 0 {
5569 noStackQ.push(gp)
5570 } else {
5571 stackQ.push(gp)
5572 }
5573 }
5574 lock(&sched.gFree.lock)
5575 sched.gFree.noStack.pushAll(noStackQ)
5576 sched.gFree.stack.pushAll(stackQ)
5577 unlock(&sched.gFree.lock)
5578 }
5579
5580
5581 func Breakpoint() {
5582 breakpoint()
5583 }
5584
5585
5586
5587
5588
5589
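// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.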
5590 func dolockOSThread() {
5591 if GOARCH == "wasm" {
5592 return
5593 }
5594 gp := getg()
5595 gp.m.lockedg.set(gp)
5596 gp.lockedm.set(gp.m)
5597 }
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
5613
5614
5615 func LockOSThread() {
5616 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5617
5618
5619
5620 startTemplateThread()
5621 }
5622 gp := getg()
5623 gp.m.lockedExt++
5624 if gp.m.lockedExt == 0 {
5625 gp.m.lockedExt--
5626 panic("LockOSThread nesting overflow")
5627 }
5628 dolockOSThread()
5629 }
5630
5631
5632 func lockOSThread() {
5633 getg().m.lockedInt++
5634 dolockOSThread()
5635 }
5636
5637
5638
5639
5640
5641
5642 func dounlockOSThread() {
5643 if GOARCH == "wasm" {
5644 return
5645 }
5646 gp := getg()
5647 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5648 return
5649 }
5650 gp.m.lockedg = 0
5651 gp.lockedm = 0
5652 }
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668 func UnlockOSThread() {
5669 gp := getg()
5670 if gp.m.lockedExt == 0 {
5671 return
5672 }
5673 gp.m.lockedExt--
5674 dounlockOSThread()
5675 }
5676
5677
5678 func unlockOSThread() {
5679 gp := getg()
5680 if gp.m.lockedInt == 0 {
5681 systemstack(badunlockosthread)
5682 }
5683 gp.m.lockedInt--
5684 dounlockOSThread()
5685 }
5686
5687 func badunlockosthread() {
5688 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5689 }
5690
5691 func gcount(includeSys bool) int32 {
5692 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5693 if !includeSys {
5694 n -= sched.ngsys.Load()
5695 }
5696 for _, pp := range allp {
5697 n -= pp.gFree.size
5698 }
5699
5700
5701
5702 if n < 1 {
5703 n = 1
5704 }
5705 return n
5706 }
5707
5708
5709
5710
5711
5712 func goroutineleakcount() int {
5713 return work.goroutineLeak.count
5714 }
5715
5716 func mcount() int32 {
5717 return int32(sched.mnext - sched.nmfreed)
5718 }
5719
5720 var prof struct {
5721 signalLock atomic.Uint32
5722
5723
5724
5725 hz atomic.Int32
5726 }
5727
5728 func _System() { _System() }
5729 func _ExternalCode() { _ExternalCode() }
5730 func _LostExternalCode() { _LostExternalCode() }
5731 func _GC() { _GC() }
5732 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5733 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5734 func _VDSO() { _VDSO() }
5735
5736
5737
5738
5739
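// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.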
5740 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5741 if prof.hz.Load() == 0 {
5742 return
5743 }
5744
5745
5746
5747
5748 if mp != nil && mp.profilehz == 0 {
5749 return
5750 }
5751
5752
5753
5754
5755
5756
5757
5758 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5759 if f := findfunc(pc); f.valid() {
5760 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5761 cpuprof.lostAtomic++
5762 return
5763 }
5764 }
5765 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5766
5767
5768
5769 cpuprof.lostAtomic++
5770 return
5771 }
5772 }
5773
5774
5775
5776
5777
5778
5779
5780 getg().m.mallocing++
5781
5782 var u unwinder
5783 var stk [maxCPUProfStack]uintptr
5784 n := 0
5785 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5786 cgoOff := 0
5787
5788
5789
5790
5791
5792 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5793 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5794 cgoOff++
5795 }
5796 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5797 mp.cgoCallers[0] = 0
5798 }
5799
5800
5801 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5802 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5803
5804
5805 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5806 } else if mp != nil && mp.vdsoSP != 0 {
5807
5808
5809 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5810 } else {
5811 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5812 }
5813 n += tracebackPCs(&u, 0, stk[n:])
5814
5815 if n <= 0 {
5816
5817
5818 n = 2
5819 if inVDSOPage(pc) {
5820 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5821 } else if pc > firstmoduledata.etext {
5822
5823 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5824 }
5825 stk[0] = pc
5826 if mp.preemptoff != "" {
5827 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5828 } else {
5829 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5830 }
5831 }
5832
5833 if prof.hz.Load() != 0 {
5834
5835
5836
5837 var tagPtr *unsafe.Pointer
5838 if gp != nil && gp.m != nil && gp.m.curg != nil {
5839 tagPtr = &gp.m.curg.labels
5840 }
5841 cpuprof.add(tagPtr, stk[:n])
5842
5843 gprof := gp
5844 var mp *m
5845 var pp *p
5846 if gp != nil && gp.m != nil {
5847 if gp.m.curg != nil {
5848 gprof = gp.m.curg
5849 }
5850 mp = gp.m
5851 pp = gp.m.p.ptr()
5852 }
5853 traceCPUSample(gprof, mp, pp, stk[:n])
5854 }
5855 getg().m.mallocing--
5856 }
5857
5858
5859
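// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, setcpuprofilerate turns off CPU profiling.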
5860 func setcpuprofilerate(hz int32) {
5861
5862 if hz < 0 {
5863 hz = 0
5864 }
5865
5866
5867
5868 gp := getg()
5869 gp.m.locks++
5870
5871
5872
5873
5874 setThreadCPUProfiler(0)
5875
5876 for !prof.signalLock.CompareAndSwap(0, 1) {
5877 osyield()
5878 }
5879 if prof.hz.Load() != hz {
5880 setProcessCPUProfiler(hz)
5881 prof.hz.Store(hz)
5882 }
5883 prof.signalLock.Store(0)
5884
5885 lock(&sched.lock)
5886 sched.profilehz = hz
5887 unlock(&sched.lock)
5888
5889 if hz != 0 {
5890 setThreadCPUProfiler(hz)
5891 }
5892
5893 gp.m.locks--
5894 }
5895
5896
5897
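// init initializes pp, which may be a freshly allocated p or a
// previously destroyed p, and transitions it to status _Pgcstop.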
5898 func (pp *p) init(id int32) {
5899 pp.id = id
5900 pp.gcw.id = id
5901 pp.status = _Pgcstop
5902 pp.sudogcache = pp.sudogbuf[:0]
5903 pp.deferpool = pp.deferpoolbuf[:0]
5904 pp.wbBuf.reset()
5905 if pp.mcache == nil {
5906 if id == 0 {
5907 if mcache0 == nil {
5908 throw("missing mcache?")
5909 }
5910
5911
5912 pp.mcache = mcache0
5913 } else {
5914 pp.mcache = allocmcache()
5915 }
5916 }
5917 if raceenabled && pp.raceprocctx == 0 {
5918 if id == 0 {
5919 pp.raceprocctx = raceprocctx0
5920 raceprocctx0 = 0
5921 } else {
5922 pp.raceprocctx = raceproccreate()
5923 }
5924 }
5925 lockInit(&pp.timers.mu, lockRankTimers)
5926
5927
5928
5929 timerpMask.set(id)
5930
5931
5932 idlepMask.clear(id)
5933 }
5934
5935
5936
5937
5938
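// destroy releases all of the resources associated with pp and
// transitions it to status _Pdead.
//
// sched.lock must be held and the world must be stopped.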
5939 func (pp *p) destroy() {
5940 assertLockHeld(&sched.lock)
5941 assertWorldStopped()
5942
5943
5944 for pp.runqhead != pp.runqtail {
5945
5946 pp.runqtail--
5947 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5948
5949 globrunqputhead(gp)
5950 }
5951 if pp.runnext != 0 {
5952 globrunqputhead(pp.runnext.ptr())
5953 pp.runnext = 0
5954 }
5955
5956
5957 getg().m.p.ptr().timers.take(&pp.timers)
5958
5959
5960
5961 if phase := gcphase; phase != _GCoff {
5962 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5963 throw("P destroyed while GC is running")
5964 }
5965
5966 pp.gcw.spanq.destroy()
5967
5968 clear(pp.sudogbuf[:])
5969 pp.sudogcache = pp.sudogbuf[:0]
5970 pp.pinnerCache = nil
5971 clear(pp.deferpoolbuf[:])
5972 pp.deferpool = pp.deferpoolbuf[:0]
5973 systemstack(func() {
5974 for i := 0; i < pp.mspancache.len; i++ {
5975
5976 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5977 }
5978 pp.mspancache.len = 0
5979 lock(&mheap_.lock)
5980 pp.pcache.flush(&mheap_.pages)
5981 unlock(&mheap_.lock)
5982 })
5983 freemcache(pp.mcache)
5984 pp.mcache = nil
5985 gfpurge(pp)
5986 if raceenabled {
5987 if pp.timers.raceCtx != 0 {
5988
5989
5990
5991
5992
5993 mp := getg().m
5994 phold := mp.p.ptr()
5995 mp.p.set(pp)
5996
5997 racectxend(pp.timers.raceCtx)
5998 pp.timers.raceCtx = 0
5999
6000 mp.p.set(phold)
6001 }
6002 raceprocdestroy(pp.raceprocctx)
6003 pp.raceprocctx = 0
6004 }
6005 pp.gcAssistTime = 0
6006 gcCleanups.queued += pp.cleanupsQueued
6007 pp.cleanupsQueued = 0
6008 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
6009 pp.goroutinesCreated = 0
6010 pp.xRegs.free()
6011 pp.status = _Pdead
6012 }
6013
6014
6015
6016
6017
6018
6019
6020
6021
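// Change number of processors.
//
// sched.lock must be held, and the world must be stopped.
//
// Returns the list of Ps with local work; they need to be scheduled by the
// caller.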
6022 func procresize(nprocs int32) *p {
6023 assertLockHeld(&sched.lock)
6024 assertWorldStopped()
6025
6026 old := gomaxprocs
6027 if old < 0 || nprocs <= 0 {
6028 throw("procresize: invalid arg")
6029 }
6030 trace := traceAcquire()
6031 if trace.ok() {
6032 trace.Gomaxprocs(nprocs)
6033 traceRelease(trace)
6034 }
6035
6036
6037 now := nanotime()
6038 if sched.procresizetime != 0 {
6039 sched.totaltime += int64(old) * (now - sched.procresizetime)
6040 }
6041 sched.procresizetime = now
6042
6043
6044 if nprocs > int32(len(allp)) {
6045
6046
6047 lock(&allpLock)
6048 if nprocs <= int32(cap(allp)) {
6049 allp = allp[:nprocs]
6050 } else {
6051 nallp := make([]*p, nprocs)
6052
6053
6054 copy(nallp, allp[:cap(allp)])
6055 allp = nallp
6056 }
6057
6058 idlepMask = idlepMask.resize(nprocs)
6059 timerpMask = timerpMask.resize(nprocs)
6060 work.spanqMask = work.spanqMask.resize(nprocs)
6061 unlock(&allpLock)
6062 }
6063
6064
6065 for i := old; i < nprocs; i++ {
6066 pp := allp[i]
6067 if pp == nil {
6068 pp = new(p)
6069 }
6070 pp.init(i)
6071 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
6072 }
6073
6074 gp := getg()
6075 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
6076
6077 gp.m.p.ptr().status = _Prunning
6078 gp.m.p.ptr().mcache.prepareForSweep()
6079 } else {
6080
6081
6082
6083
6084
6085 if gp.m.p != 0 {
6086 trace := traceAcquire()
6087 if trace.ok() {
6088
6089
6090
6091 trace.GoSched()
6092 trace.ProcStop(gp.m.p.ptr())
6093 traceRelease(trace)
6094 }
6095 gp.m.p.ptr().m = 0
6096 }
6097 gp.m.p = 0
6098 pp := allp[0]
6099 pp.m = 0
6100 pp.status = _Pidle
6101 acquirep(pp)
6102 trace := traceAcquire()
6103 if trace.ok() {
6104 trace.GoStart()
6105 traceRelease(trace)
6106 }
6107 }
6108
6109
6110 mcache0 = nil
6111
6112
6113 for i := nprocs; i < old; i++ {
6114 pp := allp[i]
6115 pp.destroy()
6116
6117 }
6118
6119
6120 if int32(len(allp)) != nprocs {
6121 lock(&allpLock)
6122 allp = allp[:nprocs]
6123 idlepMask = idlepMask.resize(nprocs)
6124 timerpMask = timerpMask.resize(nprocs)
6125 work.spanqMask = work.spanqMask.resize(nprocs)
6126 unlock(&allpLock)
6127 }
6128
6129
6130 var runnablePs *p
6131 var runnablePsNeedM *p
6132 var idlePs *p
6133 for i := nprocs - 1; i >= 0; i-- {
6134 pp := allp[i]
6135 if gp.m.p.ptr() == pp {
6136 continue
6137 }
6138 pp.status = _Pidle
6139 if runqempty(pp) {
6140 pp.link.set(idlePs)
6141 idlePs = pp
6142 continue
6143 }
6144
6145
6146
6147
6148
6149
6150
6151
6152 var mp *m
6153 if oldm := pp.oldm.get(); oldm != nil {
6154
6155 mp = mgetSpecific(oldm)
6156 }
6157 if mp == nil {
6158
6159 pp.link.set(runnablePsNeedM)
6160 runnablePsNeedM = pp
6161 continue
6162 }
6163 pp.m.set(mp)
6164 pp.link.set(runnablePs)
6165 runnablePs = pp
6166 }
6167
6168
6169 for runnablePsNeedM != nil {
6170 pp := runnablePsNeedM
6171 runnablePsNeedM = pp.link.ptr()
6172
6173 mp := mget()
6174 pp.m.set(mp)
6175 pp.link.set(runnablePs)
6176 runnablePs = pp
6177 }
6178
6179
6180
6181
6182
6183
6184
6185
6186
6187
6188
6189
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203 if gcBlackenEnabled != 0 {
6204 for idlePs != nil {
6205 pp := idlePs
6206
6207 ok, _ := gcController.assignWaitingGCWorker(pp, now)
6208 if !ok {
6209
6210 break
6211 }
6212
6213
6214
6215
6216
6217
6218
6219
6220 idlePs = pp.link.ptr()
6221 mp := mget()
6222 pp.m.set(mp)
6223 pp.link.set(runnablePs)
6224 runnablePs = pp
6225 }
6226 }
6227
6228
6229 for idlePs != nil {
6230 pp := idlePs
6231 idlePs = pp.link.ptr()
6232 pidleput(pp, now)
6233 }
6234
6235 stealOrder.reset(uint32(nprocs))
6236 var int32p *int32 = &gomaxprocs
6237 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6238 if old != nprocs {
6239
6240 gcCPULimiter.resetCapacity(now, nprocs)
6241 }
6242 return runnablePs
6243 }
6244
6245
6246
6247
6248
6249
6250
6251 func acquirep(pp *p) {
6252
6253 acquirepNoTrace(pp)
6254
6255
6256 trace := traceAcquire()
6257 if trace.ok() {
6258 trace.ProcStart()
6259 traceRelease(trace)
6260 }
6261 }
6262
6263
6264
6265
6266 func acquirepNoTrace(pp *p) {
6267
6268 wirep(pp)
6269
6270
6271
6272
6273
6274
6275 pp.oldm = pp.m.ptr().self
6276
6277
6278
6279 pp.mcache.prepareForSweep()
6280 }
6281
6282
6283
6284
6285
6286
6287
6288 func wirep(pp *p) {
6289 gp := getg()
6290
6291 if gp.m.p != 0 {
6292
6293
6294 systemstack(func() {
6295 throw("wirep: already in go")
6296 })
6297 }
6298 if pp.m != 0 || pp.status != _Pidle {
6299
6300
6301 systemstack(func() {
6302 id := int64(0)
6303 if pp.m != 0 {
6304 id = pp.m.ptr().id
6305 }
6306 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6307 throw("wirep: invalid p state")
6308 })
6309 }
6310 gp.m.p.set(pp)
6311 pp.m.set(gp.m)
6312 pp.status = _Prunning
6313 }
6314
6315
6316 func releasep() *p {
6317 trace := traceAcquire()
6318 if trace.ok() {
6319 trace.ProcStop(getg().m.p.ptr())
6320 traceRelease(trace)
6321 }
6322 return releasepNoTrace()
6323 }
6324
6325
6326 func releasepNoTrace() *p {
6327 gp := getg()
6328
6329 if gp.m.p == 0 {
6330 throw("releasep: invalid arg")
6331 }
6332 pp := gp.m.p.ptr()
6333 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6334 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6335 throw("releasep: invalid p state")
6336 }
6337
6338
6339 gcController.releaseNextGCMarkWorker(pp)
6340
6341 gp.m.p = 0
6342 pp.m = 0
6343 pp.status = _Pidle
6344 return pp
6345 }
6346
6347 func incidlelocked(v int32) {
6348 lock(&sched.lock)
6349 sched.nmidlelocked += v
6350 if v > 0 {
6351 checkdead()
6352 }
6353 unlock(&sched.lock)
6354 }
6355
6356
6357
6358
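// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.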
6359 func checkdead() {
6360 assertLockHeld(&sched.lock)
6361
6362
6363
6364
6365
6366
6367 if (islibrary || isarchive) && GOARCH != "wasm" {
6368 return
6369 }
6370
6371
6372
6373
6374
6375 if panicking.Load() > 0 {
6376 return
6377 }
6378
6379
6380
6381
6382
6383 var run0 int32
6384 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6385 run0 = 1
6386 }
6387
6388 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6389 if run > run0 {
6390 return
6391 }
6392 if run < 0 {
6393 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6394 unlock(&sched.lock)
6395 throw("checkdead: inconsistent counts")
6396 }
6397
6398 grunning := 0
6399 forEachG(func(gp *g) {
6400 if isSystemGoroutine(gp, false) {
6401 return
6402 }
6403 s := readgstatus(gp)
6404 switch s &^ _Gscan {
6405 case _Gwaiting,
6406 _Gpreempted:
6407 grunning++
6408 case _Grunnable,
6409 _Grunning,
6410 _Gsyscall:
6411 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6412 unlock(&sched.lock)
6413 throw("checkdead: runnable g")
6414 }
6415 })
6416 if grunning == 0 {
6417 unlock(&sched.lock)
6418 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6419 }
6420
6421
6422 if faketime != 0 {
6423 if when := timeSleepUntil(); when < maxWhen {
6424 faketime = when
6425
6426
6427 pp, _ := pidleget(faketime)
6428 if pp == nil {
6429
6430
6431 unlock(&sched.lock)
6432 throw("checkdead: no p for timer")
6433 }
6434 mp := mget()
6435 if mp == nil {
6436
6437
6438 unlock(&sched.lock)
6439 throw("checkdead: no m for timer")
6440 }
6441
6442
6443
6444 sched.nmspinning.Add(1)
6445 mp.spinning = true
6446 mp.nextp.set(pp)
6447 notewakeup(&mp.park)
6448 return
6449 }
6450 }
6451
6452
6453 for _, pp := range allp {
6454 if len(pp.timers.heap) > 0 {
6455 return
6456 }
6457 }
6458
6459 unlock(&sched.lock)
6460 fatal("all goroutines are asleep - deadlock!")
6461 }
6462
6463
6464
6465
6466
6467
6468 var forcegcperiod int64 = 2 * 60 * 1e9
6469
6470
6471
6472
6473 const haveSysmon = GOARCH != "wasm"
6474
6475
6476
6477
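// Always runs without a P, so write barriers are not allowed.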
6478 func sysmon() {
6479 lock(&sched.lock)
6480 sched.nmsys++
6481 checkdead()
6482 unlock(&sched.lock)
6483
6484 lastgomaxprocs := int64(0)
6485 lasttrace := int64(0)
6486 idle := 0
6487 delay := uint32(0)
6488
6489 for {
6490 if idle == 0 {
6491 delay = 20
6492 } else if idle > 50 {
6493 delay *= 2
6494 }
6495 if delay > 10*1000 {
6496 delay = 10 * 1000
6497 }
6498 usleep(delay)
6499
6500
6501
6502
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515 now := nanotime()
6516 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6517 lock(&sched.lock)
6518 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6519 syscallWake := false
6520 next := timeSleepUntil()
6521 if next > now {
6522 sched.sysmonwait.Store(true)
6523 unlock(&sched.lock)
6524
6525
6526 sleep := forcegcperiod / 2
6527 if next-now < sleep {
6528 sleep = next - now
6529 }
6530 shouldRelax := sleep >= osRelaxMinNS
6531 if shouldRelax {
6532 osRelax(true)
6533 }
6534 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6535 if shouldRelax {
6536 osRelax(false)
6537 }
6538 lock(&sched.lock)
6539 sched.sysmonwait.Store(false)
6540 noteclear(&sched.sysmonnote)
6541 }
6542 if syscallWake {
6543 idle = 0
6544 delay = 20
6545 }
6546 }
6547 unlock(&sched.lock)
6548 }
6549
6550 lock(&sched.sysmonlock)
6551
6552
6553 now = nanotime()
6554
6555
6556 if *cgo_yield != nil {
6557 asmcgocall(*cgo_yield, nil)
6558 }
6559
6560 lastpoll := sched.lastpoll.Load()
6561 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6562 sched.lastpoll.CompareAndSwap(lastpoll, now)
6563 list, delta := netpoll(0)
6564 if !list.empty() {
6565
6566
6567
6568
6569
6570
6571
6572 incidlelocked(-1)
6573 injectglist(&list)
6574 incidlelocked(1)
6575 netpollAdjustWaiters(delta)
6576 }
6577 }
6578
6579 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6580 sysmonUpdateGOMAXPROCS()
6581 lastgomaxprocs = now
6582 }
6583 if scavenger.sysmonWake.Load() != 0 {
6584
6585 scavenger.wake()
6586 }
6587
6588
6589 if retake(now) != 0 {
6590 idle = 0
6591 } else {
6592 idle++
6593 }
6594
6595 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6596 lock(&forcegc.lock)
6597 forcegc.idle.Store(false)
6598 var list gList
6599 list.push(forcegc.g)
6600 injectglist(&list)
6601 unlock(&forcegc.lock)
6602 }
6603 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6604 lasttrace = now
6605 schedtrace(debug.scheddetail > 0)
6606 }
6607 unlock(&sched.sysmonlock)
6608 }
6609 }
6610
6611 type sysmontick struct {
6612 schedtick uint32
6613 syscalltick uint32
6614 schedwhen int64
6615 syscallwhen int64
6616 }
6617
6618
6619
6620 const forcePreemptNS = 10 * 1000 * 1000
6621
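// retake preempts Ps that have been running the same G for too long
// (forcePreemptNS) and retakes Ps blocked in a syscall when there is other
// work to do or the syscall has lasted long enough, handing the P off via
// handoffp. Returns the number of Ps retaken.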
6622 func retake(now int64) uint32 {
6623 n := 0
6624
6625
6626 lock(&allpLock)
6627
6628
6629
6630 for i := 0; i < len(allp); i++ {
6631
6632
6633
6634
6635
6636
6637
6638
6639 pp := allp[i]
6640 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6641
6642
6643 continue
6644 }
6645 pd := &pp.sysmontick
6646 sysretake := false
6647
6648
6649
6650
6651
6652 schedt := int64(pp.schedtick)
6653 if int64(pd.schedtick) != schedt {
6654 pd.schedtick = uint32(schedt)
6655 pd.schedwhen = now
6656 } else if pd.schedwhen+forcePreemptNS <= now {
6657 preemptone(pp)
6658
6659
6660
6661
6662 sysretake = true
6663 }
6664
6665
6666 unlock(&allpLock)
6667
6668
6669
6670
6671
6672
6673
6674
6675 incidlelocked(-1)
6676
6677
6678 thread, ok := setBlockOnExitSyscall(pp)
6679 if !ok {
6680
6681 goto done
6682 }
6683
6684
6685 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6686 pd.syscalltick = uint32(syst)
6687 pd.syscallwhen = now
6688 thread.resume()
6689 goto done
6690 }
6691
6692
6693
6694
6695 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6696 thread.resume()
6697 goto done
6698 }
6699
6700
6701
6702 thread.takeP()
6703 thread.resume()
6704 n++
6705
6706
6707 handoffp(pp)
6708
6709
6710
6711 done:
6712 incidlelocked(1)
6713 lock(&allpLock)
6714 }
6715 unlock(&allpLock)
6716 return uint32(n)
6717 }
6718
6719
6720
6721 type syscallingThread struct {
6722 gp *g
6723 mp *m
6724 pp *p
6725 status uint32
6726 }
6727
6728
6729
6730
6731
6732
6733
6734
6735
6736
6737
6738
6739
6740
6741
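// setBlockOnExitSyscall checks whether pp's current goroutine is in a
// syscall and, if so, sets its _Gscan bit so that it cannot complete its
// status transition out of the syscall until resume is called. It returns
// the syscalling thread and whether the attempt succeeded.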
6742 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6743 if pp.status != _Prunning {
6744 return syscallingThread{}, false
6745 }
6746
6747
6748
6749
6750
6751
6752
6753
6754
6755
6756
6757 mp := pp.m.ptr()
6758 if mp == nil {
6759
6760 return syscallingThread{}, false
6761 }
6762 gp := mp.curg
6763 if gp == nil {
6764
6765 return syscallingThread{}, false
6766 }
6767 status := readgstatus(gp) &^ _Gscan
6768
6769
6770
6771
6772 if status != _Gsyscall && status != _Gdeadextra {
6773
6774 return syscallingThread{}, false
6775 }
6776 if !castogscanstatus(gp, status, status|_Gscan) {
6777
6778 return syscallingThread{}, false
6779 }
6780 if gp.m != mp || gp.m.p.ptr() != pp {
6781
6782 casfrom_Gscanstatus(gp, status|_Gscan, status)
6783 return syscallingThread{}, false
6784 }
6785 return syscallingThread{gp, mp, pp, status}, true
6786 }
6787
6788
6789
6790
6791
6792 func (s syscallingThread) gcstopP() {
6793 assertLockHeld(&sched.lock)
6794
6795 s.releaseP(_Pgcstop)
6796 s.pp.gcStopTime = nanotime()
6797 sched.stopwait--
6798 }
6799
6800
6801
6802 func (s syscallingThread) takeP() {
6803 s.releaseP(_Pidle)
6804 }
6805
6806
6807
6808
6809 func (s syscallingThread) releaseP(state uint32) {
6810 if state != _Pidle && state != _Pgcstop {
6811 throw("attempted to release P into a bad state")
6812 }
6813 trace := traceAcquire()
6814 s.pp.m = 0
6815 s.mp.p = 0
6816 atomic.Store(&s.pp.status, state)
6817 if trace.ok() {
6818 trace.ProcSteal(s.pp)
6819 traceRelease(trace)
6820 }
6821 addGSyscallNoP(s.mp)
6822 s.pp.syscalltick++
6823 }
6824
6825
6826 func (s syscallingThread) resume() {
6827 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6828 }
6829
6830
6831
6832
6833
6834
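// Tell all goroutines that they have been preempted and they should stop.
// This function is purely best-effort. It can fail to inform a goroutine if
// a processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.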
6835 func preemptall() bool {
6836 res := false
6837 for _, pp := range allp {
6838 if pp.status != _Prunning {
6839 continue
6840 }
6841 if preemptone(pp) {
6842 res = true
6843 }
6844 }
6845 return res
6846 }
6847
6848
6849
6850
6851
6852
6853
6854
6855
6856
6857
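// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform the
// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.
// Returns true if preemption request was issued.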
6858 func preemptone(pp *p) bool {
6859 mp := pp.m.ptr()
6860 if mp == nil || mp == getg().m {
6861 return false
6862 }
6863 gp := mp.curg
6864 if gp == nil || gp == mp.g0 {
6865 return false
6866 }
6867 if readgstatus(gp)&^_Gscan == _Gsyscall {
6868
6869 return false
6870 }
6871
6872 gp.preempt = true
6873
6874
6875
6876
6877
6878 gp.stackguard0 = stackPreempt
6879
6880
6881 if preemptMSupported && debug.asyncpreemptoff == 0 {
6882 pp.preempt = true
6883 preemptM(mp)
6884 }
6885
6886 return true
6887 }
6888
6889 var starttime int64
6890 // schedtrace dumps scheduler state in the format used by the schedtrace and scheddetail GODEBUG settings.
6891 func schedtrace(detailed bool) {
6892 now := nanotime()
6893 if starttime == 0 {
6894 starttime = now
6895 }
6896
6897 lock(&sched.lock)
6898 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6899 if detailed {
6900 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6901 }
6902
6903
6904
6905 for i, pp := range allp {
6906 h := atomic.Load(&pp.runqhead)
6907 t := atomic.Load(&pp.runqtail)
6908 if detailed {
6909 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6910 mp := pp.m.ptr()
6911 if mp != nil {
6912 print(mp.id)
6913 } else {
6914 print("nil")
6915 }
6916 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6917 } else {
6918
6919
6920 print(" ")
6921 if i == 0 {
6922 print("[ ")
6923 }
6924 print(t - h)
6925 if i == len(allp)-1 {
6926 print(" ]")
6927 }
6928 }
6929 }
6930
6931 if !detailed {
6932
6933 print(" schedticks=[ ")
6934 for _, pp := range allp {
6935 print(pp.schedtick)
6936 print(" ")
6937 }
6938 print("]\n")
6939 }
6940
6941 if !detailed {
6942 unlock(&sched.lock)
6943 return
6944 }
6945
6946 for mp := allm; mp != nil; mp = mp.alllink {
6947 pp := mp.p.ptr()
6948 print(" M", mp.id, ": p=")
6949 if pp != nil {
6950 print(pp.id)
6951 } else {
6952 print("nil")
6953 }
6954 print(" curg=")
6955 if mp.curg != nil {
6956 print(mp.curg.goid)
6957 } else {
6958 print("nil")
6959 }
6960 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6961 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6962 print(lockedg.goid)
6963 } else {
6964 print("nil")
6965 }
6966 print("\n")
6967 }
6968
6969 forEachG(func(gp *g) {
6970 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6971 if gp.m != nil {
6972 print(gp.m.id)
6973 } else {
6974 print("nil")
6975 }
6976 print(" lockedm=")
6977 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6978 print(lockedm.id)
6979 } else {
6980 print("nil")
6981 }
6982 print("\n")
6983 })
6984 unlock(&sched.lock)
6985 }
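
// The trace lines above are emitted periodically by sysmon when the schedtrace
// GODEBUG setting is nonzero; scheddetail=1 selects the detailed format. A
// minimal way to observe the output from a user program (a sketch; the program
// itself is arbitrary):
//
//	// Run with: GODEBUG=schedtrace=1000,scheddetail=1 ./prog
//	package main
//
//	import (
//		"sync"
//		"time"
//	)
//
//	func main() {
//		var wg sync.WaitGroup
//		for i := 0; i < 8; i++ {
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				time.Sleep(5 * time.Second) // keep goroutines alive while traces print
//			}()
//		}
//		wg.Wait()
//	}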
6986
6987 type updateMaxProcsGState struct {
6988 lock mutex
6989 g *g
6990 idle atomic.Bool
6991
6992 // procs is the GOMAXPROCS value to apply, set by sysmon before waking the goroutine.
6993 procs int32
6994 }
6995
6996 var (
6997 // updatemaxprocs counts non-default uses of the updatemaxprocs GODEBUG
6998 // setting, for reporting via runtime/metrics.
6999 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
7000
7001 // updateMaxProcsG is the state shared between sysmon and the goroutine that
7002 // applies automatic GOMAXPROCS updates.
7003 updateMaxProcsG updateMaxProcsGState
7004 // Automatic GOMAXPROCS updates work roughly as follows:
7005 //
7006 // sysmon periodically calls sysmonUpdateGOMAXPROCS, which recomputes the
7007 // default GOMAXPROCS value (CPU count, affinity mask, and, on Linux, cgroup
7008 // CPU limits). If the result differs from the current gomaxprocs and the
7009 // user has not set GOMAXPROCS explicitly (sched.customGOMAXPROCS), sysmon
7010 // wakes updateMaxProcsGoroutine.
7011 //
7012 // updateMaxProcsGoroutine stops the world, re-checks customGOMAXPROCS, sets
7013 // newprocs, and restarts the world; starting the world applies newprocs via
7014 // procresize.
7015 //
7016 // The work is split this way because sysmon runs without a P and cannot stop the world itself.
7017
7018
7019
7020
7021
7022
7023
7024
7025
7026
7027
7028
7029
7030
7031
7032
7033
7034
7035
7036
7037
7038
7039
7040
7041
7042
7043
7044
7045
7046
7047
7048
7049 // computeMaxProcsLock serializes computation of the default GOMAXPROCS
7050 // value, which may consult the CPU affinity mask and cgroup limits and is
7051 // therefore too heavyweight to do while holding sched.lock.
7052 computeMaxProcsLock mutex
7053 )
7054
7055 // defaultGOMAXPROCSUpdateEnable starts the background machinery that keeps
7056 // GOMAXPROCS in sync with its default value. With GODEBUG=updatemaxprocs=0 it
7057 // only records the non-default setting and returns.
7058 func defaultGOMAXPROCSUpdateEnable() {
7059 if debug.updatemaxprocs == 0 {
7060
7061
7062
7063
7064
7065
7066
7067
7068
7069
7070
7071 updatemaxprocs.IncNonDefault()
7072 return
7073 }
7074
7075 go updateMaxProcsGoroutine()
7076 }
7077
7078 func updateMaxProcsGoroutine() {
7079 updateMaxProcsG.g = getg()
7080 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
7081 for {
7082 lock(&updateMaxProcsG.lock)
7083 if updateMaxProcsG.idle.Load() {
7084 throw("updateMaxProcsGoroutine: phase error")
7085 }
7086 updateMaxProcsG.idle.Store(true)
7087 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
7088
7089
7090 stw := stopTheWorldGC(stwGOMAXPROCS)
7091
7092
7093 lock(&sched.lock)
7094 custom := sched.customGOMAXPROCS
7095 unlock(&sched.lock)
7096 if custom {
7097 startTheWorldGC(stw)
7098 return
7099 }
7100
7101
7102
7103
7104
7105 newprocs = updateMaxProcsG.procs
7106 lock(&sched.lock)
7107 sched.customGOMAXPROCS = false
7108 unlock(&sched.lock)
7109
7110 startTheWorldGC(stw)
7111 }
7112 }
7113 // sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS and, if it changed, wakes the update goroutine. Called by sysmon.
7114 func sysmonUpdateGOMAXPROCS() {
7115
7116 lock(&computeMaxProcsLock)
7117
7118
7119 lock(&sched.lock)
7120 custom := sched.customGOMAXPROCS
7121 curr := gomaxprocs
7122 unlock(&sched.lock)
7123 if custom {
7124 unlock(&computeMaxProcsLock)
7125 return
7126 }
7127
7128
7129 procs := defaultGOMAXPROCS(0)
7130 unlock(&computeMaxProcsLock)
7131 if procs == curr {
7132
7133 return
7134 }
7135
7136
7137
7138
7139 if updateMaxProcsG.idle.Load() {
7140 lock(&updateMaxProcsG.lock)
7141 updateMaxProcsG.procs = procs
7142 updateMaxProcsG.idle.Store(false)
7143 var list gList
7144 list.push(updateMaxProcsG.g)
7145 injectglist(&list)
7146 unlock(&updateMaxProcsG.lock)
7147 }
7148 }
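
// The user-visible side of this machinery is the runtime.GOMAXPROCS API and the
// updatemaxprocs GODEBUG setting. As the customGOMAXPROCS checks above suggest,
// querying with GOMAXPROCS(0) leaves automatic updates enabled, while setting an
// explicit value disables them. A small illustrative sketch (real API, arbitrary
// values):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// Query without changing; the default value stays under runtime control.
//		fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
//
//		// An explicit setting marks GOMAXPROCS as custom, so sysmon stops
//		// adjusting it automatically.
//		prev := runtime.GOMAXPROCS(4)
//		fmt.Println("previous:", prev)
//	}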
7149
7150
7151 // schedEnableUser enables or disables the scheduling of user goroutines.
7152 //
7153 // This does not stop already running user goroutines, so the caller should
7154 // first stop the world when disabling user goroutines.
7155 func schedEnableUser(enable bool) {
7156 lock(&sched.lock)
7157 if sched.disable.user == !enable {
7158 unlock(&sched.lock)
7159 return
7160 }
7161 sched.disable.user = !enable
7162 if enable {
7163 n := sched.disable.runnable.size
7164 globrunqputbatch(&sched.disable.runnable)
7165 unlock(&sched.lock)
7166 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7167 startm(nil, false, false)
7168 }
7169 } else {
7170 unlock(&sched.lock)
7171 }
7172 }
7173
7174 // schedEnabled reports whether gp should be scheduled. It returns false if
7175 // scheduling of gp is disabled.
7176 //
7177 // sched.lock must be held.
7178 func schedEnabled(gp *g) bool {
7179 assertLockHeld(&sched.lock)
7180
7181 if sched.disable.user {
7182 return isSystemGoroutine(gp, true)
7183 }
7184 return true
7185 }
7186
7187 // mput puts mp onto the midle list.
7188 //
7189 // sched.lock must be held.
7190 //
7191 // May run during STW, so write barriers are not allowed.
7192 func mput(mp *m) {
7193 assertLockHeld(&sched.lock)
7194
7195 sched.midle.push(unsafe.Pointer(mp))
7196 sched.nmidle++
7197 checkdead()
7198 }
7199
7200 // mget tries to get an m from the midle list.
7201 //
7202 // sched.lock must be held.
7203 //
7204 // May run during STW, so write barriers are not allowed.
7205 func mget() *m {
7206 assertLockHeld(&sched.lock)
7207
7208 mp := (*m)(sched.midle.pop())
7209 if mp != nil {
7210 sched.nmidle--
7211 }
7212 return mp
7213 }
7214
7215
7216
7217
7218 // mgetSpecific removes mp from the midle list and returns it, or returns nil
7219 // if mp is not on the list.
7220 //
7221 // sched.lock must be held.
7222 func mgetSpecific(mp *m) *m {
7223 assertLockHeld(&sched.lock)
7224
7225 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7226
7227 return nil
7228 }
7229
7230 sched.midle.remove(unsafe.Pointer(mp))
7231 sched.nmidle--
7232
7233 return mp
7234 }
7235
7236 // globrunqput puts gp on the global runnable queue.
7237 //
7238 // sched.lock must be held.
7239 //
7240 // May run during STW, so write barriers are not allowed.
7241 func globrunqput(gp *g) {
7242 assertLockHeld(&sched.lock)
7243
7244 sched.runq.pushBack(gp)
7245 }
7246
7247 // globrunqputhead puts gp at the head of the global runnable queue.
7248 //
7249 // sched.lock must be held.
7250 //
7251 // May run during STW, so write barriers are not allowed.
7252 func globrunqputhead(gp *g) {
7253 assertLockHeld(&sched.lock)
7254
7255 sched.runq.push(gp)
7256 }
7257
7258
7259 // globrunqputbatch appends an entire batch of gs to the tail of the global
7260 // runnable queue, leaving *batch empty.
7261 //
7262 // sched.lock must be held.
7263 // May run during STW, so write barriers are not allowed.
7264 func globrunqputbatch(batch *gQueue) {
7265 assertLockHeld(&sched.lock)
7266
7267 sched.runq.pushBackAll(*batch)
7268 *batch = gQueue{}
7269 }
7270
7271 // globrunqget takes one g off the global runnable queue.
7272 // sched.lock must be held.
7273 func globrunqget() *g {
7274 assertLockHeld(&sched.lock)
7275
7276 if sched.runq.size == 0 {
7277 return nil
7278 }
7279
7280 return sched.runq.pop()
7281 }
7282
7283 // globrunqgetbatch takes a batch of gs from the global runnable queue, bounded
7284 // by n and by a fair share of the queue; it returns one g directly and the rest in q. sched.lock must be held.
7285 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7286 assertLockHeld(&sched.lock)
7287
7288 if sched.runq.size == 0 {
7289 return
7290 }
7291
7292 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7293
7294 gp = sched.runq.pop()
7295 n--
7296
7297 for ; n > 0; n-- {
7298 gp1 := sched.runq.pop()
7299 q.pushBack(gp1)
7300 }
7301 return
7302 }
7303
7304 // pMask is an atomic bitmask of Ps, indexed by P ID (one bit per P).
7305 type pMask []uint32
7306
7307
7308 func (p pMask) read(id uint32) bool {
7309 word := id / 32
7310 mask := uint32(1) << (id % 32)
7311 return (atomic.Load(&p[word]) & mask) != 0
7312 }
7313
7314
7315 func (p pMask) set(id int32) {
7316 word := id / 32
7317 mask := uint32(1) << (id % 32)
7318 atomic.Or(&p[word], mask)
7319 }
7320
7321
7322 func (p pMask) clear(id int32) {
7323 word := id / 32
7324 mask := uint32(1) << (id % 32)
7325 atomic.And(&p[word], ^mask)
7326 }
7327
7328
7329 func (p pMask) any() bool {
7330 for i := range p {
7331 if atomic.Load(&p[i]) != 0 {
7332 return true
7333 }
7334 }
7335 return false
7336 }
7337
7338
7339 // resize returns a pMask with room for nprocs Ps, reusing p's storage when it
7340 // already has sufficient capacity and otherwise allocating a new mask and
7341 // copying the old bits over.
7342 func (p pMask) resize(nprocs int32) pMask {
7343 maskWords := (nprocs + 31) / 32
7344
7345 if maskWords <= int32(cap(p)) {
7346 return p[:maskWords]
7347 }
7348 newMask := make([]uint32, maskWords)
7349
7350 copy(newMask, p)
7351 return newMask
7352 }
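
// The word/bit arithmetic above is the usual packed-bitmap scheme: bit id lives
// in word id/32 at position id%32. A standalone, non-atomic analogue (ordinary
// Go for illustration, not the runtime's pMask):
//
//	package main
//
//	import "fmt"
//
//	type mask []uint32
//
//	func (m mask) set(id int)       { m[id/32] |= 1 << (id % 32) }
//	func (m mask) clear(id int)     { m[id/32] &^= 1 << (id % 32) }
//	func (m mask) read(id int) bool { return m[id/32]&(1<<(id%32)) != 0 }
//
//	func main() {
//		m := make(mask, (70+31)/32) // room for 70 ids in 3 words
//		m.set(69)
//		fmt.Println(m.read(69), m.read(68)) // true false
//	}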
7353
7354 // pidleput puts pp on the _Pidle list.
7355 //
7356 // This releases ownership of pp. Once sched.lock is released it is no longer
7357 // safe to use pp.
7358 //
7359 // now must be a relatively recent call to nanotime, or zero. pidleput returns
7360 // now or the current time if now was zero.
7361 //
7362 // sched.lock must be held.
7363 //
7364 // May run during STW, so write barriers are not allowed.
7365 func pidleput(pp *p, now int64) int64 {
7366 assertLockHeld(&sched.lock)
7367
7368 if !runqempty(pp) {
7369 throw("pidleput: P has non-empty run queue")
7370 }
7371 if now == 0 {
7372 now = nanotime()
7373 }
7374 if pp.timers.len.Load() == 0 {
7375 timerpMask.clear(pp.id)
7376 }
7377 idlepMask.set(pp.id)
7378 pp.link = sched.pidle
7379 sched.pidle.set(pp)
7380 sched.npidle.Add(1)
7381 if !pp.limiterEvent.start(limiterEventIdle, now) {
7382 throw("must be able to track idle limiter event")
7383 }
7384 return now
7385 }
7386
7387
7388
7389 // pidleget tries to get a p from the _Pidle list, acquiring ownership.
7390 //
7391 // sched.lock must be held.
7392 //
7393 // May run during STW, so write barriers are not allowed.
7394 func pidleget(now int64) (*p, int64) {
7395 assertLockHeld(&sched.lock)
7396
7397 pp := sched.pidle.ptr()
7398 if pp != nil {
7399
7400 if now == 0 {
7401 now = nanotime()
7402 }
7403 timerpMask.set(pp.id)
7404 idlepMask.clear(pp.id)
7405 sched.pidle = pp.link
7406 sched.npidle.Add(-1)
7407 pp.limiterEvent.stop(limiterEventIdle, now)
7408 }
7409 return pp, now
7410 }
7411
7412
7413
7414
7415 // pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership.
7416 // It is called by spinning Ms (or by callers that need a spinning M) that have
7417 // found work. If no P is available, it records that a spinning M is wanted
7418 // (sched.needspinning) so that Ms about to drop their P re-check for work
7419 // instead of going idle.
7420 //
7421 // sched.lock must be held.
7422 func pidlegetSpinning(now int64) (*p, int64) {
7423 assertLockHeld(&sched.lock)
7424
7425 pp, now := pidleget(now)
7426 if pp == nil {
7427
7428
7429
7430 sched.needspinning.Store(1)
7431 return nil, now
7432 }
7433
7434 return pp, now
7435 }
7436
7437 // runqempty reports whether pp has no Gs on its local run queue.
7438 // It never returns true spuriously.
7439 func runqempty(pp *p) bool {
7440 // Defend against the race where runnext is moved into the ring between our
7441 // reads: observing head == tail and then runnext == 0 is not by itself proof
7442 // that the queue is empty. Loading head, tail, and runnext in order and then
7443 // re-checking that tail has not moved yields a consistent snapshot.
7444 for {
7445 head := atomic.Load(&pp.runqhead)
7446 tail := atomic.Load(&pp.runqtail)
7447 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7448 if tail == atomic.Load(&pp.runqtail) {
7449 return head == tail && runnext == 0
7450 }
7451 }
7452 }
7453
7454
7455
7456
7457 // To shake out latent assumptions about scheduling order, we introduce some
7458 // randomness into scheduling decisions when running with the race detector.
7459 // The need for this was made obvious by changing the (deterministic)
7460 // scheduling order in Go 1.5 and breaking many poorly-written tests.
7461 // With the randomness here, as long as tests pass consistently with -race,
7462 // they shouldn't have latent scheduling assumptions.
7463 const randomizeScheduler = raceenabled
7464
7465 // runqput tries to put g on the local runnable queue.
7466 // If next is false, runqput adds g to the tail of the runnable queue.
7467 // If next is true, runqput puts g in the pp.runnext slot.
7468 // If the run queue is full, g is put on the global queue.
7469 // Executed only by the owner P.
7470 func runqput(pp *p, gp *g, next bool) {
7471 if !haveSysmon && next {
7472 // A runnext goroutine shares the current goroutine's time slice (see
7473 // inheritTime in runqget). Without sysmon there is no forced preemption,
7474 // so a pair of goroutines that repeatedly ready each other via runnext
7475 // could monopolize the P and starve the rest of the run queue
7476 // indefinitely. Fall back to the ordinary queue in that case.
7477
7478
7479
7480 next = false
7481 }
7482 if randomizeScheduler && next && randn(2) == 0 {
7483 next = false
7484 }
7485
7486 if next {
7487 retryNext:
7488 oldnext := pp.runnext
7489 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7490 goto retryNext
7491 }
7492 if oldnext == 0 {
7493 return
7494 }
7495
7496 gp = oldnext.ptr()
7497 }
7498
7499 retry:
7500 h := atomic.LoadAcq(&pp.runqhead)
7501 t := pp.runqtail
7502 if t-h < uint32(len(pp.runq)) {
7503 pp.runq[t%uint32(len(pp.runq))].set(gp)
7504 atomic.StoreRel(&pp.runqtail, t+1)
7505 return
7506 }
7507 if runqputslow(pp, gp, h, t) {
7508 return
7509 }
7510
7511 goto retry
7512 }
7513
7514 // runqputslow puts g and a batch of work from the local runnable queue on the
7515 // global queue. Executed only by the owner P.
7516 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7517 var batch [len(pp.runq)/2 + 1]*g
7518
7519 // First, grab half of the gs from the local queue into batch.
7520 n := t - h
7521 n = n / 2
7522 if n != uint32(len(pp.runq)/2) {
7523 throw("runqputslow: queue is not full")
7524 }
7525 for i := uint32(0); i < n; i++ {
7526 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7527 }
7528 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7529 return false
7530 }
7531 batch[n] = gp
7532
7533 if randomizeScheduler {
7534 for i := uint32(1); i <= n; i++ {
7535 j := cheaprandn(i + 1)
7536 batch[i], batch[j] = batch[j], batch[i]
7537 }
7538 }
7539
7540
7541 for i := uint32(0); i < n; i++ {
7542 batch[i].schedlink.set(batch[i+1])
7543 }
7544
7545 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7546
7547
7548 lock(&sched.lock)
7549 globrunqputbatch(&q)
7550 unlock(&sched.lock)
7551 return true
7552 }
7553
7554 // runqputbatch tries to put all the gs in q on the local runnable queue.
7555 // Any gs that do not fit remain in q.
7556 // Executed only by the owner P.
7557 func runqputbatch(pp *p, q *gQueue) {
7558 if q.empty() {
7559 return
7560 }
7561 h := atomic.LoadAcq(&pp.runqhead)
7562 t := pp.runqtail
7563 n := uint32(0)
7564 for !q.empty() && t-h < uint32(len(pp.runq)) {
7565 gp := q.pop()
7566 pp.runq[t%uint32(len(pp.runq))].set(gp)
7567 t++
7568 n++
7569 }
7570
7571 if randomizeScheduler {
7572 off := func(o uint32) uint32 {
7573 return (pp.runqtail + o) % uint32(len(pp.runq))
7574 }
7575 for i := uint32(1); i < n; i++ {
7576 j := cheaprandn(i + 1)
7577 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7578 }
7579 }
7580
7581 atomic.StoreRel(&pp.runqtail, t)
7582
7583 return
7584 }
7585
7586 // runqget gets a g from the local runnable queue.
7587 // If inheritTime is true, gp should inherit the remaining time in the current
7588 // time slice. Otherwise, it should start a new time slice.
7589 // Executed only by the owner P.
7590 func runqget(pp *p) (gp *g, inheritTime bool) {
7591
7592 next := pp.runnext
7593
7594
7595
7596 if next != 0 && pp.runnext.cas(next, 0) {
7597 return next.ptr(), true
7598 }
7599
7600 for {
7601 h := atomic.LoadAcq(&pp.runqhead)
7602 t := pp.runqtail
7603 if t == h {
7604 return nil, false
7605 }
7606 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7607 if atomic.CasRel(&pp.runqhead, h, h+1) {
7608 return gp, false
7609 }
7610 }
7611 }
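
// The local run queue is a fixed-size, single-producer ring: runqhead and
// runqtail are free-running uint32 counters, slots are indexed modulo
// len(pp.runq), and counter wraparound is harmless. A single-threaded analogue
// of the put/get arithmetic (illustrative only; it ignores the atomics,
// runnext, and the spill to the global queue):
//
//	package main
//
//	import "fmt"
//
//	const qlen = 8 // a power of two, like len(pp.runq)
//
//	type ring struct {
//		head, tail uint32 // free-running; tail-head is the occupancy
//		buf        [qlen]int
//	}
//
//	func (r *ring) put(v int) bool {
//		if r.tail-r.head == qlen {
//			return false // full; the runtime would move half to the global queue
//		}
//		r.buf[r.tail%qlen] = v
//		r.tail++
//		return true
//	}
//
//	func (r *ring) get() (int, bool) {
//		if r.tail == r.head {
//			return 0, false
//		}
//		v := r.buf[r.head%qlen]
//		r.head++
//		return v, true
//	}
//
//	func main() {
//		var r ring
//		for i := 0; i < 10; i++ {
//			fmt.Println(i, r.put(i)) // the last two puts report false
//		}
//		for v, ok := r.get(); ok; v, ok = r.get() {
//			fmt.Print(v, " ") // 0 1 2 3 4 5 6 7
//		}
//	}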
7612
7613 // runqdrain drains the local runnable queue of pp and returns all goroutines
7614 // in it. Executed only by the owner P.
7615 func runqdrain(pp *p) (drainQ gQueue) {
7616 oldNext := pp.runnext
7617 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7618 drainQ.pushBack(oldNext.ptr())
7619 }
7620
7621 retry:
7622 h := atomic.LoadAcq(&pp.runqhead)
7623 t := pp.runqtail
7624 qn := t - h
7625 if qn == 0 {
7626 return
7627 }
7628 if qn > uint32(len(pp.runq)) {
7629 goto retry
7630 }
7631
7632 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7633 goto retry
7634 }
7635
7636
7637
7638
7639 // The CAS above claimed the entries in [h, h+qn): once runqhead has moved
7640 // past them, concurrent stealers can no longer take them, and only the owner
7641 // P (us) writes new entries at the tail, so it is safe to read the claimed
7642 // slots below without further synchronization.
7643 for i := uint32(0); i < qn; i++ {
7644 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7645 drainQ.pushBack(gp)
7646 }
7647 return
7648 }
7649
7650 // runqgrab grabs a batch of goroutines from pp's local runnable queue into
7651 // batch, which is treated as a ring buffer starting at batchHead.
7652 // Returns the number of grabbed goroutines.
7653 // Can be executed by any P.
7654 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7655 for {
7656 h := atomic.LoadAcq(&pp.runqhead)
7657 t := atomic.LoadAcq(&pp.runqtail)
7658 n := t - h
7659 n = n - n/2
7660 if n == 0 {
7661 if stealRunNextG {
7662
7663 if next := pp.runnext; next != 0 {
7664 if pp.status == _Prunning {
7665 if mp := pp.m.ptr(); mp != nil {
7666 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
7667 // pp is running and its current goroutine is not blocked in a syscall,
7668 // so pp itself is likely about to run the runnext g (a very common
7669 // pattern is a goroutine that readies another and then immediately
7670 // blocks). Instead of stealing runnext in this window and thrashing gs
7671 // between Ps, back off briefly to give pp a chance to schedule it. A
7672 // sync channel send/recv takes on the order of 50ns, so sleeping 3us
7673 // gives a generous margin.
7674
7675
7676
7677
7678
7679
7680
7681
7682
7683
7684
7685
7686 if !osHasLowResTimer {
7687 usleep(3)
7688 } else {
7689 // On platforms with coarse timer granularity (a millisecond or more),
7690 // usleep(3) would sleep far longer than intended; just yield the OS
7691 // thread instead.
7692 osyield()
7693 }
7694 }
7695 }
7696 }
7697 if !pp.runnext.cas(next, 0) {
7698 continue
7699 }
7700 batch[batchHead%uint32(len(batch))] = next
7701 return 1
7702 }
7703 }
7704 return 0
7705 }
7706 if n > uint32(len(pp.runq)/2) {
7707 continue
7708 }
7709 for i := uint32(0); i < n; i++ {
7710 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7711 batch[(batchHead+i)%uint32(len(batch))] = g
7712 }
7713 if atomic.CasRel(&pp.runqhead, h, h+n) {
7714 return n
7715 }
7716 }
7717 }
7718
7719 // runqsteal steals half of the elements from the local runnable queue of p2
7720 // and puts them onto the local runnable queue of pp.
7721 // Returns one of the stolen elements (or nil if it failed).
7722 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7723 t := pp.runqtail
7724 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7725 if n == 0 {
7726 return nil
7727 }
7728 n--
7729 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7730 if n == 0 {
7731 return gp
7732 }
7733 h := atomic.LoadAcq(&pp.runqhead)
7734 if t-h+n >= uint32(len(pp.runq)) {
7735 throw("runqsteal: runq overflow")
7736 }
7737 atomic.StoreRel(&pp.runqtail, t+n)
7738 return gp
7739 }
7740
7741 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only be on
7742 // one gQueue or gList at a time.
7743 type gQueue struct {
7744 head guintptr
7745 tail guintptr
7746 size int32
7747 }
7748
7749
7750 func (q *gQueue) empty() bool {
7751 return q.head == 0
7752 }
7753
7754
7755 func (q *gQueue) push(gp *g) {
7756 gp.schedlink = q.head
7757 q.head.set(gp)
7758 if q.tail == 0 {
7759 q.tail.set(gp)
7760 }
7761 q.size++
7762 }
7763
7764
7765 func (q *gQueue) pushBack(gp *g) {
7766 gp.schedlink = 0
7767 if q.tail != 0 {
7768 q.tail.ptr().schedlink.set(gp)
7769 } else {
7770 q.head.set(gp)
7771 }
7772 q.tail.set(gp)
7773 q.size++
7774 }
7775
7776 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must not be
7777 // used.
7778 func (q *gQueue) pushBackAll(q2 gQueue) {
7779 if q2.tail == 0 {
7780 return
7781 }
7782 q2.tail.ptr().schedlink = 0
7783 if q.tail != 0 {
7784 q.tail.ptr().schedlink = q2.head
7785 } else {
7786 q.head = q2.head
7787 }
7788 q.tail = q2.tail
7789 q.size += q2.size
7790 }
7791
7792
7793 // pop removes and returns the head of q, or nil if q is empty.
7794 func (q *gQueue) pop() *g {
7795 gp := q.head.ptr()
7796 if gp != nil {
7797 q.head = gp.schedlink
7798 if q.head == 0 {
7799 q.tail = 0
7800 }
7801 q.size--
7802 }
7803 return gp
7804 }
7805
7806
7807 func (q *gQueue) popList() gList {
7808 stack := gList{q.head, q.size}
7809 *q = gQueue{}
7810 return stack
7811 }
7812
7813 // A gList is a list of Gs linked through g.schedlink. A G can only be on one
7814 // gQueue or gList at a time.
7815 type gList struct {
7816 head guintptr
7817 size int32
7818 }
7819
7820
7821 func (l *gList) empty() bool {
7822 return l.head == 0
7823 }
7824
7825
7826 func (l *gList) push(gp *g) {
7827 gp.schedlink = l.head
7828 l.head.set(gp)
7829 l.size++
7830 }
7831
7832
7833 func (l *gList) pushAll(q gQueue) {
7834 if !q.empty() {
7835 q.tail.ptr().schedlink = l.head
7836 l.head = q.head
7837 l.size += q.size
7838 }
7839 }
7840
7841
7842 func (l *gList) pop() *g {
7843 gp := l.head.ptr()
7844 if gp != nil {
7845 l.head = gp.schedlink
7846 l.size--
7847 }
7848 return gp
7849 }
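
// gQueue and gList are intrusive containers: the links live in the g itself
// (g.schedlink), so push and pop never allocate, at the cost that a G can be on
// only one queue or list at a time. A simplified analogue with an ordinary
// pointer field (illustrative; the runtime uses guintptr links to avoid write
// barriers):
//
//	package main
//
//	import "fmt"
//
//	type node struct {
//		id   int
//		link *node // plays the role of g.schedlink
//	}
//
//	type queue struct {
//		head, tail *node
//		size       int
//	}
//
//	func (q *queue) pushBack(n *node) {
//		n.link = nil
//		if q.tail != nil {
//			q.tail.link = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//		q.size++
//	}
//
//	func (q *queue) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.link
//			if q.head == nil {
//				q.tail = nil
//			}
//			q.size--
//		}
//		return n
//	}
//
//	func main() {
//		var q queue
//		for i := 1; i <= 3; i++ {
//			q.pushBack(&node{id: i})
//		}
//		for n := q.pop(); n != nil; n = q.pop() {
//			fmt.Println(n.id) // 1, then 2, then 3
//		}
//	}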
7850
7851 // setMaxThreads implements runtime/debug.SetMaxThreads; it returns the previous limit.
7852 func setMaxThreads(in int) (out int) {
7853 lock(&sched.lock)
7854 out = int(sched.maxmcount)
7855 if in > 0x7fffffff {
7856 sched.maxmcount = 0x7fffffff
7857 } else {
7858 sched.maxmcount = int32(in)
7859 }
7860 checkmcount()
7861 unlock(&sched.lock)
7862 return
7863 }
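
// setMaxThreads backs the exported runtime/debug.SetMaxThreads. A minimal use of
// the public API (the new limit chosen here is arbitrary):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime/debug"
//	)
//
//	func main() {
//		// Raise the OS-thread limit and report the previous one (10000 by default).
//		prev := debug.SetMaxThreads(20000)
//		fmt.Println("previous limit:", prev)
//	}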
7864
7865
7866
7867
7868
7869
7870
7871
7872
7873 // procPin pins the current goroutine to its P by incrementing m.locks, which
7874 // disables preemption and therefore migration to another P, and returns the
7875 // id of that P. It is reached via go:linkname by several external packages,
7876 // so its signature must not change.
7877 func procPin() int {
7878 gp := getg()
7879 mp := gp.m
7880
7881 mp.locks++
7882 return int(mp.p.ptr().id)
7883 }
7884
7885
7886
7887
7888
7889
7890
7891
7892
7893
7894 // procUnpin undoes the effect of procPin, decrementing m.locks so the current
7895 // goroutine can once again be preempted and migrate between Ps. Like procPin,
7896 // it is reached via go:linkname by external packages.
7897 func procUnpin() {
7898 gp := getg()
7899 gp.m.locks--
7900 }
7901
7902
7903
7904 func sync_runtime_procPin() int {
7905 return procPin()
7906 }
7907
7908
7909
7910 func sync_runtime_procUnpin() {
7911 procUnpin()
7912 }
7913
7914
7915
7916 func sync_atomic_runtime_procPin() int {
7917 return procPin()
7918 }
7919
7920
7921
7922 func sync_atomic_runtime_procUnpin() {
7923 procUnpin()
7924 }
7925
7926
7927
7928 // internal_sync_runtime_canSpin reports whether it is worthwhile to keep
7929 // actively spinning at spin iteration i while acquiring an internal/sync mutex.
7930 func internal_sync_runtime_canSpin(i int) bool {
7931 // sync.Mutex is cooperative, so be conservative with spinning. Spin only a
7932 // few times, and only if we are on a multicore machine, there is at least one
7933 // other running P, and our local run queue is empty. Unlike with the runtime
7934 // mutex we do no passive spinning, since there can be work on the global run
7935 // queue or on other Ps.
7936 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7937 return false
7938 }
7939 if p := getg().m.p.ptr(); !runqempty(p) {
7940 return false
7941 }
7942 return true
7943 }
7944
7945 // internal_sync_runtime_doSpin performs one bout of active spinning by
7946 // executing procyield(active_spin_cnt).
7947 func internal_sync_runtime_doSpin() {
7948 procyield(active_spin_cnt)
7949 }
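
// Together, canSpin and doSpin implement the bounded active-spinning phase of
// mutex acquisition: spin briefly when another P is likely to release the lock
// soon, then fall back to blocking. A user-level sketch of that shape
// (hypothetical helper; not the runtime's or sync's actual code):
//
//	package main
//
//	import (
//		"runtime"
//		"sync/atomic"
//	)
//
//	func acquire(locked *atomic.Bool) {
//		spins := 0
//		for {
//			if locked.CompareAndSwap(false, true) {
//				return
//			}
//			if spins < 4 && runtime.NumCPU() > 1 {
//				spins++
//				for i := 0; i < 30; i++ {
//					_ = i // busy wait; stands in for procyield(active_spin_cnt)
//				}
//				continue
//			}
//			spins = 0
//			runtime.Gosched() // stands in for parking on a semaphore
//		}
//	}
//
//	func main() {
//		var l atomic.Bool
//		acquire(&l)
//		l.Store(false)
//	}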
7950
7951
7952
7953
7954
7955
7956
7957
7958
7959
7960
7961
7962 // sync_runtime_canSpin is kept for code that reaches into the runtime via
7963 // go:linkname; the supported implementation is internal_sync_runtime_canSpin
7964 // above. Do not change its signature.
7965 func sync_runtime_canSpin(i int) bool {
7966 return internal_sync_runtime_canSpin(i)
7967 }
7968
7969
7970
7971
7972
7973
7974
7975
7976
7977
7978 // sync_runtime_doSpin, like sync_runtime_canSpin, survives only for
7979 // go:linkname users; new code should use the internal_sync_runtime variants
7980 // above. Do not change its signature.
7981 func sync_runtime_doSpin() {
7982 internal_sync_runtime_doSpin()
7983 }
7984
7985 var stealOrder randomOrder
7986
7987 // randomOrder/randomEnum are helper types for randomized work stealing.
7988 // They allow enumerating all Ps in different pseudo-random orders without
7989 // repetitions. The algorithm is based on the fact that if X and count are
7990 // coprime, then (pos + X) % count visits every value in [0, count) exactly once.
7991 type randomOrder struct {
7992 count uint32
7993 coprimes []uint32
7994 }
7995
7996 type randomEnum struct {
7997 i uint32
7998 count uint32
7999 pos uint32
8000 inc uint32
8001 }
8002
8003 func (ord *randomOrder) reset(count uint32) {
8004 ord.count = count
8005 ord.coprimes = ord.coprimes[:0]
8006 for i := uint32(1); i <= count; i++ {
8007 if gcd(i, count) == 1 {
8008 ord.coprimes = append(ord.coprimes, i)
8009 }
8010 }
8011 }
8012
8013 func (ord *randomOrder) start(i uint32) randomEnum {
8014 return randomEnum{
8015 count: ord.count,
8016 pos: i % ord.count,
8017 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
8018 }
8019 }
8020
8021 func (enum *randomEnum) done() bool {
8022 return enum.i == enum.count
8023 }
8024
8025 func (enum *randomEnum) next() {
8026 enum.i++
8027 enum.pos = (enum.pos + enum.inc) % enum.count
8028 }
8029
8030 func (enum *randomEnum) position() uint32 {
8031 return enum.pos
8032 }
8033
8034 func gcd(a, b uint32) uint32 {
8035 for b != 0 {
8036 a, b = b, a%b
8037 }
8038 return a
8039 }
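
// A worked example of the coprime trick: with count = 6 the coprimes are {1, 5};
// starting from randomOrder.start(7), pos = 7%6 = 1 and inc = 5, so the
// enumeration visits 1, 0, 5, 4, 3, 2, i.e. every P exactly once in a shuffled
// order. A standalone mirror of the algorithm (a separate copy for
// illustration, not the runtime's types):
//
//	package main
//
//	import "fmt"
//
//	func gcd(a, b uint32) uint32 {
//		for b != 0 {
//			a, b = b, a%b
//		}
//		return a
//	}
//
//	func main() {
//		const count = 6
//		var coprimes []uint32
//		for i := uint32(1); i <= count; i++ {
//			if gcd(i, count) == 1 {
//				coprimes = append(coprimes, i)
//			}
//		}
//		fmt.Println("coprimes:", coprimes) // [1 5]
//
//		// As randomOrder.start(7) would: pos = 7 % 6 = 1,
//		// inc = coprimes[(7/6) % len(coprimes)] = coprimes[1] = 5.
//		pos, inc := uint32(7%count), coprimes[(7/count)%uint32(len(coprimes))]
//		for i := 0; i < count; i++ {
//			fmt.Print(pos, " ") // 1 0 5 4 3 2
//			pos = (pos + inc) % count
//		}
//	}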
8040
8041 // An initTask represents the set of initializations that need to be done for
8042 // a package. Its layout is known to the compiler and linker.
8043 type initTask struct {
8044 state uint32
8045 nfns uint32
8046 // Followed in memory by nfns function PCs, one per init function to run.
8047 }
8048
8049 // inittrace holds statistics for init functions that run before main starts;
8050 // it is populated when GODEBUG=inittrace=1.
8051 var inittrace tracestat
8052
8053 type tracestat struct {
8054 active bool
8055 id uint64
8056 allocs uint64
8057 bytes uint64
8058 }
8059
8060 func doInit(ts []*initTask) {
8061 for _, t := range ts {
8062 doInit1(t)
8063 }
8064 }
8065
8066 func doInit1(t *initTask) {
8067 switch t.state {
8068 case 2:
8069 return
8070 case 1:
8071 throw("recursive call during initialization - linker skew")
8072 default:
8073 t.state = 1
8074
8075 var (
8076 start int64
8077 before tracestat
8078 )
8079
8080 if inittrace.active {
8081 start = nanotime()
8082
8083 before = inittrace
8084 }
8085
8086 if t.nfns == 0 {
8087
8088 throw("inittask with no functions")
8089 }
8090 // The init function PCs are laid out immediately after the 8-byte header (state and nfns).
8091 firstFunc := add(unsafe.Pointer(t), 8)
8092 for i := uint32(0); i < t.nfns; i++ {
8093 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
8094 f := *(*func())(unsafe.Pointer(&p))
8095 f()
8096 }
8097
8098 if inittrace.active {
8099 end := nanotime()
8100
8101 after := inittrace
8102
8103 f := *(*func())(unsafe.Pointer(&firstFunc))
8104 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
8105
8106 var sbuf [24]byte
8107 print("init ", pkg, " @")
8108 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
8109 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
8110 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
8111 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
8112 print("\n")
8113 }
8114
8115 t.state = 2
8116 }
8117 }
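
// The timing and allocation statistics printed above are what GODEBUG=inittrace=1
// shows at program startup: one line per package init, with clock time,
// allocated bytes, and allocation count. A minimal way to see it (sketch; any
// program that imports a non-trivial package will do):
//
//	// Run with: GODEBUG=inittrace=1 ./prog
//	package main
//
//	import (
//		"fmt"
//		_ "net/http" // pulls in a sizable init graph so the trace has content
//	)
//
//	func main() {
//		fmt.Println("main reached; init tracing happened before this line")
//	}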
8118