Source file
src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/exithook"
15 "internal/runtime/sys"
16 "internal/strconv"
17 "internal/stringslite"
18 "unsafe"
19 )
20
21
22 var modinfo string
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
var (
	// m0 and g0 are the bootstrap M and its g0, statically allocated
	// because they exist before any allocation is possible.
	m0           m
	g0           g
	// mcache0 is the bootstrap mcache, used before any P exists.
	mcache0      *mcache
	// raceprocctx0 is the race detector context for m0/g0.
	raceprocctx0 uintptr
	// raceFiniLock serializes race detector finalization.
	raceFiniLock mutex
)
125
126
127
// runtime_inittasks is the list of init tasks for the runtime package
// itself, set up by the linker.
var runtime_inittasks []*initTask

// mainInitDone reports whether main init has completed.
var mainInitDone atomic.Bool

// mainInitDoneChan is closed once main init completes; read by callers
// that must wait for initialization (e.g. cgo callbacks — see main).
var mainInitDoneChan chan bool

// main_main is the program's main.main function, provided by the linker
// (presumably via linkname — directive lives elsewhere; confirm).
func main_main()

// mainStarted is set once runtime.main is running; after that newproc
// is allowed to start new Ms (see the sysmon startup in main).
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// initSigmask is the signal mask captured from the initial thread in
// schedinit; used as the starting signal mask for new Ms.
var initSigmask sigset
150
151
// main is the entry goroutine of the Go program. It finishes runtime
// setup, runs all package init functions, calls main.main, and exits.
func main() {
	mp := getg().m

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	mp.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Decimal instead of binary sizes: they read better in the
	// stack-overflow failure message.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// An upper limit for max stack size. Guards against crashes after
	// SetMaxStack requests a stack bigger than the allocator handles.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread, during
	// initialization. Most programs won't care, but a few do require
	// certain calls to be made by the main thread. Those can arrange
	// for main.main to run on the main thread by calling
	// runtime.LockOSThread during initialization to preserve the lock.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the world started. Must be before doInit for tracing init.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks) // Must be before defer.

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()
	defaultGOMAXPROCSUpdateEnable()

	mainInitDoneChan = make(chan bool)
	if iscgo {
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if GOOS != "windows" {
			if _cgo_thread_start == nil {
				throw("_cgo_thread_start missing")
			}
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		// Set the C function-pointer variable to crosscall2.
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread in case we enter Go from a
		// C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the init tasks of all linked modules in dependency order
	// (the order they appear on the moduledata linked list). The list
	// always contains the init tasks computed by the linker for all
	// packages in the program (excluding those added at run time by
	// package plugin). Snapshot lastmoduledatap so plugins loaded by
	// an init don't extend this loop.
	last := lastmoduledatap
	for m := &firstmoduledata; true; m = m.next {
		doInit(m.inittasks)
		if m == last {
			break
		}
	}

	// Disable init tracing after main init is done, to avoid collecting
	// statistics in malloc and newproc.
	inittrace.active = false

	mainInitDone.Store(true)
	close(mainInitDoneChan)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		if GOARCH == "wasm" {
			// On Wasm, pause returns control to the host while
			// keeping this M (and G) alive for callbacks.
			// Using the caller's SP unwinds this frame back toward
			// goexit; the -16 accounts for two saved return-address
			// slots (NOTE(review): exact layout — confirm against
			// the wasm assembly).
			pause(sys.GetCallerSP() - 16) // should not return
			panic("unreachable")
		}
		return
	}
	// Indirect call: the linker doesn't know the address of the main
	// package when laying down the runtime.
	fn := main_main
	fn()

	// Check for C memory leaks under ASAN if we've made cgo calls or
	// run as a library. Exit hooks must run first because lsan may not
	// run after exit.
	exitHooksRun := false
	if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		runExitHooks(0)
		exitHooksRun = true
		lsandoleakcheck()
	}

	// Make racy client programs work: if another goroutine is panicking
	// at the same time as main returns, let it finish printing the
	// panic trace before we exit.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		// Park forever; the panicking goroutine will call exit.
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	if !exitHooksRun {
		runExitHooks(0)
	}
	if raceenabled {
		racefini()
	}

	exit(0)
	// exit should never return; crash hard if it does.
	for {
		var x *int32
		*x = 0
	}
}
339
340
341
342
// os_beforeExit is called from os.Exit(exitCode) before the process
// terminates (presumably via linkname; directive not visible here).
// It runs exit hooks and, on a clean exit, race/leak finalization.
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	// Only finalize the race detector on a successful exit.
	if exitCode == 0 && raceenabled {
		racefini()
	}

	// Check for C memory leaks under ASAN on a clean exit if we've made
	// cgo calls or run as a library (mirrors the check in main).
	if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		lsandoleakcheck()
	}
}
354
// Wire the exithook package's runtime dependencies; exithook cannot
// import runtime directly.
func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}
360
// runExitHooks runs the registered exit hooks with the given exit code.
func runExitHooks(code int) {
	exithook.Run(code)
}
364
365
// Start the forced-GC helper goroutine during package init.
func init() {
	go forcegchelper()
}
369
// forcegchelper sleeps until woken by sysmon (when a GC is overdue)
// and then starts a time-triggered GC cycle.
func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		// Park until sysmon sets idle=false and readies us.
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)

		if debug.gctrace > 0 {
			println("GC forced")
		}

		// Time-triggered, fully concurrent GC.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}
388
389
390
391
392
// Gosched yields the processor, allowing other goroutines to run. It
// does not suspend the current goroutine, so execution resumes
// automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
397
398
399
400
401
// goschedguarded yields the processor like Gosched, but the yield is
// abandoned (in goschedguarded_m) if the caller is in an unsafe state
// such as holding locks.
func goschedguarded() {
	mcall(goschedguarded_m)
}
405
406
407
408
409
410
// goschedIfBusy yields the processor like Gosched, but only if there
// are no idle Ps or we're the only running goroutine needing to honor
// a preemption request; otherwise yielding would just waste time.
func goschedIfBusy() {
	gp := getg()
	// Be conservative about when to yield: always yield if gp.preempt
	// is set, even if there are idle Ps.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
// gopark puts the current goroutine into a waiting state and calls
// unlockf on the system stack. If unlockf returns false, the goroutine
// is resumed immediately. unlockf must not access this G's stack, as it
// may be moved between the call to gopark and the call to unlockf.
//
// Reason explains why the goroutine parked; it is shown in stack traces
// and heap dumps. Reasons should be unique and descriptive, and should
// not be re-used.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	// Disable preemption while we stash the park parameters on the M.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}
468
469
470
// goparkunlock parks the current goroutine and releases lock once it is
// safe to do so (via parkunlock_c on the system stack).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
474
475
476
477
478
479
480
481
482
483
484
// goready marks gp as runnable, placing it in the current P's run queue
// (as the next goroutine to run). Runs on the system stack because
// ready may not be called from a user stack.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
490
491
// acquireSudog returns a sudog from the per-P cache, refilling from the
// global cache (or allocating) as needed.
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls acquireSudog,
	// acquireSudog calls new(sudog), new calls malloc, malloc can call
	// the garbage collector, and the garbage collector calls the
	// semaphore implementation in stopTheWorld. Break the cycle by
	// doing acquirem/releasem around new(sudog): the acquirem/releasem
	// increments m.locks during new(sudog), which prevents the garbage
	// collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from the central cache, filling
		// the local cache to half capacity.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop from the tail of the local cache, clearing the slot so the
	// GC doesn't retain the sudog.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem.get() != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}
528
529
// releaseSudog returns s to the per-P cache, spilling half the local
// cache to the global cache when the local one is full. s must be fully
// disconnected (nil elem/next/prev/waitlink/c, not in a select).
func releaseSudog(s *sudog) {
	if s.elem.get() != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c.get() != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of the local cache to the central cache as a
		// singly linked list.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
578
579
// badmcall is called (from assembly) when mcall is invoked while
// already on the g0 stack, which is not allowed.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}
583
// badmcall2 is called (from assembly) if an mcall'd function returns,
// which mcall functions must never do.
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}
587
// badreflectcall reports a reflect.call with an argument frame larger
// than the supported maximum (1 GB).
func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}
591
592
593
// badmorestackg0 handles a stack overflow on g0. If possible it
// switches to the dedicated crash stack to print a traceback; otherwise
// it can only emit a short message (printing a traceback needs stack).
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include pc and sp in stack trace
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}
610
611
612
// badmorestackgsignal reports a stack overflow on the signal-handling
// goroutine; there is no safe stack to print a traceback from.
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}
616
617
// badctxt is called (from assembly) when morestack sees an unexpected
// non-zero context register.
func badctxt() {
	throw("ctxt != 0")
}
621
622
623
// gcrash is a fake g that carries the crash stack (allocated in
// schedinit), used when crashing due to bad stack conditions.
var gcrash g

// crashingG records the single g allowed on the crash stack at a time.
var crashingG atomic.Pointer[g]
627
628
629
630
631
632
633
634
635
// switchToCrashStack switches to the dedicated crash stack and calls
// fn, which must not return. Only one g may use the crash stack at a
// time; losers of the race spin or abort.
func switchToCrashStack(fn func()) {
	me := getg()
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// Recursive crash: we're already on the crash stack.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another goroutine is crashing; give it a moment, then give up.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}
652
653
654
655
// crashStackImplemented reports whether this platform supports
// switching to the crash stack (all but windows in this build).
const crashStackImplemented = GOOS != "windows"

// switchToCrashStack0 is implemented in assembly: it switches SP to the
// crash stack and calls fn.
func switchToCrashStack0(fn func())
660
// lockedOSThread reports whether the calling goroutine is locked to its
// OS thread (both directions of the g<->m lock are set).
func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}
665
var (
	// allglock protects allgs (and the derived allglen/allgptr fields
	// below) against concurrent mutation.
	allglock mutex
	// allgs is the slice of all gs ever created (gs are never freed).
	allgs    []*g

	// allglen and allgptr are atomic snapshots of len(allgs) and
	// &allgs[0], readable without allglock. Readers that use them
	// observe a consistent prefix: allgptr is always updated before
	// allglen grows (see allgadd), and gs are never removed, so a
	// reader may see a length smaller than the live slice but never an
	// out-of-range pointer.
	allglen uintptr
	allgptr **g
)
691
// allgadd appends gp to allgs and publishes the atomic snapshot fields
// for lock-free readers (atomicAllG).
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	// Publish the (possibly new) backing array pointer before the new
	// length, so lock-free readers never index past valid memory.
	if &allgs[0] != allgptr {
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}
705
706
707
708
// allGsSnapshot returns a snapshot of the current allgs. The world must
// be stopped or allglock held.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	// Because the world is stopped or allglock is held, allgadd cannot
	// happen concurrently with this. allgs grows monotonically and
	// existing entries never change, so we can simply return the slice
	// with capacity pinned to its length so a future append cannot
	// share our backing array.
	return allgs[:len(allgs):len(allgs)]
}
719
720
// atomicAllG returns &allgs[0] and len(allgs) without taking allglock.
// The length is loaded first, so the returned prefix is always valid
// (see the allgs variable comments).
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}
726
727
// atomicAllGIndex returns ptr[i], where ptr was obtained from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
731
732
733
734
// forEachG calls fn on every G from allgs, holding allglock for the
// duration so no G can be added concurrently.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}
742
743
744
745
746
// forEachGRace calls fn on every G from allgs without locking. It may
// miss Gs added concurrently (it iterates only the snapshot returned by
// atomicAllG).
func forEachGRace(fn func(gp *g)) {
	ptr, length := atomicAllG()
	for i := uintptr(0); i < length; i++ {
		gp := atomicAllGIndex(ptr, i)
		fn(gp)
	}
	return
}
755
const (
	// Number of goroutine ids to grab from sched.goidgen to local
	// per-P cache at once. 16 seems to provide enough amortization,
	// but other than that it's mostly arbitrary number.
	_GoidCacheBatch = 16
)
761
762
763
// cpuinit sets up CPU feature detection and copies the relevant feature
// flags into package-level variables used by assembly fast paths.
// env is the GODEBUG value (which can force features off).
func cpuinit(env string) {
	cpu.Initialize(env)

	// Support cpu feature variables that are used in code generated by
	// the compiler and in hand-written assembly (copied per GOARCH).
	switch GOARCH {
	case "386", "amd64":
		x86HasAVX = cpu.X86.HasAVX
		x86HasFMA = cpu.X86.HasFMA
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS

	case "loong64":
		loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
		loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
		loong64HasDBAR_HINTS = cpu.Loong64.HasDBAR_HINTS
		loong64HasLSX = cpu.Loong64.HasLSX

	case "riscv64":
		riscv64HasZbb = cpu.RISCV64.HasZbb
	}
}
792
793
794
795
796
797
// getGodebugEarly extracts the environment variable GODEBUG from the
// environment on Unix-like systems and returns it, plus whether it
// succeeded. This must be done as early as possible so GODEBUG settings
// can affect early runtime initialization, before goenvs() has run.
func getGodebugEarly() (string, bool) {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		// TODO(moehrmann): remove when general goenvs() can be called
		// before cpuinit() (upstream-style note; confirm).
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))

			if stringslite.HasPrefix(s, prefix) {
				env = gostringnocopy(p)[len(prefix):]
				break
			}
		}
		break

	default:
		// On other platforms the environment block is not directly
		// accessible this early.
		return "", false
	}
	return env, true
}
827
828
829
830
831
832
833
834
835
// schedinit performs scheduler and runtime bootstrap.
//
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()
	// Enforce that this lock is always a leaf lock.
	// All of this lock's critical sections should be
	// extremely short.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	lockVerifyMSize()

	sched.midle.init(unsafe.Offsetof(m{}.idleNode))

	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0))

	// The world starts stopped.
	worldStopped()

	godebug, parsedGodebug := getGodebugEarly()
	if parsedGodebug {
		parseRuntimeDebugVars(godebug)
	}
	ticks.init() // run as early as possible
	moduledataverify()
	stackinit()
	randinit() // must run before alginit, mcommoninit
	mallocinit()
	cpuinit(godebug) // must run before alginit
	alginit()        // maps, hash, rand must not be used before this call
	mcommoninit(gp.m, -1)
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules
	stkobjinit()    // must run before GC starts

	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	checkfds()
	if !parsedGodebug {
		// The GODEBUG environment variable was not parsed yet (it was
		// not available early on this platform); parse it now that
		// goenvs has run.
		parseRuntimeDebugVars(gogetenv("GODEBUG"))
	}
	finishDebugVarsSetup()
	gcinit()

	// Allocate stack space that can be used when crashing due to bad
	// stack conditions, e.g. morestack on g0.
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000

	// If disableMemoryProfiling is set by the linker (nothing consumes
	// the profile), force MemProfileRate to 0 even if debug vars set it.
	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	// mcommoninit runs before debug vars are parsed, so initialize the
	// profile stacks for m0 again now that profstackdepth is known.
	mProfStackInit(gp.m)
	defaultGOMAXPROCSInit()

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	var procs int32
	if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
		// Explicit GOMAXPROCS from the environment wins.
		procs = int32(n)
		sched.customGOMAXPROCS = true
	} else {
		// Otherwise compute the default from the startup CPU count.
		procs = defaultGOMAXPROCS(numCPUStartup)
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started now, as P's can run.
	worldStarted()

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger. This code just serves
		// to ensure runtime·modinfo is kept in the resulting binary.
		modinfo = ""
	}
}
960
// dumpgstatus prints the status of gp and of the current g, for
// debugging bad status transitions.
func dumpgstatus(gp *g) {
	thisg := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
966
967
// checkmcount throws if the number of threads the program itself is
// responsible for exceeds sched.maxmcount. sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude extra Ms: those are threads created by non-Go threads
	// calling into Go, counted against the OS's own thread limits
	// rather than ours. extraMLength is subtracted outright; extraMInUse
	// covers extra Ms temporarily in use, so an extra M in a cgo
	// callback is not double-counted against the limit.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}
985
986
987
988
989
// mReserveID returns the next ID to use for a new m. This new m is
// immediately considered 'running' by checkdead.
//
// sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}
1001
1002
// mcommoninit performs the common, OS-independent parts of initializing
// a new m. id is the m's ID, or -1 to reserve a fresh one.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for user (and is not necessary unwindable).
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mp.self = newMWeakPointer(mp)

	mrandinit(mp)

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() and other functions iterate over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
	mProfStackInit(mp)
}
1043
1044
1045
1046
1047
// mProfStackInit initializes eager per-m profile stack buffers so that
// profiling code can record call stacks without allocating.
func mProfStackInit(mp *m) {
	if debug.profstackdepth == 0 {
		// debug.profstackdepth is set via GODEBUG=profstackdepth=...,
		// so it won't change during execution; a zero depth means
		// profiling stacks are disabled entirely.
		return
	}
	mp.profStack = makeProfStackFP()
	mp.mLockProfile.stack = makeProfStackFP()
}
1057
1058
1059
1060
// makeProfStackFP creates a buffer large enough to hold a maximum-size
// stack trace recorded with frame pointer unwinding.
func makeProfStackFP() []uintptr {
	// The "1" term is to account for the first stack entry being
	// taken up by a "skip" sentinel value for profilers which
	// defer inline frame expansion until the profile is reported.
	// The "maxSkip" term is for frame pointer unwinding, where we
	// want to end up with debug.profstackdepth frames but will discard
	// some "physical" frames to account for skipping.
	return make([]uintptr, 1+maxSkip+debug.profstackdepth)
}
1070
1071
1072
// makeProfStack returns a buffer large enough to hold a maximum-size
// stack trace (without the frame-pointer-unwinding slack).
func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }

// pprof_makeProfStack exposes makeProfStack to the pprof package
// (presumably via linkname; directive not visible here).
func pprof_makeProfStack() []uintptr { return makeProfStack() }
1077
// becomeSpinning marks mp as a spinning M (one actively looking for
// work), updating the scheduler's spinning bookkeeping.
func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}
1083
1084
1085
1086
1087
1088
1089
1090
// snapshotAllp records the current allp slice on mp so that iteration
// remains safe while allp may be mutated concurrently; the snapshot is
// cleared again with clearAllpSnapshot.
func (mp *m) snapshotAllp() []*p {
	mp.allpSnapshot = allp
	return mp.allpSnapshot
}
1095
1096
1097
1098
1099
1100
1101
// clearAllpSnapshot drops the allp snapshot taken by snapshotAllp so
// the backing array is not kept alive unnecessarily.
func (mp *m) clearAllpSnapshot() {
	mp.allpSnapshot = nil
}
1105
// hasCgoOnStack reports whether mp may have cgo frames on its stack
// (active cgo calls, or an M created by C code).
func (mp *m) hasCgoOnStack() bool {
	return mp.ncgo > 0 || mp.isextra
}
1109
const (
	// osHasLowResTimer indicates that the platform's internal timer
	// has low resolution, typically on the order of 1 ms or more.
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is osHasLowResClock assigned to an int
	// representation for use in expressions that require it.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock indicates that timestamps produced by nanotime
	// have a low resolution, typically on the order of 1 ms or more.
	osHasLowResClock = osHasLowResClockInt > 0
)
1123
1124
// ready marks gp (which must be _Gwaiting) runnable and puts it on the
// current P's run queue, waking a P if needed. Must run on the system
// stack (callers use goready).
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Mark runnable.
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}
1146
1147
1148
// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to, so that any P checking stopwait sees that the
// world is being stopped and parks.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world (during fatal panic/crash).
var freezing atomic.Bool
1154
1155
1156
1157
// freezetheworld is a best-effort attempt to stop all goroutines during
// a fatal crash so tracebacks are consistent. Unlike stopTheWorld it
// does not wait for acknowledgment and may be called from any context.
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// Don't prempt Ps to stop goroutines, hence the name. They
		// preempt goroutines, but let them keep running (rather than
		// parking). This leaves threads running and lets crash
		// handling dump the state of every goroutine mid-execution,
		// which is useful for debugging the scheduler itself. The
		// brief sleep gives racing goroutines a chance to park so the
		// crash output is a bit more stable.
		usleep(1000)
		return
	}

	// stopwait and preemption requests can be lost due to races with
	// concurrently executing threads, so try several times.
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}
1207
1208
1209
1210
1211
// readgstatus returns gp's status with an atomic load. All reads of
// gp.atomicstatus go through this function (or the CAS helpers below).
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}
1215
1216
1217
1218
1219
1220 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1221 success := false
1222
1223
1224 switch oldval {
1225 default:
1226 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1227 dumpgstatus(gp)
1228 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1229 case _Gscanrunnable,
1230 _Gscanwaiting,
1231 _Gscanrunning,
1232 _Gscansyscall,
1233 _Gscanleaked,
1234 _Gscanpreempted,
1235 _Gscandeadextra:
1236 if newval == oldval&^_Gscan {
1237 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1238 }
1239 }
1240 if !success {
1241 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1242 dumpgstatus(gp)
1243 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1244 }
1245 releaseLockRankAndM(lockRankGscan)
1246 }
1247
1248
1249
1250 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1251 switch oldval {
1252 case _Grunnable,
1253 _Grunning,
1254 _Gwaiting,
1255 _Gleaked,
1256 _Gsyscall,
1257 _Gdeadextra:
1258 if newval == oldval|_Gscan {
1259 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1260 if r {
1261 acquireLockRankAndM(lockRankGscan)
1262 }
1263 return r
1264
1265 }
1266 }
1267 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1268 throw("bad oldval passed to castogscanstatus")
1269 return false
1270 }
1271
1272
1273
1274 var casgstatusAlwaysTrack = false
1275
1276
1277
1278
1279
1280
1281
1282 func casgstatus(gp *g, oldval, newval uint32) {
1283 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1284 systemstack(func() {
1285
1286
1287 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1288 throw("casgstatus: bad incoming values")
1289 })
1290 }
1291
1292 lockWithRankMayAcquire(nil, lockRankGscan)
1293
1294
1295 const yieldDelay = 5 * 1000
1296 var nextYield int64
1297
1298
1299
1300 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1301 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1302 systemstack(func() {
1303
1304
1305 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1306 })
1307 }
1308 if i == 0 {
1309 nextYield = nanotime() + yieldDelay
1310 }
1311 if nanotime() < nextYield {
1312 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1313 procyield(1)
1314 }
1315 } else {
1316 osyield()
1317 nextYield = nanotime() + yieldDelay/2
1318 }
1319 }
1320
1321 if gp.bubble != nil {
1322 systemstack(func() {
1323 gp.bubble.changegstatus(gp, oldval, newval)
1324 })
1325 }
1326
1327 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1328
1329
1330 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1331 gp.tracking = true
1332 }
1333 gp.trackingSeq++
1334 }
1335 if !gp.tracking {
1336 return
1337 }
1338
1339
1340
1341
1342
1343
1344 switch oldval {
1345 case _Grunnable:
1346
1347
1348
1349 now := nanotime()
1350 gp.runnableTime += now - gp.trackingStamp
1351 gp.trackingStamp = 0
1352 case _Gwaiting:
1353 if !gp.waitreason.isMutexWait() {
1354
1355 break
1356 }
1357
1358
1359
1360
1361
1362 now := nanotime()
1363 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1364 gp.trackingStamp = 0
1365 }
1366 switch newval {
1367 case _Gwaiting:
1368 if !gp.waitreason.isMutexWait() {
1369
1370 break
1371 }
1372
1373 now := nanotime()
1374 gp.trackingStamp = now
1375 case _Grunnable:
1376
1377
1378 now := nanotime()
1379 gp.trackingStamp = now
1380 case _Grunning:
1381
1382
1383
1384 gp.tracking = false
1385 sched.timeToRun.record(gp.runnableTime)
1386 gp.runnableTime = 0
1387 }
1388 }
1389
1390
1391
1392
// casGToWaiting transitions gp from old to _Gwaiting, setting the wait
// reason first (it must be set before the status becomes visible).
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	// Set the wait reason before transitioning to _Gwaiting.
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}
1398
1399
1400
1401
1402
1403
1404
1405
// casGToWaitingForSuspendG transitions gp from old to _Gwaiting with a
// reason indicating it is waiting in a suspendG-related operation; such
// reasons get special handling (the reason must satisfy
// isWaitingForSuspendG).
func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForSuspendG() {
		throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
	}
	casGToWaiting(gp, old, reason)
}
1412
1413
1414
1415
1416
// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted,
// spinning until the CAS succeeds. The caller ends up owning the scan
// bit; gp is parked for preemption by the signal handler path.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
	// We never notify gp.bubble that the goroutine state has moved
	// from _Grunning to _Gpreempted. We call bubble.changegstatus
	// when canceling the preemption (casGFromPreempted) instead —
	// the bubble never observes the intermediate state.
}
1431
1432
1433
1434
// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting (reason preempted). It reports false if the CAS fails,
// meaning a scanner grabbed the goroutine first.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	gp.waitreason = waitReasonPreempted
	if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
		return false
	}
	// Notify the bubble now that the transition is final.
	if bubble := gp.bubble; bubble != nil {
		bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
	}
	return true
}
1448
1449
// stwReason is an enumeration of reasons the world is stopping.
type stwReason uint8

// Reasons to stop-the-world.
//
// Avoid reusing reasons and add new ones instead: these appear in
// traces and profiles, so stable meanings matter.
const (
	stwUnknown                     stwReason = iota // "unknown"
	stwGCMarkTerm                                   // "GC mark termination"
	stwGCSweepTerm                                  // "GC sweep termination"
	stwWriteHeapDump                                // "write heap dump"
	stwGoroutineProfile                             // "goroutine profile"
	stwGoroutineProfileCleanup                      // "goroutine profile cleanup"
	stwAllGoroutinesStack                           // "all goroutines stack trace"
	stwReadMemStats                                 // "read mem stats"
	stwAllThreadsSyscall                            // "AllThreadsSyscall"
	stwGOMAXPROCS                                   // "GOMAXPROCS"
	stwStartTrace                                   // "start trace"
	stwStopTrace                                    // "stop trace"
	stwForTestCountPagesInUse                       // "CountPagesInUse (test)"
	stwForTestReadMetricsSlow                       // "ReadMetricsSlow (test)"
	stwForTestReadMemStatsSlow                      // "ReadMemStatsSlow (test)"
	stwForTestPageCachePagesLeaked                  // "PageCachePagesLeaked (test)"
	stwForTestResetDebugLog                         // "ResetDebugLog (test)"
)
1474
// String returns the human-readable label for r.
func (r stwReason) String() string {
	return stwReasonStrings[r]
}

// isGC reports whether r is one of the GC stop-the-world phases.
func (r stwReason) isGC() bool {
	return r == stwGCMarkTerm || r == stwGCSweepTerm
}
1482
1483
1484
1485
// stwReasonStrings maps each stwReason to its label. If you add a
// reason to the enumeration, add its string here too.
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}
1505
1506
1507
// worldStop provides context about an individual stop-the-world.
type worldStop struct {
	reason           stwReason // why the world was stopped
	startedStopping  int64     // nanotime when stopping began
	finishedStopping int64     // nanotime when all Ps had stopped
	stoppingCPUTime  int64     // accumulated CPU time Ps spent stopping
}

// stopTheWorldContext temporarily holds the worldStop produced on the
// system stack in stopTheWorld, so it can be returned on the user stack
// (single writer at a time: the holder of worldsema).
var stopTheWorldContext worldStop
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and recording reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// The caller must later call startTheWorld(w) with the returned value,
// and must not hold worldsema already (it is acquired here and released
// by startTheWorld).
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		// The stop itself must run on the system stack.
		stopTheWorldContext = stopTheWorldWithSema(reason)
	})
	return stopTheWorldContext
}
1546
1547
1548
1549
// startTheWorld undoes the effects of stopTheWorld. w must be the value
// returned by the matching stopTheWorld call.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	//
	// Release worldsema with direct handoff to the next waiter, but
	// acquirem so that semrelease1 doesn't try to yield our time.
	//
	// Otherwise if e.g. ReadMemStats is being called in a loop,
	// it might stomp on other attempts to stop the world, such as
	// for starting or ending GC. The operation this blocks is
	// so heavy-weight that we should just try to be as fair as
	// possible here.
	//
	// We don't want to just allow us to get preempted between now
	// and releasing the semaphore because then we keep everyone
	// (including, for example, GCs) waiting longer.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}
1573
1574
1575
1576
// stopTheWorldGC has the same effect as stopTheWorld, but also blocks
// any concurrent GC from starting (by holding gcsema) until
// startTheWorldGC is called.
func stopTheWorldGC(reason stwReason) worldStop {
	semacquire(&gcsema)
	return stopTheWorld(reason)
}
1581
1582
1583
1584
// startTheWorldGC undoes the effects of stopTheWorldGC, releasing
// gcsema so a GC may start again.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	semrelease(&gcsema)
}
1589
1590
// Holding worldsema grants an M the right to try to stop the world.
var worldsema uint32 = 1

// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
//
// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
// being changed/enabled during a GC, remove this (upstream-style note;
// confirm).
var gcsema uint32 = 1
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller must hold worldsema and be on the system stack; the
// caller's P is not released, so it must not hold any other locks that
// a stopping P could block on. Returns the stop context to pass to
// startTheWorldWithSema.
func stopTheWorldWithSema(reason stwReason) worldStop {
	// Mark the goroutine which called stopTheWorld as waiting. This
	// lets a concurrent suspendG treat this goroutine as stopped.
	casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)

	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime() // exclude time waiting for sched.lock from start and total time metrics.
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()

	// Stop the current P.
	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--

	// Stop Ps whose threads are in a syscall: take ownership of each
	// such P's exit-syscall path and stop it (hedged: semantics of
	// setBlockOnExitSyscall/gcstopP are defined elsewhere — confirm).
	for _, pp := range allp {
		if thread, ok := setBlockOnExitSyscall(pp); ok {
			thread.gcstopP()
			thread.resume()
		}
	}

	// Stop idle Ps.
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// Wait for remaining Ps to stop voluntarily.
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Double-check that all Ps have stopped, and accumulate the CPU
	// time each P spent reaching the stop (for metrics). Any failure
	// here indicates a scheduler bug.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// Some other thread is panicking, which may have stopped
		// threads without acknowledging the stop; don't report a
		// spurious inconsistency — just deadlock here and let the
		// panicking thread produce the output.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	// Switch back to _Grunning, now that the world is stopped.
	casgstatus(getg().m.curg, _Gwaiting, _Grunning)

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}
1758
1759
1760
1761
1762
1763
1764
// startTheWorldWithSema restarts the world after a stop. It reverses
// the effects of stopTheWorldWithSema: re-attaching Ps to Ms (creating
// new Ms as needed), recording STW metrics, and waking a P to run
// pending work. now is the current time (0 to read it here); the
// (possibly updated) time is returned. Must run on the system stack
// with worldsema held.
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		// Inject any goroutines made runnable by network readiness.
		list, delta := netpoll(0) // non-blocking
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		// A GOMAXPROCS change was requested while stopped; apply it.
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	// Hand each P with local work to an M: wake its parked M, or
	// create a new one if it has none.
	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P.  Do not start another M below.
			newm(nil, p, -1)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wakeup an additional proc in case we have excessive runnable
	// goroutines in local queues or in the global queue. If we don't,
	// the proc will park itself. If we have lots of excessive work,
	// resetspinning will unpark additional procs as necessary.
	wakep()

	releasem(mp)

	return now
}
1833
1834
1835
1836 func usesLibcall() bool {
1837 switch GOOS {
1838 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1839 return true
1840 }
1841 return false
1842 }
1843
1844
1845
1846 func mStackIsSystemAllocated() bool {
1847 switch GOOS {
1848 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1849 return true
1850 }
1851 return false
1852 }
1853
1854
1855
1856 func mstart()
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
// mstart0 is the Go entry-point for new Ms (called from the assembly
// mstart). It sets up the g0 stack bounds and never returns: after
// mstart1, it tears the M down via mexit.
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		//
		// Note: these bounds may not be very accurate.
		// We set hi to &size, but there are things above
		// it. The 1024 is supposed to compensate this,
		// but is somewhat arbitrary.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		gp.stack.lo = gp.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	gp.stackguard0 = gp.stack.lo + stackGuard
	// This is the g0, so we can also call go:systemstack
	// functions, which check stackguard1.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always
		// system-allocate the stack, but put it in gp.stack before
		// mstart, so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}
1904
1905
1906
1907
1908
// mstart1 finishes M initialization (minit, signal setup, the M's
// start function) and enters the scheduler. It must run on g0.
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Set up m.g0.sched as a label returning to just after the
	// mstart1 call, for use by goexit0 and mcall below. We're never
	// coming back to mstart1 after we call schedule, so other calls
	// can reuse the current frame. And goexit0 does a gogo that needs
	// to return from mstart1 and let mstart0 exit the thread.
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = sys.GetCallerPC()
	gp.sched.sp = sys.GetCallerSP()

	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if gp.m == &m0 {
		mstartm0()
	}

	if debug.dataindependenttiming == 1 {
		sys.EnableDIT()
	}

	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	// m0's P was acquired during bootstrap; every other M gets the P
	// that was handed to it via nextp.
	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}
1949
1950
1951
1952
1953
1954
1955
// mstartm0 performs one-time initialization that must run on m0:
// creating an extra M for cgo/Windows callbacks and installing signal
// handlers.
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// Windows needs one even without cgo, so that callbacks from
	// syscall.NewCallback can be serviced. See issue #6751 context in
	// upstream sources — TODO confirm exact rationale.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}
1966
1967
1968
1969
// mPark blocks the current M until it is woken via notewakeup on
// m.park. The note is cleared after waking so the M can park again
// later.
func mPark() {
	gp := getg()

	// Scrub secrets from the signal stack before sleeping, when the
	// runtimesecret experiment is on.
	if goexperiment.RuntimeSecret {
		eraseSecretsSignalStk()
	}
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
// mexit tears down and exits the current thread.
//
// osStack reports whether the thread is running on an OS-allocated
// stack (which the runtime therefore must not free and must not touch
// after releasing the m).
//
// Don't try to get very far here: the M is mid-destruction.
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// The main thread cannot exit the process on some platforms,
		// so instead of terminating, hand off the P and park forever.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack, if one was allocated.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		if valgrindenabled {
			valgrindDeregisterStack(mp.gsignal.valgrindStackID)
			mp.gsignal.valgrindStackID = 0
		}
		// On some platforms the signal handler briefly runs on this M
		// without a proper gsignal; clearing the pointer avoids a
		// dangling reference — TODO confirm exact platform rationale.
		mp.gsignal = nil
	}

	// Free vgetrandom state, if any.
	vgetrandomDestroy(mp)

	// Drop the self-reference so the m can be collected once freed.
	mp.self.clear()

	// Remove this m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Put the m on sched.freem so a later allocm can free its g0 stack
	// once the OS thread is truly gone (signalled via freeWait).
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	// Flush this m's statistics into the global totals.
	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Release the P so another M can run on it.
	handoffp(releasep())

	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// Account for a preemption signal that was sent to this thread
		// but will never be delivered.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Platform-specific thread teardown.
	mdestroy(mp)

	if osStack {
		// The OS owns the stack; just mark the m as unreferenced and
		// return — the system call that created the thread will exit
		// it.
		mp.freeWait.Store(freeMRef)
		return
	}

	// The runtime owns the g0 stack, so exit the thread via a helper
	// that signals freeWait when the stack is no longer in use.
	exitThread(&mp.freeWait)
}
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
// forEachP calls fn(p) for every P at a GC-safe point, blocking until
// all Ps have run fn. The caller's goroutine is parked in a waiting
// state (with the given reason) for the duration, so the GC can
// suspend/scan it while it waits.
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg

		// Mark the user goroutine as waiting so it is preemptible by
		// suspendG while forEachPInternal blocks; restore _Grunning
		// before returning to user code.
		casGToWaitingForSuspendG(gp, _Grunning, reason)
		forEachPInternal(fn)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
// forEachPInternal is the implementation of forEachP: it arranges for
// fn to run on every P at a safe point and waits for completion. It
// must run on the system stack (called via forEachP's systemstack).
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	// Every P except our own must check in.
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe-point function when they next stop.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Run fn directly for any P currently idle; CAS guards against a
	// racing P picking itself up and running fn concurrently.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for our own P.
	fn(pp)

	// Force Ps stuck in a system call to give up their P so fn can run
	// on it.
	for _, p2 := range allp {
		if atomic.Load(&p2.runSafePointFn) != 1 {
			// Already handled (idle path above, or the P ran it).
			continue
		}
		if thread, ok := setBlockOnExitSyscall(p2); ok {
			thread.takeP()
			thread.resume()
			handoffp(p2)
		}
	}

	// Wait for the remaining Ps, re-preempting periodically in case a
	// preemption request was missed.
	if wait {
		for {
			// 100µs sleep between retries.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
// runSafePointFn runs the pending forEachP safe-point function on the
// current M's P, if one has been requested, and signals the waiter
// when all Ps have checked in.
func runSafePointFn() {
	p := getg().m.p.ptr()

	// Resolve the race with forEachPInternal's idle-P sweep: only the
	// side that wins the CAS runs fn for this P.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}
2258
2259
2260
2261
// cgoThreadStart points at the C function used to start new OS threads
// when running with cgo — presumably wired up by the runtime/cgo
// package; confirm against that package's linkname declarations.
var cgoThreadStart unsafe.Pointer

// cgothreadstart is the argument block passed to the cgo thread-start
// function: the g0 of the new M, its TLS slot, and the function the
// new thread should run.
type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
// allocm allocates a new m unassociated with any thread.
//
// It can use pp for allocation context if needed (pp may be nil; in
// that case the caller's P, if any, is used). fn is recorded as the
// new m's mstartfn. id is the m's ID, or -1 to have one reserved.
//
// It also frees the g0 stacks of any previously exited Ms whose
// threads are fully gone.
func allocm(pp *p, fn func(), id int64) *m {
	// Hold allocmLock for read: a writer (holding it exclusively) can
	// thereby block all M creation.
	allocmLock.rlock()

	// Disable preemption: we may temporarily borrow pp below, and
	// releasing it must pair on the same thread.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		acquirep(pp) // temporary borrow p for mallocs in this function
	}

	// Free g0 stacks of dead Ms whose OS threads have finished
	// (freeWait != freeMWait). Ms still waiting stay on the list.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			// Wait for the thread to signal it is done with the stack.
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// Flush any remaining trace state for the dead thread.
			// Holding sched.lock here is sufficient per the tracer's
			// locking rules — TODO confirm against traceThreadDestroy.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			// Free the stack only if the runtime allocated it
			// (freeMStack); freeMRef means the OS owns it.
			if wait == freeMStack {
				// stackfree must run on the system stack to avoid
				// growing our own stack mid-free.
				systemstack(func() {
					stackfree(freem.g0.stack)
					if valgrindenabled {
						valgrindDeregisterStack(freem.g0.valgrindStackID)
						freem.g0.valgrindStackID = 0
					}
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := &new(mPadded).m
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// In case of cgo or on OSes with system-allocated thread stacks,
	// g0's stack comes from the system (malg(-1) allocates none).
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	// Release the P if it was only borrowed above.
	if pp == gp.m.p.ptr() {
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
// needm is called when a cgo callback happens on a thread with no m
// (a thread not created by Go). It borrows an extra m from the extraM
// list, installs it as the current thread's m, and marks its g as
// executing a system call. signal indicates the callback is running in
// a signal handler (which restricts what setup may be done).
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// No extra M exists yet: a cgo callback arrived before any cgo
		// call could create one. This is fatal; we cannot allocate
		// here (no m), so write the error directly and exit.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save and block signals before installing the m: the signal
	// handler may run as soon as setg succeeds, and it needs a fully
	// set-up m. The saved mask is restored by dropm.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// Take an extra m from the list (spins/sleeps until one is
	// available).
	mp, last := getExtraM()

	// If this was the last extra m, remember to create a replacement
	// once we are running Go code and can allocate.
	mp.needextram = last

	// Store the original signal mask for use by dropm/minit.
	mp.sigmask = sigmask

	// Install TLS on some platforms so the current thread can find mp.
	osSetupTLS(mp)

	// Install g (= m->g0) and set the stack bounds to the current
	// thread's stack.
	setg(mp.g0)
	sp := sys.GetCallerSP()
	callbackUpdateSystemStack(mp, sp, signal)

	// This M is now running Go code (not pure C); should it be reused
	// for another callback, dropm's cleanup applies.
	mp.isExtraInC = false

	// Initialize this thread to use the m.
	asminit()
	minit()

	// Tracing is unusable from within a signal handler, so only
	// acquire the trace locker on the non-signal path. It must be
	// acquired before the status transition below — presumably to
	// keep the trace event ordered with the G's state change; confirm
	// against the tracer's documentation.
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// mp.curg is the placeholder g parked in _Gdeadextra; it now
	// represents the C thread inside a "system call".
	casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
	sched.ngsys.Add(-1)

	// Account this g as being in a syscall without a P.
	addGSyscallNoP(mp)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	mp.isExtraInSig = signal
}
2487
2488
2489
2490
// needAndBindM is like needm, but additionally binds the borrowed m to
// the current C thread (via a pthread key destructor) when the cgo
// side has created the key — so repeated callbacks on the same thread
// reuse the same m.
func needAndBindM() {
	needm(false)

	if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
		cgoBindM()
	}
}
2498
2499
2500
2501
// newextram allocates m structures for cgo callbacks: one per waiter
// recorded by lockextra since the last call, or a single one if the
// extra-M list is empty and nobody is waiting.
func newextram() {
	c := extraMWaiters.Swap(0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else if extraMLength.Load() == 0 {
		// Make sure there is at least one extra M available.
		oneNewExtraM()
	}
}
2513
2514
// oneNewExtraM allocates one m and goroutine for use by non-Go threads
// making cgo callbacks, and pushes it onto the extra-M list.
func oneNewExtraM() {
	// The goroutine is given a small stack and a sched context that
	// makes it look like it called a system call from goexit — so a
	// traceback from a cgo callback terminates cleanly at goexit.
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp

	// _Gdeadextra marks a g parked on the extra-M list: dead as far as
	// the GC is concerned, but reserved for cgo callbacks.
	casgstatus(gp, _Gidle, _Gdeadextra)
	gp.m = mp
	mp.curg = gp
	mp.isextra = true
	// Until needm runs, the thread is considered to be executing C.
	mp.isExtraInC = true
	// Lock g to m to simulate LockOSThread for the callback thread.
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	// Add the g to allg so the GC scans its (currently unused) stack.
	allgadd(gp)

	// Count it as a system goroutine while it sits idle on the list.
	sched.ngsys.Add(1)

	// Publish on the extra-M list.
	addExtraM(mp)
}
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
// dropm is the inverse of needm: when a cgo callback returns and the
// thread no longer needs its borrowed m, dropm puts the m back on the
// extra-M list, detaches it from the thread, and restores the signal
// mask saved by needm.
func dropm() {
	// Return mp.curg to _Gdeadextra and the m to the list so another
	// callback thread can use them.
	mp := getg().m

	// Tracing is unusable if the callback ran in a signal handler;
	// mirror needm's conditional trace acquisition.
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// The placeholder g leaves its fake "system call".
	casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)
	decGSyscallNoP(mp)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// Undo the syscalltick increment that accompanied the fake
	// syscall entry, so the next needm/dropm pair balances — TODO
	// confirm which counterpart performs the increment.
	mp.syscalltick--

	// Reset per-g trace state; the g will represent a brand-new
	// "goroutine" on the next callback.
	mp.curg.trace.reset()

	// Flush this thread's trace state: from the tracer's perspective
	// the thread is about to disappear.
	if traceEnabled() || traceShuttingDown() {
		// sched.lock protects the tracer's per-thread bookkeeping
		// here — presumably the same rule as in allocm; confirm
		// against traceThreadDestroy.
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit tears down signal handling, then
	// restore the mask saved by needm once the m is fully released.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()

	setg(nil)

	// Clear g0's stack bounds: they described this (foreign) thread's
	// stack, which is meaningless once the m is parked on the list.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0
	mp.g0StackAccurate = false

	putExtraM(mp)

	msigrestore(sigmask)
}
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
// cgoBindM binds the current m to the current C thread by storing the
// g0 pointer in thread-local storage on the C side (via _cgo_bindm),
// so the thread keeps its m across callbacks. Not supported on
// Windows or Plan 9.
func cgoBindM() {
	if GOOS == "windows" || GOOS == "plan9" {
		fatal("bindm in unexpected GOOS")
	}
	g := getg()
	if g.m.g0 != g {
		fatal("the current g is not g0")
	}
	if _cgo_bindm != nil {
		asmcgocall(_cgo_bindm, unsafe.Pointer(g))
	}
}
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
// getm returns the current m as an opaque uintptr. Exposed (rather
// than returning *m) so callers outside the runtime's type system can
// hold the value without creating a GC-visible pointer.
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}
2728
var (
	// extraM is the head of a lock-protected linked list of extra Ms
	// (linked via m.schedlink), used for cgo callbacks on non-Go
	// threads. It is a raw uintptr rather than a typed pointer because
	// the value 1 is used as the "locked" sentinel (see lockextra).
	extraM atomic.Uintptr
	// extraMLength is the number of Ms currently on the extraM list.
	extraMLength atomic.Uint32
	// extraMWaiters counts callers spinning in lockextra waiting for
	// an extra M to appear; newextram uses it to know how many to make.
	extraMWaiters atomic.Uint32

	// extraMInUse counts extra Ms currently borrowed by threads
	// (between getExtraM and putExtraM).
	extraMInUse atomic.Uint32
)
2745
2746
2747
2748
2749
2750
2751
2752
// lockextra locks the extra-M list and returns its head. The list is
// "locked" by storing the sentinel value 1 in extraM; the caller must
// release it with unlockextra. If nilokay is false, lockextra waits
// (sleeping, and registering itself in extraMWaiters once) until the
// list is non-empty.
//
// Runs without a g: only _no_g-suffixed primitives may be used.
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			// Someone else holds the list lock; spin politely.
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Count ourselves as a waiter exactly once, so
				// newextram knows how many Ms are needed.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}
2781
2782
// unlockextra publishes mp as the new head of the extra-M list
// (releasing the lock taken by lockextra) and adjusts the list length
// by delta.
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}
2787
2788
2789
2790
2791
2792
2793
2794
// getExtraM pops an m off the extra-M list, blocking until one is
// available. last reports whether the list is now empty (so the caller
// should arrange for a replacement).
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}
2801
2802
2803
2804
2805
// putExtraM returns a borrowed extra m (obtained via getExtraM) to the
// list.
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}
2810
2811
2812
2813
// addExtraM pushes mp onto the head of the extra-M list.
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
2819
var (
	// allocmLock is held for read while creating new Ms in allocm.
	// Taking it for write therefore blocks all M creation.
	allocmLock rwmutex

	// execLock serializes exec and thread creation (newm1 takes it for
	// read; exec presumably takes it for write) so new threads are not
	// created mid-exec — confirm against the exec path.
	execLock rwmutex
)

// Messages written directly (without allocation) when thread creation
// fails.
const (
	failthreadcreate  = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)
2838
2839
2840
2841
// newmHandoff hands off m-creation requests to the template thread,
// for callers that cannot safely create an OS thread themselves (see
// newm).
var newmHandoff struct {
	lock mutex

	// newm is a singly-linked list of Ms (via m.schedlink) that need
	// OS threads started for them.
	newm muintptr

	// waiting is true when the template thread is parked on wake;
	// a producer that queues work while waiting is set must wake it.
	waiting bool
	wake    note

	// haveTemplateThread is 1 once the template thread has been
	// started (written with CAS in startTemplateThread); it is never
	// reset.
	haveTemplateThread uint32
}
2859
2860
2861
2862
2863
2864
2865
2866
// newm creates a new m that will start a new OS thread and begin by
// running fn (or the scheduler if fn is nil). pp, if non-nil, is
// handed to the new m via nextp. id is the m ID, or -1 to reserve one.
func newm(fn func(), pp *p, id int64) {
	// Disable preemption: we publish mp below (via newmHandoff or the
	// started thread), and being preempted between allocm and newm1
	// could leave a half-published m visible to a StW.
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We're on a locked or cgo thread, whose kernel state may have
		// been tampered with (unusual signal mask, unshare'd
		// namespaces, etc.), so creating a thread directly from here
		// could propagate that state. Hand the request to the template
		// thread, which has known-clean state. (Plan 9 is excluded —
		// presumably thread creation is safe there; confirm upstream.)
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		// The m is now owned by the template thread; keep preemption
		// disabled until after the handoff is complete.
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}
2915
// newm1 starts the OS thread for mp: through the cgo thread-start
// shim when running with cgo (so foreign code sees a properly created
// thread), otherwise via newosproc. Thread creation is serialized
// against exec via execLock.
func newm1(mp *m) {
	if iscgo && _cgo_thread_start != nil {
		var ts cgothreadstart
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		// Tell the sanitizers the C side will read ts.
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock() // Prevent process clone.
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock() // Prevent process clone.
	newosproc(mp)
	execLock.runlock()
}
2937
2938
2939
2940
2941
// startTemplateThread starts the template thread if it is not already
// running. It is safe to call repeatedly; only the first caller (the
// CAS winner) actually creates the thread. No-op on wasm, which has no
// threads.
func startTemplateThread() {
	if GOARCH == "wasm" { // no threads on wasm yet
		return
	}

	// Disable preemption so the template thread cannot observe a
	// half-initialized state of its creator.
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
// templateThread runs forever on an m with known-good kernel thread
// state, creating OS threads on behalf of callers (via newmHandoff)
// that cannot safely do so themselves. It counts as a system m
// (sched.nmsys) so it does not trip the deadlock detector.
func templateThread() {
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			// Detach the whole pending list, then start each m
			// without holding the lock.
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		// Park until a producer queues more work and wakes us.
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}
2996
2997
2998
// stopm puts the current m on the idle-m list and parks it until it is
// handed a P (via m.nextp) and woken, at which point it acquires that
// P and returns. The m must hold no locks, no P, and must not be
// spinning.
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3019
// mspinning is the mstartfn used by startm when it creates a new m to
// run in spinning mode.
func mspinning() {
	// startm's caller incremented nmspinning on our behalf; just mark
	// the flag.
	getg().m.spinning = true
}
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
// startm schedules some m to run pp (creating one if necessary).
// If pp is nil, it tries to get an idle P; if none is available it
// returns without doing anything. If spinning is set, the caller has
// already incremented nmspinning and the chosen/created m takes over
// that count (it must later call resetspinning or pass the count on).
// lockheld indicates the caller already holds sched.lock; the function
// preserves that invariant on return.
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption: once we own an idle P (or one passed in),
	// being stopped-the-world between here and handing the P to an m
	// could strand the P and deadlock checkdead — keep the window
	// non-preemptible.
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// A spinning m must be given a P immediately; the caller
			// is responsible for providing one.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No idle m: create a new one. Reserve the m ID while sched.lock
		// is still held, then drop the lock across the (slow, possibly
		// allocating) newm call.
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The new m inherits the spinning count.
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock)
		}
		// Ownership of the P has transferred to the new m; safe to
		// re-enable preemption.
		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning; the woken m now owns it.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)
	// Ownership transfer complete.
	releasem(mp)
}
3133
3134
3135
3136
3137
// handoffp hands off ownership of pp from a syscall-bound or exiting
// m. It starts an m to run the P if the P has (or might soon have)
// work; otherwise it parks the P on the idle list. Always called
// without a P, so no write barriers are allowed in callees that
// require one — pp is in a limbo state during the handoff.
func handoffp(pp *p) {
	// handoffp must start an m in any situation where findRunnable
	// would return a G to run on pp; each check below mirrors one of
	// findRunnable's sources of work.

	// Local or global runnable Gs.
	if !runqempty(pp) || !sched.runq.empty() {
		startm(pp, false, false)
		return
	}
	// The trace reader needs a P to run on.
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// GC mark work may be available.
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
		startm(pp, false, false)
		return
	}
	// No local work, but check for spinning need: if no one is
	// spinning or idle, conservatively start a spinning m to look for
	// work.
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) { // TODO: fast atomic
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		// Stop-the-world in progress: contribute the P directly.
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	// Run a pending forEachP safe-point function on the P's behalf.
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if !sched.runq.empty() {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// If this is the last running P and nobody is polling the network,
	// we need to keep a P running to service the netpoller.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// The scheduler lock cannot be held when calling wakeNetPoller
	// below, so compute the P's pending timer wakeup first.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
// wakep tries to add one more spinning m to look for work, if there
// are idle Ps and no m is already spinning. Called when new work may
// have been made runnable.
func wakep() {
	// Be conservative about spinning threads: only proceed if nobody
	// is spinning and we win the transition 0 -> 1.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until ownership of the P and the spinning
	// count has transferred to startm; otherwise a preemption here
	// could strand the P.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		// No idle P: undo the nmspinning increment.
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}

	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
3254
3255
3256
// stoplockedm parks the current m, which is locked to a goroutine that
// is not currently runnable, until that goroutine becomes runnable
// again (at which point some other m hands us a P via nextp and wakes
// us).
func stoplockedm() {
	gp := getg()

	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Schedule another M to run this p.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	mPark()
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3280
3281
3282
3283
3284
// startlockedm hands our P to the m that gp is locked to and wakes
// that m, then stops the current m (since gp can only run on its
// locked m, not here).
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// Directly hand off our P to the locked m.
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	notewakeup(&mp.park)
	stopm()
}
3300
3301
3302
// gcstopm stops the current m for a stop-the-world: it surrenders its
// P to the GC stop accounting and parks via stopm. Only valid while
// sched.gcwaiting is set.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		gp.m.spinning = false
		// OK to just undo the increment: the world is stopping, so
		// there is no work to be conservative about missing.
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
// execute schedules gp on the current M/P and jumps into its saved
// context via gogo. It never returns. inheritTime is true when gp
// should run in the current time slice rather than starting a new one
// (so schedtick is not incremented).
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure gp's stack has been recorded in the goroutine
		// profile before it starts running (it could mutate its stack
		// immediately).
		tryRecordGoroutineProfile(gp, nil, osyield)
	}

	// Wire up m and g before the status transition; once gp is
	// _Grunning it may be inspected by others.
	mp.curg = gp
	gp.m = mp
	gp.syncSafePoint = false // Clear the flag, which may have been set by morestack.
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		mp.p.ptr().schedtick++
	}

	// Sync the thread's data-independent-timing state with the
	// goroutine's preference (only when DIT isn't forced globally).
	if sys.DITSupported && debug.dataindependenttiming != 1 {
		if gp.ditWanted && !mp.ditEnabled {
			sys.EnableDIT()
			mp.ditEnabled = true
		} else if !gp.ditWanted && mp.ditEnabled {
			// Disabling here means a goroutine that enabled DIT keeps
			// it only while it runs on this thread — intentional
			// per-g semantics, presumably; confirm against the
			// runtime/debug DIT documentation.
			sys.DisableDIT()
			mp.ditEnabled = false
		}
	}

	// Re-arm the CPU profiler if the rate changed since this thread
	// last checked.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	gogo(&gp.sched)
}
3391
3392
3393
3394
3395
// findRunnable finds a runnable goroutine to execute, blocking (via
// stopm) until one is available. It tries, in order: trace reader, GC
// worker, occasionally the global queue (for fairness), the local run
// queue, the global queue, netpoll, and work stealing; failing all of
// that it releases its P, parks, and retries. tryWakeP indicates the
// returned goroutine is special (trace reader or GC worker) so the
// caller should consider waking another P for ordinary work.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

	// The conditions here and in handoffp must agree: if findRunnable
	// would return a G to run, handoffp must start an M.
top:
	// We may have collected an allp snapshot below; it is stale once
	// we loop, so drop it.
	mp.clearAllpSnapshot()

	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// now and pollUntil are saved for work stealing later, which may
	// steal timers. It's important that between now and then, nothing
	// blocks, so these numbers remain mostly relevant.
	now, pollUntil, _ := pp.timers.check(0, nil)

	// Try the trace reader first — it needs to run promptly.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// Try to schedule a GC mark worker.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Check the global runnable queue once in a while (every 61st
	// tick) to ensure fairness; otherwise two goroutines could
	// ping-pong in the local queue forever.
	if pp.schedtick%61 == 0 && !sched.runq.empty() {
		lock(&sched.lock)
		gp := globrunqget()
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake the finalizer goroutine if it's ready.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}

	// Wake the cleanup goroutines if needed.
	if gcCleanups.needsWake() {
		gcCleanups.wake()
	}

	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// local runq
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// global runq: take a batch, keep one, push the rest locally.
	if !sched.runq.empty() {
		lock(&sched.lock)
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp != nil {
			if runqputbatch(pp, &q); !q.empty() {
				throw("Couldn't put Gs into empty local runq")
			}
			return gp, false, false
		}
	}

	// Poll network (non-blocking). This is an optimization before
	// resorting to stealing; netpoll here is cheap if there is no
	// ready I/O. pollingNet ensures only one P polls at a time.
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
		list, delta := netpoll(0) // non-blocking
		sched.pollingNet.Store(0)
		if !list.empty() { // non-empty for at most one P per iteration
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Spinning Ms: steal work from other Ps. Limit the number of
	// spinning Ms to half of busy Ps to bound CPU burn when
	// parallelism is low.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			// Successfully stole.
			return gp, inheritTime, false
		}
		if newWork {
			// There may be new timer or GC work; restart to discover it.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer to wait for.
			pollUntil = w
		}
	}

	// We have nothing to do. If we're in the GC mark phase, try to run
	// an idle-priority mark worker.
	if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// wasm only: give the event-handler hook a chance to produce work
	// or ask us to retry.
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// Before dropping the P, snapshot allp and the masks: the
	// snapshots stay consistent even if allp changes under us
	// (snapshotAllp keeps the backing array alive), letting us check
	// other Ps' queues after we have no P.
	allpSnapshot := mp.snapshotAllp()
	// Snapshot the masks. Value changes are OK, but we can't allow
	// len to change out from under us.
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// Re-check all the sources of work under sched.lock before
	// committing to idling the P; otherwise a submission racing with
	// us could be missed.
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if !sched.runq.empty() {
		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
		unlock(&sched.lock)
		if gp == nil {
			throw("global runq empty with non-zero runqsize")
		}
		if runqputbatch(pp, &q); !q.empty() {
			throw("Couldn't put Gs into empty local runq")
		}
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// See "Delicate dance" comment in upstream sources: a work
		// submitter asked for a spinning M; oblige before idling.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findRunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)

	// Transitioning from spinning to non-spinning: we must re-check
	// all work sources once more, because submitters observe spinning
	// Ms and skip waking anyone while one exists. The order here —
	// decrement nmspinning first, then re-check — pairs with the
	// submission path doing the reverse.
	wasSpinning := mp.spinning
	if mp.spinning {
		mp.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("findRunnable: negative nmspinning")
		}

		// Re-check global queue, grabbing a fresh idle P for it.
		lock(&sched.lock)
		if !sched.runq.empty() {
			pp, _ := pidlegetSpinning(0)
			if pp != nil {
				gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
				unlock(&sched.lock)
				if gp == nil {
					throw("global runq empty with non-zero runqsize")
				}
				if runqputbatch(pp, &q); !q.empty() {
					throw("Couldn't put Gs into empty local runq")
				}
				acquirep(pp)
				mp.becomeSpinning()
				return gp, false, false
			}
		}
		unlock(&sched.lock)

		// Re-check all per-P runqueues using the snapshots.
		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()
			goto top
		}

		// Re-check for idle-priority GC work.
		pp, gp := checkIdleGCNoP()
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()

			// Run the idle worker.
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}

		// Re-check timers on all Ps so the netpoll sleep below uses
		// the earliest deadline.
		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
	}

	// Poll network until next timer (blocking). Only one M at a time
	// performs the blocking poll (guarded by swapping lastpoll to 0).
	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
		sched.pollUntil.Store(pollUntil)
		if mp.p != 0 {
			throw("findRunnable: netpoll with p")
		}
		if mp.spinning {
			throw("findRunnable: netpoll with spinning")
		}
		delay := int64(-1)
		if pollUntil != 0 {
			if now == 0 {
				now = nanotime()
			}
			delay = pollUntil - now
			if delay < 0 {
				delay = 0
			}
		}
		if faketime != 0 {
			// When using fake time, just poll.
			delay = 0
		}
		list, delta := netpoll(delay) // block until new work is available
		// Refresh now; the blocking poll may have taken a while.
		now = nanotime()
		sched.pollUntil.Store(0)
		sched.lastpoll.Store(now)
		if faketime != 0 && list.empty() {
			// Using fake time and nothing is ready; stop M.
			// When all M's stop, checkdead will call timejump.
			stopm()
			goto top
		}
		lock(&sched.lock)
		pp, _ := pidleget(now)
		unlock(&sched.lock)
		if pp == nil {
			// No P available: inject the ready list for others to run.
			injectglist(&list)
			netpollAdjustWaiters(delta)
		} else {
			acquirep(pp)
			if !list.empty() {
				gp := list.pop()
				injectglist(&list)
				netpollAdjustWaiters(delta)
				trace := traceAcquire()
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.ok() {
					trace.GoUnpark(gp, 0)
					traceRelease(trace)
				}
				return gp, false, false
			}
			if wasSpinning {
				mp.becomeSpinning()
			}
			goto top
		}
	} else if pollUntil != 0 && netpollinited() {
		// Another M is blocked in netpoll; kick it if our deadline is
		// earlier than what it is sleeping for.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
			netpollBreak()
		}
	}
	stopm()
	goto top
}
3806
3807
3808
3809
3810
// pollWork reports whether there is non-background work this P could
// be doing: runnable Gs in the global or local queue, or ready network
// I/O (which it injects as a side effect).
func pollWork() bool {
	if !sched.runq.empty() {
		return true
	}
	p := getg().m.p.ptr()
	if !runqempty(p) {
		return true
	}
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() {
			injectglist(&list)
			netpollAdjustWaiters(delta)
			return true
		}
	}
	return false
}
3828
3829
3830
3831
3832
3833
3834
// stealWork attempts to steal a runnable goroutine or timer from any
// P. It makes several passes over all Ps in random order; on the final
// pass it also runs other Ps' timers and considers stealing runnext.
//
// If newWork is true, new work may have been readied elsewhere and the
// caller should loop and re-check all sources. rnow/pollUntil return
// the (possibly updated) current time and earliest timer deadline.
func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
	pp := getg().m.p.ptr()

	ranTimer := false

	const stealTries = 4
	for i := 0; i < stealTries; i++ {
		// Only on the last round do we steal timers and runnext —
		// cheaper rounds first.
		stealTimersOrRunNextG := i == stealTries-1

		// Random enumeration order avoids all thieves converging on
		// the same victim.
		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
			if sched.gcwaiting.Load() {
				// GC work may be available.
				return nil, false, now, pollUntil, true
			}
			p2 := allp[enum.position()]
			if pp == p2 {
				continue
			}

			// Steal timers from p2. timerpMask tells whether p2 may
			// have timers at all; checking it avoids touching a
			// remote P's timer state unnecessarily, since
			// timers.check can be comparatively expensive.
			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
				tnow, w, ran := p2.timers.check(now, nil)
				now = tnow
				if w != 0 && (pollUntil == 0 || w < pollUntil) {
					pollUntil = w
				}
				if ran {
					// Running a timer may have readied Gs, possibly
					// into our own runq — check it before moving on.
					if gp, inheritTime := runqget(pp); gp != nil {
						return gp, inheritTime, now, pollUntil, ranTimer
					}
					ranTimer = true
				}
			}

			// Don't bother stealing runnable Gs from an idle P.
			if !idlepMask.read(enum.position()) {
				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
					return gp, false, now, pollUntil, ranTimer
				}
			}
		}
	}

	// Found nothing. If we ran a timer, though, new work may exist.
	return nil, false, now, pollUntil, ranTimer
}
3903
3904
3905
3906
3907
3908
// checkRunqsNoP re-checks all Ps' run queues (from a snapshot taken
// while we still had a P) and, if any non-idle P has runnable work,
// grabs an idle P for the caller to use. Returns nil if no work was
// found or no idle P was available. Called with no P, so the snapshot
// arguments must already be pinned by the caller.
func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
	for id, p2 := range allpSnapshot {
		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
			lock(&sched.lock)
			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				// Can't get a P: give up entirely, someone else will
				// run the work.
				unlock(&sched.lock)
				return nil
			}
			unlock(&sched.lock)
			// Note: the returned pp is not necessarily p2; the caller
			// will steal from wherever the work is.
			return pp
		}
	}

	return nil
}
3927
3928
3929
3930
3931 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3932 for id, p2 := range allpSnapshot {
3933 if timerpMaskSnapshot.read(uint32(id)) {
3934 w := p2.timers.wakeTime()
3935 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3936 pollUntil = w
3937 }
3938 }
3939 }
3940
3941 return pollUntil
3942 }
3943
3944
3945
3946
3947
// checkIdleGCNoP re-checks for idle-priority GC mark work when the
// caller has no P. On success it returns an idle P (with the idle
// mark worker slot claimed) and the worker goroutine to run; the
// caller must set gcMarkWorkerMode and ready the g. Returns nil, nil
// if there is no work or no P.
func checkIdleGCNoP() (*p, *g) {
	// Quick unsynchronized checks first; both are re-verified under
	// the lock below since they can change at any time.
	if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
		return nil, nil
	}
	if !gcShouldScheduleWorker(nil) {
		return nil, nil
	}

	// Lock ordering note: we take sched.lock first and then claim the
	// worker slot / pop the node, undoing each step in reverse if a
	// later one fails, so we never hold a claimed resource while a
	// racing checker could miss it — TODO confirm against the
	// gcController's documented ordering.
	lock(&sched.lock)
	pp, now := pidlegetSpinning(0)
	if pp == nil {
		unlock(&sched.lock)
		return nil, nil
	}

	// Re-check under the lock.
	if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
		pidleput(pp, now)
		unlock(&sched.lock)
		return nil, nil
	}

	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		pidleput(pp, now)
		unlock(&sched.lock)
		gcController.removeIdleMarkWorker()
		return nil, nil
	}

	unlock(&sched.lock)

	return pp, node.gp.ptr()
}
4005
4006
4007
4008
// wakeNetPoller ensures some thread will notice a timer due at time
// when: if a thread is blocked in the netpoller and may sleep past
// when, it is interrupted with netpollBreak; otherwise another M is
// woken so it can pick up the timer.
func wakeNetPoller(when int64) {
	if sched.lastpoll.Load() == 0 {
		// A thread is currently blocked in the netpoller.
		// sched.pollUntil is zero or the time the current poll is
		// expected to run until; only break the poll if it would
		// sleep past when. This may cause a spurious wakeup but
		// never misses one.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > when {
			netpollBreak()
		}
	} else {
		// No thread is in the network poller; wake one up so it
		// can handle new timers.
		if GOOS != "plan9" { // plan9 is excluded here — see upstream history for why.
			wakep()
		}
	}
}
4027
// resetspinning clears the current M's spinning state and decrements
// the global count of spinning Ms. Since this M may have just found
// work, it calls wakep so that any remaining work is not left without
// a spinning M to discover it.
func resetspinning() {
	gp := getg()
	if !gp.m.spinning {
		throw("resetspinning: not a spinning m")
	}
	gp.m.spinning = false
	nmspinning := sched.nmspinning.Add(-1)
	if nmspinning < 0 {
		throw("findRunnable: negative nmspinning")
	}
	// M wakeup policy is deliberately somewhat conservative, so check
	// here whether we need to wake another P.
	wakep()
}
4043
4044
4045
4046
4047
4048
4049
4050
4051
// injectglist makes every waiting G on glist runnable, puts each on
// some run queue, and clears glist. If the current M has no P, all Gs
// go on the global queue and idle Ps are started to serve them.
// Otherwise one G per idle P is pushed to the global queue (so started
// Ps have work) and the rest go on the current P's local queue,
// spilling to the global queue on overflow.
func injectglist(glist *gList) {
	if glist.empty() {
		return
	}

	// Mark all the goroutines as runnable before putting them on any
	// run queue, recording the tail as we go.
	var tail *g
	trace := traceAcquire()
	for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		tail = gp
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// Turn the gList into a gQueue and empty the caller's list.
	q := gQueue{glist.head, tail.guintptr(), glist.size}
	*glist = gList{}

	// startIdle starts up to n Ms/Ps to run work from the global queue.
	startIdle := func(n int32) {
		for ; n > 0; n-- {
			mp := acquirem() // prevent preemption around startm
			lock(&sched.lock)

			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				unlock(&sched.lock)
				releasem(mp)
				break
			}

			startm(pp, false, true)
			unlock(&sched.lock)
			releasem(mp)
		}
	}

	pp := getg().m.p.ptr()
	if pp == nil {
		// No local P: everything goes on the global queue.
		n := q.size
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
		startIdle(n)
		return
	}

	// Hand one G per idle P to the global queue so freshly started
	// Ps have something to run.
	var globq gQueue
	npidle := sched.npidle.Load()
	for ; npidle > 0 && !q.empty(); npidle-- {
		g := q.pop()
		globq.pushBack(g)
	}
	if !globq.empty() {
		n := globq.size
		lock(&sched.lock)
		globrunqputbatch(&globq)
		unlock(&sched.lock)
		startIdle(n)
	}

	// Put the rest on the local run queue; anything that doesn't fit
	// spills to the global queue.
	if runqputbatch(pp, &q); !q.empty() {
		lock(&sched.lock)
		globrunqputbatch(&q)
		unlock(&sched.lock)
	}

	// Some of the work may still be unserved; wake a P if one could
	// run it.
	wakep()
}
4139
4140
4141
// schedule performs one round of scheduling: find a runnable goroutine
// and execute it on the current M. It never returns.
func schedule() {
	mp := getg().m

	if mp.locks != 0 {
		throw("schedule: holding locks")
	}

	// If this M is locked to a goroutine, hand off our P, block until
	// that goroutine is runnable, and run it.
	if mp.lockedg != 0 {
		stoplockedm()
		execute(mp.lockedg.ptr(), false) // Never returns.
	}

	// We should not schedule away from a g that is executing a cgo
	// call, since the cgo call is using the m's g0 stack.
	if mp.incgo {
		throw("schedule: in cgo")
	}

top:
	pp := mp.p.ptr()
	pp.preempt = false

	// Sanity check: an M marked spinning must not have local work.
	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
		throw("schedule: spinning with local work")
	}

	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available

	// findRunnable may have switched this M to a different P; reload.
	pp = mp.p.ptr()

	// Any allp snapshot held for lock-free scanning is no longer
	// needed; release it.
	mp.clearAllpSnapshot()

	// We're committing to gp, so release any GC mark worker this P had
	// reserved (presumably so it can be claimed elsewhere — confirm
	// against gcController).
	gcController.releaseNextGCMarkWorker(pp)

	if debug.dontfreezetheworld > 0 && freezing.Load() {
		// The world is being frozen for a crash dump and
		// dontfreezetheworld is set: deadlock this M here instead of
		// running more goroutines.
		lock(&deadlock)
		lock(&deadlock)
	}

	// This thread is going to run a goroutine and is not spinning
	// anymore, so reset spinning now (possibly starting a new
	// spinning M).
	if mp.spinning {
		resetspinning()
	}

	if sched.disable.user && !schedEnabled(gp) {
		// Scheduling of this goroutine is disabled. Put it on the
		// list of pending runnable goroutines until user scheduling
		// is re-enabled, then look for other work.
		lock(&sched.lock)
		if schedEnabled(gp) {
			// Something re-enabled scheduling while we were
			// acquiring the lock.
			unlock(&sched.lock)
		} else {
			sched.disable.runnable.pushBack(gp)
			unlock(&sched.lock)
			goto top
		}
	}

	// If findRunnable asked for it (it returned a special goroutine
	// such as a GC worker or trace reader), wake a P for the ordinary
	// work we may have skipped.
	if tryWakeP {
		wakep()
	}
	if gp.lockedm != 0 {
		// Hand off our P to the locked M, then block waiting for a
		// new P and start over.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime) // Never returns.
}
4239
4240
4241
4242
4243
4244
4245
4246
// dropg removes the association between the current M and its current
// goroutine (m.curg). The caller must already have moved the G out of
// _Grunning. Both pointers are cleared without write barriers
// (setMNoWB/setGNoWB), since this can run in contexts where barriers
// are not allowed.
func dropg() {
	gp := getg()

	setMNoWB(&gp.m.curg.m, nil)
	setGNoWB(&gp.m.curg, nil)
}
4253
// parkunlock_c is a gopark unlock function that releases the given
// runtime mutex and always lets the park proceed.
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}
4258
4259
// park_m is the continuation of gopark, running on g0. It moves gp
// from _Grunning to _Gwaiting, detaches it from the M, runs the
// caller-supplied waitunlockf (which may veto the park and resume gp
// immediately), and otherwise schedules other work. Never returns to
// its caller.
func park_m(gp *g) {
	mp := getg().m

	trace := traceAcquire()

	// If gp belongs to a bubble (gp.bubble), keep the bubble counted
	// as active for the duration of the park transition.
	bubble := gp.bubble
	if bubble != nil {
		bubble.incActive()
	}

	if trace.ok() {
		// Trace the park before the status transition: the event may
		// take a stack trace, and we no longer own the stack after
		// the transition.
		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
	}
	// The wait reason was already recorded by gopark's caller;
	// casgstatus only flips the status here.
	casgstatus(gp, _Grunning, _Gwaiting)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()

	if fn := mp.waitunlockf; fn != nil {
		ok := fn(gp, mp.waitlock)
		mp.waitunlockf = nil
		mp.waitlock = nil
		if !ok {
			// The unlock function vetoed the park: make gp
			// runnable again and resume it right away.
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if bubble != nil {
				bubble.decActive()
			}
			if trace.ok() {
				trace.GoUnpark(gp, 2)
				traceRelease(trace)
			}
			execute(gp, true) // Schedule it back. Never returns.
		}
	}

	if bubble != nil {
		bubble.decActive()
	}

	schedule() // Never returns.
}
4313
// goschedImpl yields the processor: gp moves from _Grunning to
// _Grunnable, is requeued, and the M schedules other work. preempted
// distinguishes involuntary preemption from a voluntary Gosched, for
// tracing and for queue placement. Never returns to its caller.
func goschedImpl(gp *g, preempted bool) {
	pp := gp.m.p.ptr()
	trace := traceAcquire()
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	if trace.ok() {
		// Trace the event before the transition: it may take a stack
		// trace, and we won't own the stack afterwards.
		if preempted {
			trace.GoPreempt()
		} else {
			trace.GoSched()
		}
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()
	if preempted && sched.gcwaiting.Load() {
		// A stop-the-world is pending: keep gp on the local run
		// queue (as runnext) rather than touching the global queue.
		runqput(pp, gp, true)
	} else {
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)
	}

	if mainStarted {
		wakep()
	}

	schedule() // Never returns.
}
4354
4355
// gosched_m is the g0 continuation of Gosched: a voluntary yield.
func gosched_m(gp *g) {
	goschedImpl(gp, false)
}
4359
4360
// goschedguarded_m is like gosched_m, but aborts the yield (resuming
// gp directly) if the M is in a state where preemption is not allowed.
func goschedguarded_m(gp *g) {
	if !canPreemptM(gp.m) {
		gogo(&gp.sched) // never return
	}
	goschedImpl(gp, false)
}
4367
// gopreempt_m is the g0 continuation of a (synchronous) preemption.
func gopreempt_m(gp *g) {
	goschedImpl(gp, true)
}
4371
4372
4373
4374
// preemptPark parks gp for a preemption stop, leaving it in
// _Gpreempted so whoever requested the stop (e.g. suspendG) can take
// ownership of its stack. It transitions through _Gscan|_Gpreempted to
// prevent anyone claiming gp before this M has fully detached from it.
// Never returns to its caller.
func preemptPark(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}

	if gp.asyncSafePoint {
		// Double-check that async preemption does not happen in
		// SPWRITE functions; the safe-point check is supposed to
		// exclude them.
		f := findfunc(gp.sched.pc)
		if !f.valid() {
			throw("preempt at unknown pc")
		}
		if f.flag&abi.FuncFlagSPWrite != 0 {
			println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
			throw("preempt SPWRITE")
		}
	}

	// Transition from _Grunning to _Gscan|_Gpreempted. We can't be in
	// _Grunning when we dropg because then we'd be running without an
	// M, but the moment we're in _Gpreempted, something could claim
	// this G before we've fully cleaned it up. Hence, the scan bit
	// locks down further transitions until we can dropg.
	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)

	// The trace event must be emitted between the status CAS above and
	// dropg below, while we still own the stack, so the tracer sees a
	// consistent state.
	trace := traceAcquire()
	if trace.ok() {
		trace.GoPark(traceBlockPreempted, 0)
	}

	// Detach gp from this M.
	dropg()

	// Drop the scan bit, publishing gp as _Gpreempted and allowing
	// the preemption requester to act on it.
	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
	if trace.ok() {
		traceRelease(trace)
	}

	schedule() // Never returns.
}
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
// goyield is like Gosched, but the yielding goroutine is placed on the
// current P's local run queue (see goyield_m) instead of the global
// queue, so it tends to run again soon on the same P.
func goyield() {
	checkTimeouts()
	mcall(goyield_m)
}
4462
// goyield_m is the g0 continuation of goyield: gp becomes runnable and
// is pushed to the back of the current P's local run queue, then the M
// schedules. Never returns to its caller.
func goyield_m(gp *g) {
	trace := traceAcquire()
	pp := gp.m.p.ptr()
	if trace.ok() {
		// Trace the event before the transition: it may take a stack
		// trace, and we won't own the stack afterwards.
		trace.GoPreempt()
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}
	dropg()
	runqput(pp, gp, false) // back of the local queue, not runnext
	schedule()             // Never returns.
}
4480
4481
// goexit1 finishes execution of the current goroutine: it emits race
// and trace "goroutine end" events, then switches to the g0 stack to
// run goexit0, which tears the goroutine down. Never returns.
func goexit1() {
	if raceenabled {
		if gp := getg(); gp.bubble != nil {
			racereleasemergeg(gp, gp.bubble.raceaddr())
		}
		racegoend()
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.GoEnd()
		traceRelease(trace)
	}
	mcall(goexit0)
}
4496
4497
// goexit0 runs on g0 after goexit1's mcall. If the goroutine handled
// secret material (runtimesecret experiment, gp.secret > 0), its stack
// is zeroed before the g is recycled. The g is then destroyed and the
// M schedules new work. Never returns.
func goexit0(gp *g) {
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		// Zero the entire stack so secrets cannot survive into the
		// g free pool and be observed by a later goroutine.
		memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
	}
	gdestroy(gp)
	schedule()
}
4509
// gdestroy tears down a finished goroutine: it marks it _Gdead, clears
// all per-g state, flushes remaining GC assist credit, detaches the g
// from the M, and puts it on the free list. If the g was locked to
// this OS thread via an external lock, the thread itself is retired
// (by returning to g0's saved context) instead of being reused.
func gdestroy(gp *g) {
	mp := getg().m
	pp := mp.p.ptr()

	casgstatus(gp, _Grunning, _Gdead)
	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
	if isSystemGoroutine(gp, false) {
		sched.ngsys.Add(-1)
	}
	// Clear all per-goroutine state so the g can be safely reused.
	gp.m = nil
	locked := gp.lockedm != 0
	gp.lockedm = 0
	mp.lockedg = 0
	gp.preemptStop = false
	gp.paniconfault = false
	gp._defer = nil // should already be nil, but just in case
	gp._panic = nil // non-nil for Goexit during panic; points at stack-allocated data
	gp.writebuf = nil
	gp.waitreason = waitReasonZero
	gp.param = nil
	gp.labels = nil
	gp.timer = nil
	gp.bubble = nil
	gp.fipsOnlyBypass = false
	gp.secret = 0

	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
		// Flush assist credit to the global pool. This gives better
		// information to pacing if the application is rapidly
		// creating and exiting goroutines.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
		gcController.bgScanCredit.Add(scanCredit)
		gp.gcAssistBytes = 0
	}

	dropg()

	if GOARCH == "wasm" { // no threads yet on wasm
		gfput(pp, gp)
		return
	}

	if locked && mp.lockedInt != 0 {
		print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
		if mp.isextra {
			throw("runtime.Goexit called in a thread that was not created by the Go runtime")
		}
		throw("exited a goroutine internally locked to the OS thread")
	}
	gfput(pp, gp)
	if locked {
		// The goroutine may have locked this thread because it put
		// it in an unusual kernel state. Kill the thread rather than
		// returning it to the thread pool: return to g0's saved
		// context, which will release the P and exit the thread.
		if GOOS != "plan9" { // plan9 reuses the thread instead — see upstream history
			gogo(&mp.g0.sched)
		} else {
			// Clear lockedExt on plan9 since we may end up reusing
			// this thread.
			mp.lockedExt = 0
		}
	}
}
4577
4578
4579
4580
4581
4582
4583
4584
4585
// save records pc, sp, and bp in getg().sched so that a later gogo can
// resume execution at this point. It must not be called on a system g
// (g0 or gsignal), whose sched slots are reserved for other uses, and
// it deliberately avoids writing sched.ctxt (which would need a write
// barrier) — it only asserts that ctxt is already nil.
func save(pc, sp, bp uintptr) {
	gp := getg()

	if gp == gp.m.g0 || gp == gp.m.gsignal {
		// m.g0.sched is special and must describe the exact context
		// g0 resumes at; gsignal's sched is likewise not for this
		// use. Overwriting either would corrupt runtime state.
		throw("save on system g not allowed")
	}

	gp.sched.pc = pc
	gp.sched.sp = sp
	gp.sched.lr = 0
	gp.sched.bp = bp
	// We need sched.ctxt to be zero but cannot write it here without
	// a write barrier. It should always already be zero; assert that.
	if gp.sched.ctxt != nil {
		badctxt()
	}
}
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
// reentersyscall is the implementation of entersyscall, called once the
// caller's PC/SP/BP have been captured. It records the syscall entry
// state on the g (for GC and traceback), marks the g _Gsyscall, and
// leaves the P where sysmon or a stop-the-world can retake it if the
// syscall blocks.
//
// Nothing in this window may grow the stack: the saved pc/sp would go
// stale and exitsyscall would restore the wrong state. throwsplit and
// the stackPreempt guard below trap violations, and save() is re-run
// after every operation (systemstack, casgstatus under lock ranking)
// that can clobber g.sched.
func reentersyscall(pc, sp, bp uintptr) {
	gp := getg()

	// Disable preemption: while in this function the g is in
	// _Gsyscall but may have inconsistent sched state that the GC
	// must not observe.
	gp.m.locks++

	// Scrub any secrets left on the signal stack before entering the
	// kernel (runtimesecret experiment).
	if goexperiment.RuntimeSecret {
		eraseSecretsSignalStk()
	}

	// entersyscall must not call any function that might split or
	// grow the stack (see above). Catch offenders by replacing the
	// stack guard with a value that trips every stack check, and set
	// throwsplit so newstack dies instead of growing.
	gp.stackguard0 = stackPreempt
	gp.throwsplit = true

	// Record the P's syscalltick so exitsyscall can tell whether the
	// P was touched while we were away.
	gp.m.syscalltick = gp.m.p.ptr().syscalltick

	pp := gp.m.p.ptr()
	if pp.runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack.
		systemstack(runSafePointFn)
	}
	gp.m.oldp.set(pp)

	// Leave SP around for GC and traceback.
	save(pc, sp, bp)
	gp.syscallsp = sp
	gp.syscallpc = pc
	gp.syscallbp = bp

	// Sanity checks: the recorded sp/bp must lie within the g's stack.
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}
	trace := traceAcquire()
	if trace.ok() {
		// Emit the syscall event on the system stack (it may take a
		// stack trace of this g).
		systemstack(func() {
			trace.GoSysCall()
		})
		// systemstack itself clobbers g.sched, and we might need it
		// later if the G genuinely blocks in the syscall; re-save.
		save(pc, sp, bp)
	}
	if sched.gcwaiting.Load() {
		// A stop-the-world is pending: release the P now so the
		// stop can complete instead of waiting on this syscall.
		systemstack(func() {
			entersyscallHandleGCWait(trace)
		})
		// systemstack clobbered g.sched; re-save.
		save(pc, sp, bp)
	}

	// Move to _Gsyscall. Fast-path the status CAS; fall back to the
	// full casgstatus when it fails or when the g is in a bubble
	// (which needs casgstatus's extra bookkeeping).
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
		casgstatus(gp, _Grunning, _Gsyscall)
	}
	if staticLockRanking {
		// casgstatus may call systemstack under static lock ranking,
		// clobbering g.sched; re-save.
		save(pc, sp, bp)
	}
	if trace.ok() {
		traceRelease(trace)
	}
	if sched.sysmonwait.Load() {
		systemstack(entersyscallWakeSysmon)
		// systemstack clobbered g.sched; re-save.
		save(pc, sp, bp)
	}
	gp.m.locks--
}
4748
4749
4750
4751
// debugExtendGrunningNoP, when true, inserts artificial delays in the
// windows where a goroutine is _Grunning without a P (see
// entersyscallblock and exitsyscall), widening those race windows for
// testing. Always false in normal builds.
const debugExtendGrunningNoP = false
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
// entersyscall marks the current goroutine as entering a system call.
// The real work happens in reentersyscall, which needs the caller's
// PC, SP, and frame pointer; this thin wrapper exists only to capture
// them at the right frame.
func entersyscall() {
	// Capture the frame pointer into a local first rather than
	// passing getcallerfp() inline as an argument: evaluating it as
	// part of the call could observe a different frame layout —
	// confirm against upstream rationale.
	fp := getcallerfp()
	reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
}
4776
// entersyscallWakeSysmon wakes sysmon if it is parked, so it can
// monitor the syscall this goroutine is entering. Runs on the system
// stack (called via systemstack from reentersyscall).
func entersyscallWakeSysmon() {
	lock(&sched.lock)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}
4785
// entersyscallHandleGCWait releases the current P during syscall entry
// while a stop-the-world is pending (sched.stopwait > 0): the P is
// detached from this M, placed in _Pgcstop, and the stopper is woken
// if this was the last P it was waiting for. Runs on the system stack.
func entersyscallHandleGCWait(trace traceLocker) {
	gp := getg()

	lock(&sched.lock)
	if sched.stopwait > 0 {
		// Disassociate the P from this M and hand it to the stopper.
		pp := gp.m.p.ptr()
		pp.m = 0
		gp.m.p = 0
		atomic.Store(&pp.status, _Pgcstop)

		if trace.ok() {
			trace.ProcStop(pp)
		}
		// This g will now be in a syscall without a P.
		addGSyscallNoP(gp.m)
		pp.gcStopTime = nanotime()
		pp.syscalltick++
		if sched.stopwait--; sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
	}
	unlock(&sched.lock)
}
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
// entersyscallblock is a variant of entersyscall for calls that are
// known to block for a long time: instead of leaving the P for sysmon
// to retake later, it hands the P off immediately (handoffp). The same
// no-stack-growth rules as reentersyscall apply, hence throwsplit and
// the repeated save calls.
func entersyscallblock() {
	gp := getg()

	gp.m.locks++ // disable preemption; see reentersyscall
	gp.throwsplit = true
	gp.stackguard0 = stackPreempt // trap any stack growth in this window
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	gp.m.p.ptr().syscalltick++

	// This g will execute the syscall without a P.
	addGSyscallNoP(gp.m)

	// Leave SP around for GC and traceback.
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	bp := getcallerfp()
	save(pc, sp, bp)
	gp.syscallsp = gp.sched.sp
	gp.syscallpc = gp.sched.pc
	gp.syscallbp = gp.sched.bp
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		sp1 := sp
		sp2 := gp.sched.sp
		sp3 := gp.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	// Emit the syscall trace event and hand the P off, all on the
	// system stack.
	trace := traceAcquire()
	systemstack(func() {
		if trace.ok() {
			trace.GoSysCall()
		}
		handoffp(releasep())
	})

	// From here until the casgstatus below, this g is _Grunning
	// without a P; optionally stretch that window for testing.
	if debugExtendGrunningNoP {
		usleep(10)
	}
	casgstatus(gp, _Grunning, _Gsyscall)
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
		systemstack(func() {
			print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	if trace.ok() {
		systemstack(func() {
			traceRelease(trace)
		})
	}

	// The systemstack calls above clobbered g.sched; re-save it.
	save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())

	gp.m.locks--
}
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
// exitsyscall is called when a system call returns. It tries to
// reacquire a P — ideally the one held before the syscall (m.oldp) —
// so the goroutine can keep running on this M. If no P is available it
// falls back to exitsyscallNoP via mcall, which queues the goroutine
// and parks the M. The fast path takes no locks.
func exitsyscall() {
	gp := getg()

	gp.m.locks++ // disable preemption; see reentersyscall
	if sys.GetCallerSP() > gp.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}
	gp.waitsince = 0

	if sched.stopwait == freezeStopWait {
		// The world is being frozen (fatal crash in progress);
		// deadlock this M here rather than racing with the freeze.
		systemstack(func() {
			lock(&deadlock)
			lock(&deadlock)
		})
	}

	// Move back to _Grunning. Fast-path the CAS; fall back to the
	// full casgstatus when it fails or the g is in a bubble (which
	// needs casgstatus's extra bookkeeping).
	if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
		casgstatus(gp, _Gsyscall, _Grunning)
	}

	// Testing hook: widen the _Grunning-without-P window.
	if debugExtendGrunningNoP {
		usleep(10)
	}

	// Consume the P recorded at syscall entry.
	oldp := gp.m.oldp.ptr()
	gp.m.oldp.set(nil)

	pp := gp.m.p.ptr()
	if pp != nil {
		// Fast path: we still hold a P. Only tracing work remains.
		if trace := traceAcquire(); trace.ok() {
			systemstack(func() {
				// If the P's syscalltick matches the value we
				// recorded on entry, nobody touched the P while we
				// were away; otherwise it was stolen and has been
				// reacquired, which needs the full event sequence.
				if pp.syscalltick == gp.m.syscalltick {
					trace.GoSysExit(false)
				} else {
					trace.ProcSteal(pp)
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
				}
				traceRelease(trace)
			})
		}
	} else {
		// Slow path: try to take oldp back, or grab any idle P.
		systemstack(func() {
			if pp := exitsyscallTryGetP(oldp); pp != nil {
				acquirepNoTrace(pp)
				// Trace the reacquisition after the P is attached.
				if trace := traceAcquire(); trace.ok() {
					trace.ProcStart()
					trace.GoSysExit(true)
					trace.GoStart()
					traceRelease(trace)
				}
			}
		})
		pp = gp.m.p.ptr()
	}

	// If we have a P (kept or reacquired), finish up here and return.
	if pp != nil {
		if goroutineProfile.active {
			// Make sure gp's stack has been recorded in the
			// goroutine profile, exactly as it was when the profiler
			// first stopped the world.
			systemstack(func() {
				tryRecordGoroutineProfileWB(gp)
			})
		}

		pp.syscalltick++

		// We're running again, so the GC is not scanning this stack;
		// safe to drop the syscall frame info now.
		gp.syscallsp = 0
		gp.m.locks--
		if gp.preempt {
			// Restore the preemption request in case it was cleared
			// while we were away.
			gp.stackguard0 = stackPreempt
		} else {
			// Otherwise restore the real stack guard, which was
			// clobbered by entersyscall.
			gp.stackguard0 = gp.stack.lo + stackGuard
		}
		gp.throwsplit = false

		if sched.disable.user && !schedEnabled(gp) {
			// Scheduling of this goroutine is disabled; yield.
			Gosched()
		}
		return
	}

	gp.m.locks--

	// No P available: call the scheduler on g0.
	mcall(exitsyscallNoP)

	// The scheduler resumed us, so we're allowed to run now. Delete
	// the syscallsp information left for the GC during the syscall —
	// only safe now that we know the GC is no longer scanning us.
	gp.syscallsp = 0
	gp.m.p.ptr().syscalltick++
	gp.throwsplit = false
}
5057
5058
5059
5060
5061
5062
5063
// exitsyscallTryGetP tries to find a P for a goroutine leaving a
// syscall: first by taking back oldp (the P held before the syscall)
// from a thread that is in the process of claiming it, then by taking
// any idle P. Returns nil if no P could be acquired. On success the
// goroutine is no longer counted as "in syscall without a P".
func exitsyscallTryGetP(oldp *p) *p {
	// Try to reacquire the last P.
	if oldp != nil {
		if thread, ok := setBlockOnExitSyscall(oldp); ok {
			// NOTE(review): assumed semantics — the thread taking
			// oldp is briefly blocked while we take the P back,
			// then resumed; confirm against setBlockOnExitSyscall.
			thread.takeP()
			decGSyscallNoP(getg().m)
			thread.resume()
			return oldp
		}
	}

	// Otherwise, try to get any idle P.
	if sched.pidle != 0 {
		lock(&sched.lock)
		pp, _ := pidleget(0)
		if pp != nil && sched.sysmonwait.Load() {
			// Wake sysmon now that a P is going back to work.
			sched.sysmonwait.Store(false)
			notewakeup(&sched.sysmonnote)
		}
		unlock(&sched.lock)
		if pp != nil {
			decGSyscallNoP(getg().m)
			return pp
		}
	}
	return nil
}
5091
5092
5093
5094
5095
5096
5097
// exitsyscallNoP is the slow path of exitsyscall, called via mcall on
// g0 when no P was immediately available: it makes gp runnable, then
// either acquires a late-appearing idle P and runs gp, queues gp
// globally and parks this M, or (for a locked g) waits for gp's
// dedicated M. Never returns normally; execution resumes via execute
// or schedule.
func exitsyscallNoP(gp *g) {
	traceExitingSyscall()
	trace := traceAcquire()
	casgstatus(gp, _Grunning, _Grunnable)
	traceExitedSyscall()
	if trace.ok() {
		// Write out the syscall exit eagerly; it must come after the
		// status transition above.
		trace.GoSysExit(true)
		traceRelease(trace)
	}
	decGSyscallNoP(getg().m)
	dropg()
	lock(&sched.lock)
	var pp *p
	if schedEnabled(gp) {
		pp, _ = pidleget(0)
	}
	var locked bool
	if pp == nil {
		globrunqput(gp)

		// globrunqput releases ownership of gp: another M may pick
		// it up and mutate it at any point after. So the lockedm
		// check must be read before unlocking (and we use the value
		// cached here below).
		locked = gp.lockedm != 0
	} else if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		execute(gp, false) // Never returns.
	}
	if locked {
		// Wait until some other M schedules gp, which will in turn
		// make this (locked) M runnable again. gp.lockedm must be
		// this M, since gp was running on it before entersyscall.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}
5148
5149
5150
5151
5152
5153
5154
// addGSyscallNoP accounts for a goroutine entering a syscall without
// holding a P, by incrementing sched.nGsyscallNoP. Extra Ms whose
// goroutines originate in C (isExtraInC) are excluded — presumably
// because they are permanently "in syscall" and would skew the count;
// confirm against the nGsyscallNoP consumers.
func addGSyscallNoP(mp *m) {
	if !mp.isExtraInC {
		sched.nGsyscallNoP.Add(1)
	}
}
5168
5169
5170
5171
5172
5173
5174
// decGSyscallNoP undoes addGSyscallNoP when the goroutine reacquires a
// P (or is requeued), with the same isExtraInC exclusion so the
// counter stays balanced.
func decGSyscallNoP(mp *m) {
	if !mp.isExtraInC {
		sched.nGsyscallNoP.Add(-1)
	}
}
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
// syscall_runtime_BeforeFork prepares the runtime for an imminent
// fork(2): it disables preemption, saves the signal mask, and blocks
// signals so the child does not run Go signal handlers on half-copied
// state. Presumably linknamed into package syscall (the directive is
// not visible here — confirm upstream).
func syscall_runtime_BeforeFork() {
	gp := getg().m.curg

	// Block signals during a fork, so that the child does not run
	// a signal handler before exec if a signal is sent to the process
	// group. See issue #18600.
	gp.m.locks++
	sigsave(&gp.m.sigmask)
	sigblock(false)

	// This function is called before fork in syscall package.
	// Code between fork and exec must not allocate memory nor even
	// try to grow the stack. Set the guard to stackFork so any stack
	// growth in that window crashes loudly instead of corrupting the
	// child.
	gp.stackguard0 = stackFork
}
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
// syscall_runtime_AfterFork undoes syscall_runtime_BeforeFork in the
// parent process after fork: restores the normal stack guard, restores
// the saved signal mask, and re-enables preemption.
func syscall_runtime_AfterFork() {
	gp := getg().m.curg

	// Replace the fork-time stack guard with the real one.
	gp.stackguard0 = gp.stack.lo + stackGuard

	msigrestore(gp.m.sigmask)

	gp.m.locks--
}
5235
5236
5237
// inForkedChild is true while we are in a forked child process that
// has not yet exec'd; it is set only inside
// syscall_runtime_AfterForkInChild so signal code can detect this
// window.
var inForkedChild bool
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
// syscall_runtime_AfterForkInChild runs in the child process between
// fork and exec: it clears inherited signal handlers and restores the
// signal mask saved in BeforeFork, flagging the window with
// inForkedChild. Must not allocate or grow the stack (the fork-time
// stack guard from BeforeFork is still in effect).
func syscall_runtime_AfterForkInChild() {
	// It's OK to change the global variable inForkedChild here
	// because we are the only thread in the child process.
	inForkedChild = true

	clearSignalHandlers()

	// When we are the child we are the only thread running,
	// so we know that nothing else has changed gp.m.sigmask.
	msigrestore(getg().m.sigmask)

	inForkedChild = false
}
5274
5275
5276
5277
// pendingPreemptSignals is the number of preemption signals that have
// been sent but not yet received. It is drained on darwin/ios before
// exec (see syscall_runtime_BeforeExec).
var pendingPreemptSignals atomic.Int32
5279
5280
5281
5282
// syscall_runtime_BeforeExec runs before an exec(2): it takes execLock
// to serialize exec against thread creation, and on darwin/ios waits
// until all in-flight preemption signals have been received —
// presumably because a signal delivered after exec could kill the new
// image; confirm against the darwin signal handling code.
func syscall_runtime_BeforeExec() {
	// Prevent thread creation during exec.
	execLock.lock()

	// On Darwin, wait for all pending preemption signals to
	// be received. See issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		for pendingPreemptSignals.Load() > 0 {
			osyield()
		}
	}
}
5295
5296
5297
5298
// syscall_runtime_AfterExec releases execLock after an exec attempt
// (successful exec never returns; this runs on failure).
func syscall_runtime_AfterExec() {
	execLock.unlock()
}
5302
5303
// malg allocates a new g with a stack big enough for stacksize bytes
// plus the system-reserved stackSystem, rounded up to a power of two.
// A negative stacksize yields a g with no stack, for callers that
// supply one themselves.
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		stacksize = round2(stackSystem + stacksize)
		systemstack(func() {
			newg.stack = stackalloc(uint32(stacksize))
			if valgrindenabled {
				newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
			}
		})
		newg.stackguard0 = newg.stack.lo + stackGuard
		newg.stackguard1 = ^uintptr(0) // only meaningful on g0; poisoned here
		// Clear the bottom word of the stack — it is used as scratch
		// in some low-level paths (presumably the VDSO/gsignal g
		// recording on ARM; confirm upstream).
		*(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
	}
	return newg
}
5322
5323
5324
5325
// newproc creates a new goroutine running fn; it is the implementation
// of the go statement. The allocation happens on the system stack; the
// new g is placed at the head (runnext) of the current P's run queue,
// and a P is woken once the scheduler is fully started.
func newproc(fn *funcval) {
	gp := getg()
	pc := sys.GetCallerPC() // address of the go statement, for tracebacks
	systemstack(func() {
		newg := newproc1(fn, gp, pc, false, waitReasonZero)

		pp := getg().m.p.ptr()
		runqput(pp, newg, true)

		if mainStarted {
			wakep()
		}
	})
}
5340
5341
5342
5343
// newproc1 allocates (or recycles via gfget) a g to run fn. callerpc
// is the address of the go statement that created it and callergp the
// creating goroutine, recorded for tracebacks. If parked is true the
// new g starts in _Gwaiting with the given waitreason instead of
// _Grunnable. The caller is responsible for scheduling the returned g.
func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
	if fn == nil {
		fatal("go of nil func value")
	}

	mp := acquirem() // disable preemption: we hold M and P in local vars
	pp := mp.p.ptr()
	newg := gfget(pp)
	if newg == nil {
		newg = malg(stackMin)
		casgstatus(newg, _Gidle, _Gdead)
		allgadd(newg) // publish as _Gdead so the GC won't scan its uninitialized stack
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	// Lay out the initial (empty) frame at the top of the new stack.
	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
	totalSize = alignUp(totalSize, sys.StackAlign)
	sp := newg.stack.hi - totalSize
	if usesLR {
		// Caller's LR slot.
		*(*uintptr)(unsafe.Pointer(sp)) = 0
		prepGoExitFrame(sp)
	}
	if GOARCH == "arm64" {
		// Caller's frame pointer slot.
		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
	}

	// Set up the scheduling context so the g "returns" into goexit
	// when fn completes.
	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so the previous instruction is in the same function
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.parentGoid = callergp.goid
	newg.gopc = callerpc
	newg.ancestors = saveAncestors(callergp)
	newg.startpc = fn.fn
	newg.runningCleanups.Store(false)
	if isSystemGoroutine(newg, false) {
		sched.ngsys.Add(1)
	} else {
		// Only user goroutines inherit the bubble and pprof labels.
		newg.bubble = callergp.bubble
		if mp.curg != nil {
			newg.labels = mp.curg.labels
		}
		if goroutineProfile.active {
			// A concurrent goroutine profile is running. It should
			// include exactly the goroutines alive when it started,
			// which excludes newg, so mark it satisfied before it
			// leaves _Gdead.
			newg.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
	}
	// Sample a subset of goroutines for scheduler latency tracking.
	newg.trackingSeq = uint8(cheaprand())
	if newg.trackingSeq%gTrackingPeriod == 0 {
		newg.tracking = true
	}
	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))

	// Assign a goid and flip the status; hold the trace locker across
	// both so the tracer sees them atomically.
	trace := traceAcquire()
	var status uint32 = _Grunnable
	if parked {
		status = _Gwaiting
		newg.waitreason = waitreason
	}
	if pp.goidcache == pp.goidcacheend {
		// sched.goidgen is the last allocated id: this batch must be
		// [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. At startup
		// goidgen is 0, so the main goroutine receives goid 1.
		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
		pp.goidcache -= _GoidCacheBatch - 1
		pp.goidcacheend = pp.goidcache + _GoidCacheBatch
	}
	newg.goid = pp.goidcache
	casgstatus(newg, _Gdead, status)
	pp.goidcache++
	newg.trace.reset()
	if trace.ok() {
		trace.GoCreate(newg, newg.startpc, parked)
		traceRelease(trace)
	}

	// Inherit the FIPS-only bypass flag from the creator.
	newg.fipsOnlyBypass = callergp.fipsOnlyBypass

	// Inherit the DIT request (presumably data-independent timing —
	// confirm against the ditWanted definition).
	newg.ditWanted = callergp.ditWanted

	// Set up the race detector context for the new goroutine.
	if raceenabled {
		newg.racectx = racegostart(callerpc)
		newg.raceignore = 0
		if newg.labels != nil {
			// Synchronize inherited pprof labels with the race
			// detector via the labelSync sentinel.
			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
		}
	}
	pp.goroutinesCreated++
	releasem(mp)

	return newg
}
5459
5460
5461
5462
// saveAncestors returns the ancestor-goroutine information to attach
// to a new goroutine, for GODEBUG tracebackancestors=N: the caller's
// ancestor list (truncated to N entries) with a new entry for the
// caller's current stack prepended. Returns nil when the feature is
// off or the caller has no goid (e.g. during bootstrap).
func saveAncestors(callergp *g) *[]ancestorInfo {
	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
		return nil
	}
	var callerAncestors []ancestorInfo
	if callergp.ancestors != nil {
		callerAncestors = *callergp.ancestors
	}
	// Keep at most tracebackancestors entries, including the new one.
	n := int32(len(callerAncestors)) + 1
	if n > debug.tracebackancestors {
		n = debug.tracebackancestors
	}
	ancestors := make([]ancestorInfo, n)
	copy(ancestors[1:], callerAncestors) // oldest entries drop off the end

	// Capture the caller's current stack as the newest ancestor entry.
	var pcs [tracebackInnerFrames]uintptr
	npcs := gcallers(callergp, 0, pcs[:])
	ipcs := make([]uintptr, npcs)
	copy(ipcs, pcs[:])
	ancestors[0] = ancestorInfo{
		pcs:  ipcs,
		goid: callergp.goid,
		gopc: callergp.gopc,
	}

	ancestorsp := new([]ancestorInfo)
	*ancestorsp = ancestors
	return ancestorsp
}
5493
5494
5495
// gfput puts a dead g on pp's local free list for reuse by gfget.
// Stacks of non-standard size are freed immediately (the g is cached
// stackless). When the local list reaches 64 entries, it is drained
// down to 32, with the excess moved to the global free lists,
// segregated by whether each g still has a stack.
func gfput(pp *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stack.hi - gp.stack.lo

	if stksize != uintptr(startingStackSize) {
		// Non-standard stack size — free it now.
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
		if valgrindenabled {
			valgrindDeregisterStack(gp.valgrindStackID)
			gp.valgrindStackID = 0
		}
	}

	pp.gFree.push(gp)
	if pp.gFree.size >= 64 {
		// Local list is full; move the excess to the global lists.
		var (
			stackQ   gQueue
			noStackQ gQueue
		)
		for pp.gFree.size >= 32 {
			gp := pp.gFree.pop()
			if gp.stack.lo == 0 {
				noStackQ.push(gp)
			} else {
				stackQ.push(gp)
			}
		}
		lock(&sched.gFree.lock)
		sched.gFree.noStack.pushAll(noStackQ)
		sched.gFree.stack.pushAll(stackQ)
		unlock(&sched.gFree.lock)
	}
}
5535
5536
5537
// gfget gets a reusable g from pp's free list, refilling up to 32 Gs
// from the global free lists (preferring Gs that still have stacks)
// when the local list is empty. Returns nil if no free g exists. The
// returned g has a startingStackSize stack — allocated fresh if
// needed — and, when a cached stack is reused, it is re-registered
// with the race/msan/asan sanitizers.
func gfget(pp *p) *g {
retry:
	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
		lock(&sched.gFree.lock)
		// Move a batch of free Gs from the global lists to this P.
		for pp.gFree.size < 32 {
			// Prefer Gs with stacks.
			gp := sched.gFree.stack.pop()
			if gp == nil {
				gp = sched.gFree.noStack.pop()
				if gp == nil {
					break
				}
			}
			pp.gFree.push(gp)
		}
		unlock(&sched.gFree.lock)
		goto retry
	}
	gp := pp.gFree.pop()
	if gp == nil {
		return nil
	}
	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
		// Free the cached stack: it was the right size when the g
		// was put on the free list, but startingStackSize has
		// changed since then.
		systemstack(func() {
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stackguard0 = 0
			if valgrindenabled {
				valgrindDeregisterStack(gp.valgrindStackID)
				gp.valgrindStackID = 0
			}
		})
	}
	if gp.stack.lo == 0 {
		// Stack was deallocated in gfput or above; allocate a new one.
		systemstack(func() {
			gp.stack = stackalloc(startingStackSize)
			if valgrindenabled {
				gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
			}
		})
		gp.stackguard0 = gp.stack.lo + stackGuard
	} else {
		// Reusing a cached stack: tell the sanitizers it is fresh.
		if raceenabled {
			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if msanenabled {
			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if asanenabled {
			asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
	}
	return gp
}
5598
5599
// gfpurge moves all of pp's cached free Gs to the global free lists
// (segregated by whether each g still has a stack); used when pp is
// being destroyed.
func gfpurge(pp *p) {
	var (
		stackQ   gQueue
		noStackQ gQueue
	)
	for !pp.gFree.empty() {
		gp := pp.gFree.pop()
		if gp.stack.lo == 0 {
			noStackQ.push(gp)
		} else {
			stackQ.push(gp)
		}
	}
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(noStackQ)
	sched.gFree.stack.pushAll(stackQ)
	unlock(&sched.gFree.lock)
}
5618
5619
// Breakpoint executes a breakpoint trap.
func Breakpoint() {
	breakpoint()
}
5623
5624
5625
5626
5627
5628
// dolockOSThread is called by LockOSThread and lockOSThread after they
// have updated the lock counters: it wires the current goroutine and
// its M to each other. Preemption must be disabled by the caller, or
// the M here could differ from the M the counter was bumped on.
func dolockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	gp.m.lockedg.set(gp)
	gp.lockedm.set(gp.m)
}
5637
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
5653
// LockOSThread wires the calling goroutine to its current operating
// system thread. The calling goroutine will always execute in that
// thread, and no other goroutine will execute in it, until the calling
// goroutine has made as many calls to UnlockOSThread as to
// LockOSThread. If the calling goroutine exits without unlocking the
// thread, the thread will be terminated.
func LockOSThread() {
	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
		// If we need to start a new thread from the locked thread,
		// we need the template thread. Start it now while we're in a
		// known-good state.
		startTemplateThread()
	}
	gp := getg()
	gp.m.lockedExt++
	if gp.m.lockedExt == 0 {
		// The external lock counter wrapped around.
		gp.m.lockedExt--
		panic("LockOSThread nesting overflow")
	}
	dolockOSThread()
}
5669
5670
// lockOSThread is the runtime-internal variant of LockOSThread,
// tracked by a separate counter (lockedInt) so internal locks cannot
// be undone by user calls to UnlockOSThread.
func lockOSThread() {
	getg().m.lockedInt++
	dolockOSThread()
}
5675
5676
5677
5678
5679
5680
// dounlockOSThread is called by UnlockOSThread and unlockOSThread
// after they decrement their lock counters: it unwires the goroutine
// from its M only when neither internal nor external locks remain.
// Preemption must be disabled by the caller.
func dounlockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
		return
	}
	gp.m.lockedg = 0
	gp.lockedm = 0
}
5692
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706
// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the calling
// goroutine to zero, it unwires the calling goroutine from its fixed
// operating system thread. If there are no active LockOSThread calls,
// this is a no-op.
func UnlockOSThread() {
	gp := getg()
	if gp.m.lockedExt == 0 {
		return
	}
	gp.m.lockedExt--
	dounlockOSThread()
}
5715
5716
// unlockOSThread undoes an internal lockOSThread. Unbalanced calls are
// a runtime bug and abort the process.
func unlockOSThread() {
	gp := getg()
	if gp.m.lockedInt == 0 {
		systemstack(badunlockosthread)
	}
	gp.m.lockedInt--
	dounlockOSThread()
}
5725
// badunlockosthread reports an unbalanced internal unlockOSThread.
func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
5729
// gcount returns the number of live goroutines: all registered Gs
// minus those on the global and per-P free lists, optionally excluding
// system goroutines. The inputs are read without synchronization, so
// the result may be inconsistent; it is clamped to at least 1 since
// the current goroutine is certainly running.
func gcount(includeSys bool) int32 {
	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
	if !includeSys {
		n -= sched.ngsys.Load()
	}
	for _, pp := range allp {
		n -= pp.gFree.size
	}

	// All these variables can change concurrently, so the result can
	// be inconsistent. But at least the current goroutine is running.
	if n < 1 {
		n = 1
	}
	return n
}
5746
5747
5748
5749
5750
// goroutineleakcount returns the number of goroutines flagged as
// leaked (work.goroutineLeak.count) — presumably by the GC's goroutine
// leak detection; confirm against where the counter is maintained.
func goroutineleakcount() int {
	return work.goroutineLeak.count
}
5754
// mcount returns the number of Ms that exist (created minus freed).
// The fields are read without synchronization here; callers
// presumably hold sched.lock — confirm at call sites.
func mcount() int32 {
	return int32(sched.mnext - sched.nmfreed)
}
5758
// prof holds CPU-profiling state shared with the SIGPROF handler.
var prof struct {
	// signalLock serializes updates against concurrent SIGPROF
	// delivery (acquired with a CAS spin in setcpuprofilerate).
	signalLock atomic.Uint32

	// hz is the current profiling rate in samples per second;
	// written only while holding signalLock.
	hz atomic.Int32
}
5766
// These self-recursive stubs are never actually called; their function
// addresses serve as sentinel PCs in CPU profiles for samples that
// cannot be attributed to real Go code (system time, external/C code,
// GC, lost samples, VDSO). See the fallback attribution in sigprof.
func _System()                    { _System() }
func _ExternalCode()              { _ExternalCode() }
func _LostExternalCode()          { _LostExternalCode() }
func _GC()                        { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _LostContendedRuntimeLock()  { _LostContendedRuntimeLock() }
func _VDSO()                      { _VDSO() }
5774
5775
5776
5777
5778
// sigprof records a CPU profiling sample. It is invoked by the SIGPROF
// handler with the interrupted pc/sp/lr and the g and m that were
// running (either of which may be nil in odd states). It must not
// allocate; traceback errors are silently tolerated.
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz.Load() == 0 {
		return
	}

	// If mp.profilehz is 0, profiling is not enabled for this thread.
	// This check avoids racing with setcpuprofilerate while it has
	// per-thread profiling turned off.
	if mp != nil && mp.profilehz == 0 {
		return
	}

	// On mips{,le}/arm, 64-bit atomics are emulated with spinlocks in
	// internal/runtime/atomic. If SIGPROF lands inside that critical
	// section, recording the sample (which uses those atomics) would
	// deadlock, so count the sample as lost instead.
	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
		if f := findfunc(pc); f.valid() {
			if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
				cpuprof.lostAtomic++
				return
			}
		}
		if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
			// internal/runtime/atomic calls into kernel helpers at
			// these high addresses on arm < 7; same deadlock hazard.
			cpuprof.lostAtomic++
			return
		}
	}

	// Profiling runs concurrently with GC, so it must not allocate.
	// Bump mallocing as a trap in case anything below does.
	getg().m.mallocing++

	var u unwinder
	var stk [maxCPUProfStack]uintptr
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		cgoOff := 0
		// Check cgoCallersUse to make sure we are not interrupting
		// other code that is fiddling with cgoCallers. We are running
		// in a signal handler with all signals blocked, so we don't
		// have to worry about any other code interrupting us.
		if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
				cgoOff++
			}
			n += copy(stk[:], mp.cgoCallers[:cgoOff])
			mp.cgoCallers[0] = 0
		}

		// Collect the Go stack that leads to the cgo call.
		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
		// Libcall (runtime syscall, e.g. on Windows). Collect the Go
		// stack that leads to the call.
		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
	} else if mp != nil && mp.vdsoSP != 0 {
		// VDSO call (e.g. nanotime on Linux). Collect the Go stack
		// that leads to the call.
		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
	} else {
		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
	}
	n += tracebackPCs(&u, 0, stk[n:])

	if n <= 0 {
		// Normal traceback is impossible or has failed. Account the
		// sample against an abstract sentinel frame instead.
		n = 2
		if inVDSOPage(pc) {
			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
		} else if pc > firstmoduledata.etext {
			// "ExternalCode" is better than a raw address past etext.
			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
		}
		stk[0] = pc
		if mp.preemptoff != "" {
			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
		} else {
			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
		}
	}

	if prof.hz.Load() != 0 {
		// The nil checks are defensive: on Windows one thread can
		// profile another, so gp may be nil or not this thread's g.
		var tagPtr *unsafe.Pointer
		if gp != nil && gp.m != nil && gp.m.curg != nil {
			tagPtr = &gp.m.curg.labels
		}
		cpuprof.add(tagPtr, stk[:n])

		// Attribute the sample to the user goroutine (curg) when the
		// signal landed on g0 on its behalf.
		gprof := gp
		var mp *m
		var pp *p
		if gp != nil && gp.m != nil {
			if gp.m.curg != nil {
				gprof = gp.m.curg
			}
			mp = gp.m
			pp = gp.m.p.ptr()
		}
		traceCPUSample(gprof, mp, pp, stk[:n])
	}
	getg().m.mallocing--
}
5896
5897
5898
// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, profiling is turned off.
func setcpuprofilerate(hz int32) {
	// Force sane arguments.
	if hz < 0 {
		hz = 0
	}

	// Disable preemption: otherwise this goroutine could be moved to
	// another thread mid-update, leaving per-thread profiler state
	// inconsistent.
	gp := getg()
	gp.m.locks++

	// Stop the profiler on this thread first so that a profiling
	// signal cannot arrive while we hold the spin lock below (the
	// signal handler also inspects prof state).
	setThreadCPUProfiler(0)

	// prof.signalLock is a spin lock shared with the signal handler.
	for !prof.signalLock.CompareAndSwap(0, 1) {
		osyield()
	}
	if prof.hz.Load() != hz {
		setProcessCPUProfiler(hz)
		prof.hz.Store(hz)
	}
	prof.signalLock.Store(0)

	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	// Re-enable the per-thread timer only if profiling is on.
	if hz != 0 {
		setThreadCPUProfiler(hz)
	}

	gp.m.locks--
}
5934
5935
5936
// init initializes pp, which may be a freshly allocated p or a p
// previously destroyed, assigning it the given id and leaving it in
// _Pgcstop (the caller decides when it may run).
func (pp *p) init(id int32) {
	pp.id = id
	pp.gcw.id = id
	pp.status = _Pgcstop
	pp.sudogcache = pp.sudogbuf[:0]
	pp.deferpool = pp.deferpoolbuf[:0]
	pp.wbBuf.reset()
	if pp.mcache == nil {
		if id == 0 {
			if mcache0 == nil {
				throw("missing mcache?")
			}
			// Use the bootstrap mcache0. Only the P with ID 0
			// receives it; procresize nils out mcache0 afterwards.
			pp.mcache = mcache0
		} else {
			pp.mcache = allocmcache()
		}
	}
	if raceenabled && pp.raceprocctx == 0 {
		if id == 0 {
			// P 0 inherits the bootstrap race proc context.
			pp.raceprocctx = raceprocctx0
			raceprocctx0 = 0
		} else {
			pp.raceprocctx = raceproccreate()
		}
	}
	lockInit(&pp.timers.mu, lockRankTimers)

	// This P may get timers when it starts running. Set the mask here
	// since the P may not go through pidleget (e.g. P 0 on startup),
	// which is where the mask is normally maintained.
	timerpMask.set(id)

	// Likewise, mark the P as non-idle up front since it may never
	// pass through pidleget before it starts running.
	idlepMask.clear(id)
}
5973
5974
5975
5976
5977
// destroy releases all of pp's resources and transitions it to _Pdead.
// Pending work (runnable Gs, timers, free Gs, cleanups) is handed to
// the global structures or to the caller's P.
//
// sched.lock must be held and the world must be stopped.
func (pp *p) destroy() {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	// Move all runnable goroutines to the global queue, preserving
	// their relative order (pop from the tail, push at the head).
	for pp.runqhead != pp.runqtail {
		// Pop from tail of local queue.
		pp.runqtail--
		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
		// Push onto head of global queue.
		globrunqputhead(gp)
	}
	if pp.runnext != 0 {
		globrunqputhead(pp.runnext.ptr())
		pp.runnext = 0
	}

	// Move all of pp's timers to the caller's P.
	getg().m.p.ptr().timers.take(&pp.timers)

	// The GC must not be running: pp's gcw and other GC state cannot
	// be safely torn down mid-cycle.
	if phase := gcphase; phase != _GCoff {
		println("runtime: p id", pp.id, "destroyed during GC phase", phase)
		throw("P destroyed while GC is running")
	}

	pp.gcw.spanq.destroy()

	// Drop caches so their memory can be reclaimed / reused.
	clear(pp.sudogbuf[:])
	pp.sudogcache = pp.sudogbuf[:0]
	pp.pinnerCache = nil
	clear(pp.deferpoolbuf[:])
	pp.deferpool = pp.deferpoolbuf[:0]
	systemstack(func() {
		for i := 0; i < pp.mspancache.len; i++ {
			// Return cached mspans to the heap's span allocator.
			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
		}
		pp.mspancache.len = 0
		lock(&mheap_.lock)
		pp.pcache.flush(&mheap_.pages)
		unlock(&mheap_.lock)
	})
	freemcache(pp.mcache)
	pp.mcache = nil
	gfpurge(pp)
	if raceenabled {
		if pp.timers.raceCtx != 0 {
			// The race detector requires a P context to end a race
			// context; temporarily wire pp to the current M so the
			// call is attributed correctly, then restore.
			mp := getg().m
			phold := mp.p.ptr()
			mp.p.set(pp)

			racectxend(pp.timers.raceCtx)
			pp.timers.raceCtx = 0

			mp.p.set(phold)
		}
		raceprocdestroy(pp.raceprocctx)
		pp.raceprocctx = 0
	}
	pp.gcAssistTime = 0
	// Fold per-P counters into their global accumulators.
	gcCleanups.queued += pp.cleanupsQueued
	pp.cleanupsQueued = 0
	sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
	pp.goroutinesCreated = 0
	pp.xRegs.free()
	pp.status = _Pdead
}
6052
6053
6054
6055
6056
6057
6058
6059
6060
// procresize changes the number of processors to nprocs.
//
// sched.lock must be held and the world must be stopped.
//
// Returns the list of Ps with local work that the caller is
// responsible for starting.
func procresize(nprocs int32) *p {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	old := gomaxprocs
	if old < 0 || nprocs <= 0 {
		throw("procresize: invalid arg")
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.Gomaxprocs(nprocs)
		traceRelease(trace)
	}

	// Update scheduler time statistics for the old gomaxprocs.
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	// Grow allp and the per-P masks if necessary.
	if nprocs > int32(len(allp)) {
		// Synchronize with retake, which may read allp concurrently
		// (it runs without a P, so a stopped world doesn't stop it).
		lock(&allpLock)
		if nprocs <= int32(cap(allp)) {
			allp = allp[:nprocs]
		} else {
			nallp := make([]*p, nprocs)
			// Copy everything up to allp's cap so we never lose
			// previously allocated Ps.
			copy(nallp, allp[:cap(allp)])
			allp = nallp
		}

		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// Initialize any new Ps (reusing previously destroyed ones).
	for i := old; i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
		}
		pp.init(i)
		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
	}

	gp := getg()
	if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
		// Our current P survives; continue to use it.
		gp.m.p.ptr().status = _Prunning
		gp.m.p.ptr().mcache.prepareForSweep()
	} else {
		// Our current P (if any) is being destroyed. Release it and
		// take allp[0] instead, so the caller always has a P.
		if gp.m.p != 0 {
			trace := traceAcquire()
			if trace.ok() {
				// Make the trace coherent: the goroutine appears to
				// yield and its old P stops.
				trace.GoSched()
				trace.ProcStop(gp.m.p.ptr())
				traceRelease(trace)
			}
			gp.m.p.ptr().m = 0
		}
		gp.m.p = 0
		pp := allp[0]
		pp.m = 0
		pp.status = _Pidle
		acquirep(pp)
		trace := traceAcquire()
		if trace.ok() {
			trace.GoStart()
			traceRelease(trace)
		}
	}

	// The bootstrap mcache0 has been handed to P 0 by now.
	mcache0 = nil

	// Release resources from Ps that are going away.
	for i := nprocs; i < old; i++ {
		pp := allp[i]
		pp.destroy()
		// Can't free the P itself: it may be referenced by an M in a
		// syscall (see the *p pointers held elsewhere).
	}

	// Trim allp and the masks back down.
	if int32(len(allp)) != nprocs {
		lock(&allpLock)
		allp = allp[:nprocs]
		idlepMask = idlepMask.resize(nprocs)
		timerpMask = timerpMask.resize(nprocs)
		work.spanqMask = work.spanqMask.resize(nprocs)
		unlock(&allpLock)
	}

	// Partition the remaining Ps: idle ones vs. ones with local work.
	// Ps with work preferentially get the M they last ran on back
	// (cache locality); the rest get an M assigned below.
	var runnablePs *p
	var runnablePsNeedM *p
	var idlePs *p
	for i := nprocs - 1; i >= 0; i-- {
		pp := allp[i]
		if gp.m.p.ptr() == pp {
			// Our own P is already running; skip it.
			continue
		}
		pp.status = _Pidle
		if runqempty(pp) {
			pp.link.set(idlePs)
			idlePs = pp
			continue
		}
		// Try to reclaim the M this P last ran on, if it is idle.
		var mp *m
		if oldm := pp.oldm.get(); oldm != nil {
			// mgetSpecific returns nil if oldm is not on the idle list.
			mp = mgetSpecific(oldm)
		}
		if mp == nil {
			// No preferred M available; assign one in the next pass.
			pp.link.set(runnablePsNeedM)
			runnablePsNeedM = pp
			continue
		}
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}

	// Give any idle M (possibly nil) to the runnable Ps still lacking one.
	for runnablePsNeedM != nil {
		pp := runnablePsNeedM
		runnablePsNeedM = pp.link.ptr()
		// mget may return nil; startTheWorld will create Ms as needed.
		mp := mget()
		pp.m.set(mp)
		pp.link.set(runnablePs)
		runnablePs = pp
	}

	// If GC marking is in progress, hand some idle Ps to waiting GC
	// mark workers so they restart promptly when the world starts.
	if gcBlackenEnabled != 0 {
		for idlePs != nil {
			pp := idlePs
			// Stop once there are no more waiting workers to assign.
			ok, _ := gcController.assignWaitingGCWorker(pp, now)
			if !ok {
				// No workers left; the rest of idlePs stay idle.
				break
			}
			// This P now has GC work: move it to the runnable list
			// with an M (possibly nil, as above).
			idlePs = pp.link.ptr()
			mp := mget()
			pp.m.set(mp)
			pp.link.set(runnablePs)
			runnablePs = pp
		}
	}

	// Everything left truly idles.
	for idlePs != nil {
		pp := idlePs
		idlePs = pp.link.ptr()
		pidleput(pp, now)
	}

	stealOrder.reset(uint32(nprocs))
	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	if old != nprocs {
		// Notify the GC CPU limiter that the capacity changed.
		gcCPULimiter.resetCapacity(now, nprocs)
	}
	return runnablePs
}
6283
6284
6285
6286
6287
6288
6289
// acquirep associates pp with the current M and emits a ProcStart
// trace event.
func acquirep(pp *p) {
	// Do the part that isn't allowed to have write barriers.
	acquirepNoTrace(pp)

	// Have p; write barriers now allowed, so tracing (which may
	// allocate) happens here rather than in acquirepNoTrace.
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStart()
		traceRelease(trace)
	}
}
6301
6302
6303
6304
// acquirepNoTrace is the write-barrier-free part of acquirep: it wires
// pp to the current M without emitting a trace event.
func acquirepNoTrace(pp *p) {
	// Associate p and the current m.
	wirep(pp)

	// Remember which M this P ran on, so procresize can try to
	// reunite them later for cache locality. Stored as the M's
	// weak/self reference rather than a *m so it doesn't keep the
	// M alive or require a write barrier here.
	pp.oldm = pp.m.ptr().self

	// Perform deferred mcache sweep preparation before the first
	// allocation on this P.
	pp.mcache.prepareForSweep()
}
6320
6321
6322
6323
6324
6325
6326
// wirep is the first step of acquirep: it associates the current M
// with pp and transitions pp to _Prunning.
//
// pp must be in _Pidle with no M attached; otherwise this throws.
func wirep(pp *p) {
	gp := getg()

	if gp.m.p != 0 {
		// Call on the systemstack to give throw a reliable stack
		// (this can run in sensitive contexts).
		systemstack(func() {
			throw("wirep: already in go")
		})
	}
	if pp.m != 0 || pp.status != _Pidle {
		// Print diagnostic state before throwing; done on the
		// systemstack for the same reason as above.
		systemstack(func() {
			id := int64(0)
			if pp.m != 0 {
				id = pp.m.ptr().id
			}
			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
			throw("wirep: invalid p state")
		})
	}
	gp.m.p.set(pp)
	pp.m.set(gp.m)
	pp.status = _Prunning
}
6353
6354
// releasep dissociates the current M from its P, emitting a ProcStop
// trace event, and returns the released P.
func releasep() *p {
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStop(getg().m.p.ptr())
		traceRelease(trace)
	}
	return releasepNoTrace()
}
6363
6364
// releasepNoTrace is releasep without the trace event: it dissociates
// the current M from its P and returns the P in _Pidle.
func releasepNoTrace() *p {
	gp := getg()

	if gp.m.p == 0 {
		throw("releasep: invalid arg")
	}
	pp := gp.m.p.ptr()
	if pp.m.ptr() != gp.m || pp.status != _Prunning {
		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
		throw("releasep: invalid p state")
	}

	// Give up any GC mark worker the controller had earmarked for
	// this P; the P is going idle.
	gcController.releaseNextGCMarkWorker(pp)

	gp.m.p = 0
	pp.m = 0
	pp.status = _Pidle
	return pp
}
6385
// incidlelocked adjusts the count of idle locked Ms by v (+1 or -1).
// When incrementing, it re-runs the deadlock check, since one more
// thread has gone idle.
func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}
6394
6395
6396
6397
// checkdead reports deadlock: when all goroutines are blocked with no
// thread able to make progress, it crashes the program with a fatal
// error. sched.lock must be held.
func checkdead() {
	assertLockHeld(&sched.lock)

	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
	// there are no running goroutines: the calling (C) program may
	// still be running. The exception is Wasm, which is
	// single-threaded, so deadlock detection still applies there.
	if (islibrary || isarchive) && GOARCH != "wasm" {
		return
	}

	// If we are dying because of a signal caught on an already idle
	// thread, freezetheworld will cause all running goroutines to
	// block, which would look like deadlock; skip the check during
	// a panic.
	if panicking.Load() > 0 {
		return
	}

	// If we are not running under cgo, but have an extra M then
	// account for it. (It is possible to have an extra M on Windows
	// without cgo to accommodate callbacks created by syscall.NewCallback.)
	var run0 int32
	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
		run0 = 1
	}

	// Count threads that are actually doing work.
	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
	if run > run0 {
		return
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
		unlock(&sched.lock)
		throw("checkdead: inconsistent counts")
	}

	// Look for a goroutine that could still run. Waiting/preempted
	// goroutines count toward grunning (they could be woken);
	// runnable/running/syscall with no working threads is a bug.
	grunning := 0
	forEachG(func(gp *g) {
		if isSystemGoroutine(gp, false) {
			return
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting,
			_Gpreempted:
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			unlock(&sched.lock)
			throw("checkdead: runnable g")
		}
	})
	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
		unlock(&sched.lock)
		fatal("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Under faketime (testing), jump the clock to the next timer and
	// wake a thread to run it instead of reporting deadlock.
	if faketime != 0 {
		if when := timeSleepUntil(); when < maxWhen {
			faketime = when

			// Start an M to steal the timer.
			pp, _ := pidleget(faketime)
			if pp == nil {
				// There should always be a free P since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no p for timer")
			}
			mp := mget()
			if mp == nil {
				// There should always be a free M since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no m for timer")
			}
			// M must be spinning to steal. We set this to be
			// explicit, but since this is the only M it would
			// become spinning on its own anyways.
			sched.nmspinning.Add(1)
			mp.spinning = true
			mp.nextp.set(pp)
			notewakeup(&mp.park)
			return
		}
	}

	// There are no goroutines running, so we can look at the P's.
	// A pending timer means a goroutine will eventually wake: not dead.
	for _, pp := range allp {
		if len(pp.timers.heap) > 0 {
			return
		}
	}

	unlock(&sched.lock)
	fatal("all goroutines are asleep - deadlock!")
}
6501
6502
6503
6504
6505
6506
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If we go this long without a garbage collection, one
// is forced to run.
//
// This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 2 * 60 * 1e9

// haveSysmon indicates whether there is sysmon thread support.
// There are no threads on wasm, so no sysmon there.
const haveSysmon = GOARCH != "wasm"
6513
6514
6515
6516
// sysmon is the system monitor. It runs forever on a dedicated thread
// without a P, so it keeps working even when every P is busy or the
// world is otherwise stuck. It retakes Ps blocked in syscalls,
// preempts long-running goroutines, polls the network, wakes the
// scavenger, triggers forced GCs, and applies GOMAXPROCS updates.
func sysmon() {
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	lastgomaxprocs := int64(0)
	lasttrace := int64(0)
	idle := 0 // how many cycles in succession we had not wokeup somebody
	delay := uint32(0)

	for {
		// Back off the polling interval: 20us while busy, doubling
		// after 50 idle cycles, capped at 10ms.
		if idle == 0 {
			delay = 20
		} else if idle > 50 {
			delay *= 2
		}
		if delay > 10*1000 {
			delay = 10 * 1000
		}
		usleep(delay)

		// If the scheduler is fully idle (all Ps idle, or a GC stop
		// is pending) and schedtrace isn't active, sysmon can go to
		// deep sleep until the next timer or a syscall wakes it,
		// instead of burning CPU polling. The double-checked pattern
		// (test, lock, re-test) avoids taking sched.lock in the
		// common busy case.
		now := nanotime()
		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
			lock(&sched.lock)
			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
				syscallWake := false
				next := timeSleepUntil()
				if next > now {
					sched.sysmonwait.Store(true)
					unlock(&sched.lock)

					// Sleep until the next timer, but no longer than
					// half the forced-GC period so forced GCs still fire.
					sleep := forcegcperiod / 2
					if next-now < sleep {
						sleep = next - now
					}
					// For long sleeps, let the OS relax its timer
					// resolution (power saving on some platforms).
					shouldRelax := sleep >= osRelaxMinNS
					if shouldRelax {
						osRelax(true)
					}
					syscallWake = notetsleep(&sched.sysmonnote, sleep)
					if shouldRelax {
						osRelax(false)
					}
					lock(&sched.lock)
					sched.sysmonwait.Store(false)
					noteclear(&sched.sysmonnote)
				}
				if syscallWake {
					// Woken explicitly: go back to fast polling.
					idle = 0
					delay = 20
				}
			}
			unlock(&sched.lock)
		}

		lock(&sched.sysmonlock)
		// Update now in case we blocked on sysmonnote or spent a long
		// time blocked on sysmonlock.
		now = nanotime()

		// If cgo asked for periodic yields, oblige.
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}

		// Poll the network if it hasn't been polled for more than 10ms.
		lastpoll := sched.lastpoll.Load()
		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
			sched.lastpoll.CompareAndSwap(lastpoll, now)
			list, delta := netpoll(0) // non-blocking - returns list of goroutines
			if !list.empty() {
				// Need to decrement number of idle locked M's
				// (pretending that one more is running) before
				// injectglist. Otherwise it can lead to the following
				// situation: injectglist grabs all P's but before it
				// starts M's to run the P's, another M returns from
				// syscall, finishes running its G, observes that
				// there is no work to do and no other running M's and
				// reports deadlock.
				incidlelocked(-1)
				injectglist(&list)
				incidlelocked(1)
				netpollAdjustWaiters(delta)
			}
		}

		// Re-check the default GOMAXPROCS about once a second.
		if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
			sysmonUpdateGOMAXPROCS()
			lastgomaxprocs = now
		}
		if scavenger.sysmonWake.Load() != 0 {
			// Kick the scavenger awake if someone requested it.
			scavenger.wake()
		}

		// Retake P's blocked in syscalls and preempt long running goroutines.
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}

		// Check if we need to force a GC.
		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
			lock(&forcegc.lock)
			forcegc.idle.Store(false)
			var list gList
			list.push(forcegc.g)
			injectglist(&list)
			unlock(&forcegc.lock)
		}
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
		unlock(&sched.sysmonlock)
	}
}
6649
// sysmontick records the scheduler and syscall ticks sysmon last
// observed on a P, and when it observed them, so retake can tell how
// long the P has been stuck in a single tick.
type sysmontick struct {
	schedtick   uint32 // last observed pp.schedtick
	syscalltick uint32 // last observed pp.syscalltick
	schedwhen   int64  // time schedtick was last observed to change
	syscallwhen int64  // time syscalltick was last observed to change
}

// forcePreemptNS is the time slice given to a G before it is
// preempted: 10ms.
const forcePreemptNS = 10 * 1000 * 1000
6660
// retake, called by sysmon, preempts goroutines that have run on the
// same P for longer than forcePreemptNS, and retakes Ps whose threads
// have been blocked in a syscall long enough, handing the P off so its
// work can run elsewhere. Returns the number of Ps retaken from
// syscalls.
func retake(now int64) uint32 {
	n := 0
	// Prevent allp slice changes. This lock will be completely
	// uncontended unless we're already stopping the world.
	lock(&allpLock)
	// We can't use a range loop over allp because we may temporarily
	// drop allpLock inside the loop (to call incidlelocked and
	// handoffp), and allp may change across those drops; re-read
	// len(allp) and allp[i] each iteration.
	for i := 0; i < len(allp); i++ {
		pp := allp[i]
		if pp == nil || atomic.Load(&pp.status) != _Prunning {
			// pp can be nil if procresize has grown allp but not
			// yet created new Ps; non-running Ps have nothing to
			// preempt or retake.
			continue
		}
		pd := &pp.sysmontick
		sysretake := false

		// Preempt the P if it has been running the same schedtick
		// (i.e. the same goroutine) for at least forcePreemptNS.
		schedt := int64(pp.schedtick)
		if int64(pd.schedtick) != schedt {
			pd.schedtick = uint32(schedt)
			pd.schedwhen = now
		} else if pd.schedwhen+forcePreemptNS <= now {
			preemptone(pp)
			// If the P is actually in a syscall, preemptone does
			// nothing (no running G to preempt); fall through to the
			// syscall-retake path below regardless of syscalltick.
			sysretake = true
		}

		// Drop allpLock: the work below takes other locks.
		unlock(&allpLock)

		// Need to decrement number of idle locked M's (pretending
		// that one more is running) before we block the syscalling
		// goroutine, otherwise checkdead could report a false
		// deadlock.
		incidlelocked(-1)

		// Pin the goroutine in its syscall (if it is in one) so it
		// can't exit the syscall while we take its P away.
		thread, ok := setBlockOnExitSyscall(pp)
		if !ok {
			// Not in a syscall after all; nothing to retake.
			goto done
		}

		// If the syscalltick advanced since we last looked, this is a
		// new syscall: just record it and give it a full tick before
		// retaking (unless the preemption above already demanded it).
		if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
			pd.syscalltick = uint32(syst)
			pd.syscallwhen = now
			thread.resume()
			goto done
		}

		// On the one hand we don't want to retake Ps if there is no
		// other work to do; on the other hand we want to retake them
		// eventually, because they can prevent the sysmon thread from
		// deep sleep.
		if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
			thread.resume()
			goto done
		}

		// Take the P away from the syscalling thread and hand it off
		// so its run queue makes progress.
		thread.takeP()
		thread.resume()
		n++

		handoffp(pp)

		// Note: resume/handoffp must happen before re-acquiring
		// allpLock below.
	done:
		incidlelocked(1)
		lock(&allpLock)
	}
	unlock(&allpLock)
	return uint32(n)
}
6757
6758
6759
// syscallingThread describes a thread pinned in a syscall by
// setBlockOnExitSyscall: the goroutine, its M and P, and the G status
// to restore (the scan bit held on gp keeps it from leaving the
// syscall until resume is called).
type syscallingThread struct {
	gp     *g
	mp     *m
	pp     *p
	status uint32 // gp's status before the _Gscan bit was set
}
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
6780
// setBlockOnExitSyscall attempts to pin the goroutine currently in a
// syscall on pp so that it cannot complete exitsyscall until the
// returned syscallingThread is resume()d. Reports false if pp turns
// out not to have a goroutine blocked in a syscall.
//
// The pin works by taking the _Gscan bit on the goroutine; exiting the
// syscall must wait for that bit to clear.
func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
	if pp.status != _Prunning {
		return syscallingThread{}, false
	}

	// Everything read below is racy: the P's M, the M's curg, and the
	// G's status can all change under us. The strategy is to read
	// optimistically, take the scan bit, and then re-verify that the
	// m/p/g relationship still holds; if not, back out.
	mp := pp.m.ptr()
	if mp == nil {
		// No thread currently associated with the P.
		return syscallingThread{}, false
	}
	gp := mp.curg
	if gp == nil {
		// No user goroutine on the thread.
		return syscallingThread{}, false
	}
	status := readgstatus(gp) &^ _Gscan

	// _Gdeadextra is accepted alongside _Gsyscall — NOTE(review):
	// presumably this covers extra-M goroutines that behave like
	// syscalls from the scheduler's perspective; confirm against the
	// _Gdeadextra definition.
	if status != _Gsyscall && status != _Gdeadextra {
		// Not actually in a syscall.
		return syscallingThread{}, false
	}
	if !castogscanstatus(gp, status, status|_Gscan) {
		// Lost a race on the status word.
		return syscallingThread{}, false
	}
	if gp.m != mp || gp.m.p.ptr() != pp {
		// The relationship changed while we were acquiring the scan
		// bit; release it and report failure.
		casfrom_Gscanstatus(gp, status|_Gscan, status)
		return syscallingThread{}, false
	}
	return syscallingThread{gp, mp, pp, status}, true
}
6826
6827
6828
6829
6830
// gcstopP releases the pinned thread's P into _Pgcstop on behalf of a
// stop-the-world, recording the stop time and crediting stopwait.
// sched.lock must be held.
func (s syscallingThread) gcstopP() {
	assertLockHeld(&sched.lock)

	s.releaseP(_Pgcstop)
	s.pp.gcStopTime = nanotime()
	sched.stopwait--
}
6838
6839
6840
// takeP steals the pinned thread's P, leaving it in _Pidle for the
// caller to dispose of (e.g. via handoffp).
func (s syscallingThread) takeP() {
	s.releaseP(_Pidle)
}
6844
6845
6846
6847
// releaseP severs the association between the pinned thread's M and
// its P, placing the P in state (_Pidle or _Pgcstop) and emitting a
// ProcSteal trace event. The M is left accounted as "in syscall
// without a P".
func (s syscallingThread) releaseP(state uint32) {
	if state != _Pidle && state != _Pgcstop {
		throw("attempted to release P into a bad state")
	}
	trace := traceAcquire()
	s.pp.m = 0
	s.mp.p = 0
	atomic.Store(&s.pp.status, state)
	if trace.ok() {
		trace.ProcSteal(s.pp)
		traceRelease(trace)
	}
	addGSyscallNoP(s.mp)
	// Bump syscalltick so the syscalling goroutine notices its P was
	// taken when it exits the syscall.
	s.pp.syscalltick++
}
6863
6864
// resume unpins the thread by releasing the _Gscan bit, restoring the
// goroutine's original status and allowing it to exit its syscall.
func (s syscallingThread) resume() {
	casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
}
6868
6869
6870
6871
6872
6873
6874 func preemptall() bool {
6875 res := false
6876 for _, pp := range allp {
6877 if pp.status != _Prunning {
6878 continue
6879 }
6880 if preemptone(pp) {
6881 res = true
6882 }
6883 }
6884 return res
6885 }
6886
6887
6888
6889
6890
6891
6892
6893
6894
6895
6896
// preemptone requests preemption of the goroutine running on pp.
// Reports whether a request was issued. Entirely best-effort: it reads
// racy state, and the target may ignore or miss the request.
func preemptone(pp *p) bool {
	mp := pp.m.ptr()
	if mp == nil || mp == getg().m {
		// No thread, or it's us — nothing to preempt.
		return false
	}
	gp := mp.curg
	if gp == nil || gp == mp.g0 {
		// No user goroutine running (g0 is the scheduler stack).
		return false
	}
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// A goroutine in a syscall isn't executing Go code; nothing
		// to preempt (retake handles taking its P).
		return false
	}

	gp.preempt = true

	// Every call into the scheduler checks stackguard0 against
	// stackPreempt, so poisoning it makes the goroutine trap into the
	// scheduler at its next function prologue. This races with the
	// goroutine switching gs; in the worst case we deliver a spurious
	// (but harmless) preemption request.
	gp.stackguard0 = stackPreempt

	// Request an async (signal-based) preemption as well, which works
	// even in loops with no function calls.
	if preemptMSupported && debug.asyncpreemptoff == 0 {
		pp.preempt = true
		preemptM(mp)
	}

	return true
}
6927
// starttime is the time of the first schedtrace call, used as the
// zero point for the printed timestamps.
var starttime int64

// schedtrace prints a one-line (or, with detailed, multi-line)
// snapshot of scheduler state: global counters, per-P run queues, and
// with detailed also every M and G. Driven by GODEBUG=schedtrace.
func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
	}

	// Per-P state. The runqhead/runqtail reads are racy (atomic loads
	// of two independently changing words), so the printed sizes are
	// approximate.
	for i, pp := range allp {
		h := atomic.Load(&pp.runqhead)
		t := atomic.Load(&pp.runqtail)
		if detailed {
			print("  P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
			mp := pp.m.ptr()
			if mp != nil {
				print(mp.id)
			} else {
				print("nil")
			}
			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
		} else {
			// In non-detailed mode format the per-P queue lengths as
			// [len1 len2 len3 ...].
			print(" ")
			if i == 0 {
				print("[ ")
			}
			print(t - h)
			if i == len(allp)-1 {
				print(" ]")
			}
		}
	}

	if !detailed {
		// Print the per-P schedticks as well.
		print(" schedticks=[ ")
		for _, pp := range allp {
			print(pp.schedtick)
			print(" ")
		}
		print("]\n")
	}

	if !detailed {
		unlock(&sched.lock)
		return
	}

	// One line per M.
	for mp := allm; mp != nil; mp = mp.alllink {
		pp := mp.p.ptr()
		print("  M", mp.id, ": p=")
		if pp != nil {
			print(pp.id)
		} else {
			print("nil")
		}
		print(" curg=")
		if mp.curg != nil {
			print(mp.curg.goid)
		} else {
			print("nil")
		}
		print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
		if lockedg := mp.lockedg.ptr(); lockedg != nil {
			print(lockedg.goid)
		} else {
			print("nil")
		}
		print("\n")
	}

	// One line per G.
	forEachG(func(gp *g) {
		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
		if gp.m != nil {
			print(gp.m.id)
		} else {
			print("nil")
		}
		print(" lockedm=")
		if lockedm := gp.lockedm.ptr(); lockedm != nil {
			print(lockedm.id)
		} else {
			print("nil")
		}
		print("\n")
	})
	unlock(&sched.lock)
}
7025
// updateMaxProcsGState is the state of the background goroutine that
// applies automatic GOMAXPROCS updates requested by sysmon.
type updateMaxProcsGState struct {
	lock mutex
	g    *g          // the updateMaxProcsGoroutine goroutine
	idle atomic.Bool // true while the goroutine is parked awaiting work

	// procs is the GOMAXPROCS value to apply. Written by sysmon
	// before clearing idle and waking g.
	procs int32
}
7034
var (
	// updatemaxprocs records (for godebug metrics) use of the
	// non-default GODEBUG=updatemaxprocs setting.
	updatemaxprocs = &godebugInc{name: "updatemaxprocs"}

	// updateMaxProcsG holds the goroutine and handshake state used by
	// sysmonUpdateGOMAXPROCS / updateMaxProcsGoroutine to apply
	// automatic GOMAXPROCS changes.
	updateMaxProcsG updateMaxProcsGState

	// computeMaxProcsLock serializes computation of the default
	// GOMAXPROCS value (defaultGOMAXPROCS) between callers.
	computeMaxProcsLock mutex
)
7093
7094
7095
7096
// defaultGOMAXPROCSUpdateEnable starts the background goroutine that
// keeps GOMAXPROCS up to date with the default value, unless that
// behavior is disabled via GODEBUG=updatemaxprocs=0.
func defaultGOMAXPROCSUpdateEnable() {
	if debug.updatemaxprocs == 0 {
		// Automatic updates are disabled. Record use of the
		// non-default behavior in the updatemaxprocs godebug metric
		// and do not start the updater.
		updatemaxprocs.IncNonDefault()
		return
	}

	go updateMaxProcsGoroutine()
}
7116
// updateMaxProcsGoroutine is the body of the GOMAXPROCS updater
// goroutine. It parks until sysmonUpdateGOMAXPROCS computes a new
// value and wakes it, then applies that value with a stop-the-world.
// It exits permanently if the user has set GOMAXPROCS explicitly.
func updateMaxProcsGoroutine() {
	updateMaxProcsG.g = getg()
	lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
	for {
		lock(&updateMaxProcsG.lock)
		if updateMaxProcsG.idle.Load() {
			throw("updateMaxProcsGoroutine: phase error")
		}
		// Mark ourselves idle and park; sysmon clears idle and
		// readies us after writing updateMaxProcsG.procs.
		updateMaxProcsG.idle.Store(true)
		goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)

		// Woken with a new procs value to apply.
		stw := stopTheWorldGC(stwGOMAXPROCS)

		// If the user has explicitly set GOMAXPROCS in the meantime,
		// automatic updates are permanently off: stop the goroutine.
		lock(&sched.lock)
		custom := sched.customGOMAXPROCS
		unlock(&sched.lock)
		if custom {
			startTheWorldGC(stw)
			return
		}

		// newprocs is consumed by startTheWorldGC's procresize.
		// Re-clear customGOMAXPROCS under sched.lock in case it was
		// set between the check above and here.
		newprocs = updateMaxProcsG.procs
		lock(&sched.lock)
		sched.customGOMAXPROCS = false
		unlock(&sched.lock)

		startTheWorldGC(stw)
	}
}
7152
// sysmonUpdateGOMAXPROCS, called periodically by sysmon, recomputes
// the default GOMAXPROCS value and, if it differs from the current
// one, wakes updateMaxProcsGoroutine to apply it. It does nothing if
// the user has set GOMAXPROCS explicitly.
func sysmonUpdateGOMAXPROCS() {
	// Serialize defaultGOMAXPROCS computation with other callers.
	lock(&computeMaxProcsLock)

	// Snapshot current settings under sched.lock.
	lock(&sched.lock)
	custom := sched.customGOMAXPROCS
	curr := gomaxprocs
	unlock(&sched.lock)
	if custom {
		unlock(&computeMaxProcsLock)
		return
	}

	// Compute the new default (may read cgroup/CPU limits).
	procs := defaultGOMAXPROCS(0)
	unlock(&computeMaxProcsLock)
	if procs == curr {
		// Nothing to do.
		return
	}

	// Hand the new value to the updater goroutine if it is parked.
	// If it is already mid-update, skip this round; sysmon will try
	// again on its next pass.
	if updateMaxProcsG.idle.Load() {
		lock(&updateMaxProcsG.lock)
		updateMaxProcsG.procs = procs
		updateMaxProcsG.idle.Store(false)
		var list gList
		list.push(updateMaxProcsG.g)
		injectglist(&list)
		unlock(&updateMaxProcsG.lock)
	}
}
7188
7189
7190
7191
7192
7193
// schedEnableUser enables or disables the scheduling of user
// goroutines (system goroutines are unaffected).
//
// This does not stop already running user goroutines, so the caller
// should first stop the world when disabling user goroutines.
func schedEnableUser(enable bool) {
	lock(&sched.lock)
	if sched.disable.user == !enable {
		// Already in the requested state.
		unlock(&sched.lock)
		return
	}
	sched.disable.user = !enable
	if enable {
		// Release the goroutines parked while scheduling was
		// disabled, and start idle Ps to run them.
		n := sched.disable.runnable.size
		globrunqputbatch(&sched.disable.runnable)
		unlock(&sched.lock)
		for ; n != 0 && sched.npidle.Load() != 0; n-- {
			startm(nil, false, false)
		}
	} else {
		unlock(&sched.lock)
	}
}
7212
7213
7214
7215
7216
7217 func schedEnabled(gp *g) bool {
7218 assertLockHeld(&sched.lock)
7219
7220 if sched.disable.user {
7221 return isSystemGoroutine(gp, true)
7222 }
7223 return true
7224 }
7225
7226
7227
7228
7229
7230
// mput puts mp on the idle M list and re-runs the deadlock check
// (one more thread just went idle).
//
// sched.lock must be held.
func mput(mp *m) {
	assertLockHeld(&sched.lock)

	sched.midle.push(unsafe.Pointer(mp))
	sched.nmidle++
	checkdead()
}
7238
7239
7240
7241
7242
7243
7244 func mget() *m {
7245 assertLockHeld(&sched.lock)
7246
7247 mp := (*m)(sched.midle.pop())
7248 if mp != nil {
7249 sched.nmidle--
7250 }
7251 return mp
7252 }
7253
7254
7255
7256
7257
7258
7259
7260
// mgetSpecific removes the specific M mp from the idle list and
// returns it, or returns nil if mp is not currently on the list.
//
// sched.lock must be held.
func mgetSpecific(mp *m) *m {
	assertLockHeld(&sched.lock)

	if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
		// Zeroed list links mean mp is not linked into midle.
		// NOTE(review): this assumes a list member always has a
		// non-zero prev or next link (e.g. sentinel-based list).
		return nil
	}

	sched.midle.remove(unsafe.Pointer(mp))
	sched.nmidle--

	return mp
}
7274
7275
7276
7277
7278
7279
// globrunqput puts gp on the tail of the global runnable queue.
//
// sched.lock must be held.
func globrunqput(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBack(gp)
}
7285
7286
7287
7288
7289
7290
// globrunqputhead puts gp at the head of the global runnable queue.
//
// sched.lock must be held.
func globrunqputhead(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.push(gp)
}
7296
7297
7298
7299
7300
7301
7302
// globrunqputbatch appends the whole batch to the tail of the global
// runnable queue, leaving *batch empty.
//
// sched.lock must be held.
func globrunqputbatch(batch *gQueue) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBackAll(*batch)
	*batch = gQueue{}
}
7309
7310
7311
7312 func globrunqget() *g {
7313 assertLockHeld(&sched.lock)
7314
7315 if sched.runq.size == 0 {
7316 return nil
7317 }
7318
7319 return sched.runq.pop()
7320 }
7321
7322
7323
// globrunqgetbatch takes up to n Gs from the head of the global
// runnable queue: the first is returned as gp, the rest in q. The
// batch size is capped at a fair per-P share of the queue.
//
// sched.lock must be held.
func globrunqgetbatch(n int32) (gp *g, q gQueue) {
	assertLockHeld(&sched.lock)

	if sched.runq.size == 0 {
		return
	}

	// Don't take more than the queue holds, nor more than this P's
	// fair share (size/gomaxprocs, rounded up).
	n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)

	gp = sched.runq.pop()
	n--

	for ; n > 0; n-- {
		gp1 := sched.runq.pop()
		q.pushBack(gp1)
	}
	return
}
7342
7343
// pMask is an atomic bitmask with one bit per P, indexed by P id.
type pMask []uint32

// read reports whether the bit for P id is set.
func (p pMask) read(id uint32) bool {
	word := id / 32
	mask := uint32(1) << (id % 32)
	return (atomic.Load(&p[word]) & mask) != 0
}
7352
7353
7354 func (p pMask) set(id int32) {
7355 word := id / 32
7356 mask := uint32(1) << (id % 32)
7357 atomic.Or(&p[word], mask)
7358 }
7359
7360
7361 func (p pMask) clear(id int32) {
7362 word := id / 32
7363 mask := uint32(1) << (id % 32)
7364 atomic.And(&p[word], ^mask)
7365 }
7366
7367
7368 func (p pMask) any() bool {
7369 for i := range p {
7370 if atomic.Load(&p[i]) != 0 {
7371 return true
7372 }
7373 }
7374 return false
7375 }
7376
7377
7378
7379
7380
// resize returns a pMask sized for nprocs Ps, reusing p's backing
// array when it is large enough (the result may alias p) and
// otherwise allocating a new one with p's bits copied over.
func (p pMask) resize(nprocs int32) pMask {
	maskWords := (nprocs + 31) / 32

	if maskWords <= int32(cap(p)) {
		return p[:maskWords]
	}
	newMask := make([]uint32, maskWords)
	// No need to copy beyond len(p): bits for Ps that never existed
	// are zero.
	copy(newMask, p)
	return newMask
}
7392
7393
7394
7395
7396
7397
7398
7399
7400
7401
7402
7403
// pidleput puts pp on the idle P list and updates the timer/idle
// masks and the idle limiter event. pp must have an empty run queue.
// now is the current time, or 0 to have pidleput read the clock; the
// (possibly updated) time is returned.
//
// sched.lock must be held.
func pidleput(pp *p, now int64) int64 {
	assertLockHeld(&sched.lock)

	if !runqempty(pp) {
		throw("pidleput: P has non-empty run queue")
	}
	if now == 0 {
		now = nanotime()
	}
	// A P with no timers need not be consulted by timer stealing.
	if pp.timers.len.Load() == 0 {
		timerpMask.clear(pp.id)
	}
	idlepMask.set(pp.id)
	pp.link = sched.pidle
	sched.pidle.set(pp)
	sched.npidle.Add(1)
	// Track idle time for the GC CPU limiter.
	if !pp.limiterEvent.start(limiterEventIdle, now) {
		throw("must be able to track idle limiter event")
	}
	return now
}
7425
7426
7427
7428
7429
7430
7431
7432
// pidleget takes a P off the idle list, or returns nil if none is
// idle. now is the current time, or 0 to have pidleget read the clock
// if needed; the (possibly updated) time is returned.
//
// sched.lock must be held.
func pidleget(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp := sched.pidle.ptr()
	if pp != nil {
		// Timer may get added at any time now.
		if now == 0 {
			now = nanotime()
		}
		timerpMask.set(pp.id)
		idlepMask.clear(pp.id)
		sched.pidle = pp.link
		sched.npidle.Add(-1)
		pp.limiterEvent.stop(limiterEventIdle, now)
	}
	return pp, now
}
7450
7451
7452
7453
7454
7455
7456
7457
7458
7459
7460
// pidlegetSpinning is pidleget for callers that are about to spin up
// the P: if no idle P is available, it additionally sets
// sched.needspinning so that the next thread releasing a P (or
// submitting work) knows a spinner is wanted.
//
// sched.lock must be held.
func pidlegetSpinning(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp, now := pidleget(now)
	if pp == nil {
		// No available P. Signal demand so work submitted between
		// now and the caller giving up is not lost.
		sched.needspinning.Store(1)
		return nil, now
	}

	return pp, now
}
7475
7476
7477
// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.
func runqempty(pp *p) bool {
	// Defends against a race where 1) pp has G1 in runnext but
	// runqhead == runqtail, 2) runqput on pp kicks G1 to the runq,
	// 3) runqget on pp empties runnext. Simply observing
	// runqhead == runqtail and then runnext == nil could then report
	// empty while G1 was always present. The loop re-reads runqtail
	// to get a consistent snapshot of (head, tail, runnext).
	for {
		head := atomic.Load(&pp.runqhead)
		tail := atomic.Load(&pp.runqtail)
		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
		if tail == atomic.Load(&pp.runqtail) {
			return head == tail && runnext == 0
		}
	}
}
7492
7493
7494
7495
7496
7497
7498
7499
7500
7501
// randomizeScheduler perturbs scheduling decisions under the race
// detector to shake out order-dependent bugs in user programs.
const randomizeScheduler = raceenabled
7503
7504
7505
7506
7507
7508
// runqput tries to put gp on the local runnable queue of pp.
// If next is true, it puts gp in the pp.runnext slot (to run
// immediately after the current G). If the local queue is full, the
// overflow goes to the global queue via runqputslow.
// Executed only by the owner P.
func runqput(pp *p, gp *g, next bool) {
	if !haveSysmon && next {
		// A runnext goroutine shares the current G's time slice, and
		// without sysmon there is no preemption to end that slice —
		// two goroutines ping-ponging through runnext could starve
		// everything else. Disable runnext entirely on such platforms
		// (wasm).
		next = false
	}
	// Under the race detector, randomly skip the runnext fast path to
	// perturb ordering.
	if randomizeScheduler && next && randn(2) == 0 {
		next = false
	}

	if next {
	retryNext:
		oldnext := pp.runnext
		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Kick the displaced runnext G into the regular queue below.
		gp = oldnext.ptr()
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	t := pp.runqtail
	if t-h < uint32(len(pp.runq)) {
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		atomic.StoreRel(&pp.runqtail, t+1) // store-release, makes the item available for consumption
		return
	}
	if runqputslow(pp, gp, h, t) {
		return
	}
	// The queue is not full, now the above put must succeed.
	goto retry
}
7552
7553
7554
// runqputslow moves gp and half of pp's local queue to the global
// queue in one batch (executed only by the owner P). Returns false if
// a concurrent stealer changed the queue head, in which case the
// caller should retry the fast path.
func runqputslow(pp *p, gp *g, h, t uint32) bool {
	var batch [len(pp.runq)/2 + 1]*g

	// First, grab a batch from the local queue.
	n := t - h
	n = n / 2
	if n != uint32(len(pp.runq)/2) {
		// Only called when the queue was observed full.
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
	}
	if !atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
		return false
	}
	batch[n] = gp

	if randomizeScheduler {
		// Shuffle the batch under the race detector.
		for i := uint32(1); i <= n; i++ {
			j := cheaprandn(i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the goroutines into a list.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}

	q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}

	// Now put the batch on the global queue.
	lock(&sched.lock)
	globrunqputbatch(&q)
	unlock(&sched.lock)
	return true
}
7592
7593
7594
7595
// runqputbatch tries to put as much of q as fits onto the local
// runnable queue of pp; whatever doesn't fit stays in q.
// Executed only by the owner P.
func runqputbatch(pp *p, q *gQueue) {
	if q.empty() {
		return
	}
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	n := uint32(0)
	for !q.empty() && t-h < uint32(len(pp.runq)) {
		gp := q.pop()
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		t++
		n++
	}

	if randomizeScheduler {
		// Shuffle only the newly added entries (tail..tail+n); they
		// are not yet visible to stealers since runqtail hasn't been
		// published.
		off := func(o uint32) uint32 {
			return (pp.runqtail + o) % uint32(len(pp.runq))
		}
		for i := uint32(1); i < n; i++ {
			j := cheaprandn(i + 1)
			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
		}
	}

	// Publish the new entries with a store-release.
	atomic.StoreRel(&pp.runqtail, t)

	return
}
7624
7625
7626
7627
7628
// runqget gets a G from the local runnable queue of pp, preferring
// the runnext slot. inheritTime is true if the G should inherit the
// remaining time slice (i.e. it came from runnext).
// Executed only by the owner P.
func runqget(pp *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	next := pp.runnext
	// If the runnext is non-0 and the CAS fails, it could only have
	// been stolen by another P, because other Ps can race to set
	// runnext to 0, but only the current P can set it to non-0.
	// Hence, there's no need to retry this CAS if it fails.
	if next != 0 && pp.runnext.cas(next, 0) {
		return next.ptr(), true
	}

	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := pp.runqtail
		if t == h {
			return nil, false
		}
		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
			return gp, false
		}
	}
}
7651
7652
7653
// runqdrain removes all Gs from pp's local runnable queue (including
// runnext) and returns them in scheduling order.
// Executed only by the owner P.
func runqdrain(pp *p) (drainQ gQueue) {
	oldNext := pp.runnext
	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
		drainQ.pushBack(oldNext.ptr())
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
	t := pp.runqtail
	qn := t - h
	if qn == 0 {
		return
	}
	if qn > uint32(len(pp.runq)) { // read inconsistent h and t
		goto retry
	}

	if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
		goto retry
	}

	// We've claimed [h, h+qn) atomically above, so the entries can be
	// read without further synchronization: only the owner P appends,
	// and stealers can no longer claim this range. (A single CAS
	// claiming the whole range also keeps draining O(1) in CAS
	// attempts regardless of stealers.)
	for i := uint32(0); i < qn; i++ {
		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
		drainQ.pushBack(gp)
	}
	return
}
7688
7689
7690
7691
7692
// runqgrab grabs a batch of goroutines from pp's local runnable queue
// into batch (a ring buffer, starting at batchHead) and returns the
// number grabbed. It takes at most half of pp's queue; if the queue is
// empty and stealRunNextG is set, it may take pp.runnext instead.
// Can be executed by any P.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
		n := t - h
		n = n - n/2
		if n == 0 {
			if stealRunNextG {
				// Try to steal from pp.runnext.
				if next := pp.runnext; next != 0 {
					if pp.status == _Prunning {
						if mp := pp.m.ptr(); mp != nil {
							if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
								// Sleep to ensure that pp isn't about
								// to run the g we are about to steal.
								// The important use case here is when
								// the g running on pp ready()s another
								// g and then almost immediately blocks.
								// Instead of stealing runnext in this
								// window, back off to give pp a chance
								// to schedule runnext. This avoids
								// thrashing gs between different Ps.
								// (No backoff needed when pp is in a
								// syscall — it isn't about to run
								// anything.)
								if !osHasLowResTimer {
									usleep(3)
								} else {
									// On some platforms system timer
									// granularity is 1-15ms, far too
									// coarse for this optimization;
									// just yield.
									osyield()
								}
							}
						}
					}
					if !pp.runnext.cas(next, 0) {
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
			continue
		}
		for i := uint32(0); i < n; i++ {
			g := pp.runq[(h+i)%uint32(len(pp.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
			return n
		}
	}
}
7757
7758
7759
7760
// runqsteal steals half of the elements from the local runnable queue of p2
// and puts them onto the local runnable queue of pp.
// Returns one of the stolen elements (or nil if the steal failed).
func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
	t := pp.runqtail
	// runqgrab writes the stolen gs directly into pp.runq starting at t.
	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	n--
	// Return the last stolen g to the caller instead of queueing it.
	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
	if n == 0 {
		// Only one g was stolen; nothing to publish in pp's queue.
		return gp
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(pp.runq)) {
		throw("runqsteal: runq overflow")
	}
	// Publish the remaining n stolen gs by advancing the tail.
	atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the items available for consumption
	return gp
}
7779
7780
7781
// A gQueue is a FIFO queue of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.
type gQueue struct {
	head guintptr // first g, or 0 if empty
	tail guintptr // last g, or 0 if empty
	size int32    // number of gs in the queue
}
7787
7788
7789 func (q *gQueue) empty() bool {
7790 return q.head == 0
7791 }
7792
7793
7794 func (q *gQueue) push(gp *g) {
7795 gp.schedlink = q.head
7796 q.head.set(gp)
7797 if q.tail == 0 {
7798 q.tail.set(gp)
7799 }
7800 q.size++
7801 }
7802
7803
7804 func (q *gQueue) pushBack(gp *g) {
7805 gp.schedlink = 0
7806 if q.tail != 0 {
7807 q.tail.ptr().schedlink.set(gp)
7808 } else {
7809 q.head.set(gp)
7810 }
7811 q.tail.set(gp)
7812 q.size++
7813 }
7814
7815
7816
7817 func (q *gQueue) pushBackAll(q2 gQueue) {
7818 if q2.tail == 0 {
7819 return
7820 }
7821 q2.tail.ptr().schedlink = 0
7822 if q.tail != 0 {
7823 q.tail.ptr().schedlink = q2.head
7824 } else {
7825 q.head = q2.head
7826 }
7827 q.tail = q2.tail
7828 q.size += q2.size
7829 }
7830
7831
7832
7833 func (q *gQueue) pop() *g {
7834 gp := q.head.ptr()
7835 if gp != nil {
7836 q.head = gp.schedlink
7837 if q.head == 0 {
7838 q.tail = 0
7839 }
7840 q.size--
7841 }
7842 return gp
7843 }
7844
7845
7846 func (q *gQueue) popList() gList {
7847 stack := gList{q.head, q.size}
7848 *q = gQueue{}
7849 return stack
7850 }
7851
7852
7853
// A gList is a singly-linked stack of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.
type gList struct {
	head guintptr // first g, or 0 if empty
	size int32    // number of gs in the list
}
7858
7859
7860 func (l *gList) empty() bool {
7861 return l.head == 0
7862 }
7863
7864
7865 func (l *gList) push(gp *g) {
7866 gp.schedlink = l.head
7867 l.head.set(gp)
7868 l.size++
7869 }
7870
7871
7872 func (l *gList) pushAll(q gQueue) {
7873 if !q.empty() {
7874 q.tail.ptr().schedlink = l.head
7875 l.head = q.head
7876 l.size += q.size
7877 }
7878 }
7879
7880
7881 func (l *gList) pop() *g {
7882 gp := l.head.ptr()
7883 if gp != nil {
7884 l.head = gp.schedlink
7885 l.size--
7886 }
7887 return gp
7888 }
7889
7890
7891 func setMaxThreads(in int) (out int) {
7892 lock(&sched.lock)
7893 out = int(sched.maxmcount)
7894 if in > 0x7fffffff {
7895 sched.maxmcount = 0x7fffffff
7896 } else {
7897 sched.maxmcount = int32(in)
7898 }
7899 checkmcount()
7900 unlock(&sched.lock)
7901 return
7902 }
7903
7904
7905
7906
7907
7908
7909
7910
7911
7912
7913
7914
7915
7916 func procPin() int {
7917 gp := getg()
7918 mp := gp.m
7919
7920 mp.locks++
7921 return int(mp.p.ptr().id)
7922 }
7923
7924
7925
7926
7927
7928
7929
7930
7931
7932
7933
7934
7935
7936 func procUnpin() {
7937 gp := getg()
7938 gp.m.locks--
7939 }
7940
7941
7942
// sync_runtime_procPin is the entry point for package sync's procPin
// (presumably wired up via go:linkname — the directive is not visible
// in this excerpt). It simply delegates to procPin.
func sync_runtime_procPin() int {
	return procPin()
}
7946
7947
7948
// sync_runtime_procUnpin is the entry point for package sync's
// procUnpin (presumably wired up via go:linkname — the directive is
// not visible in this excerpt). It simply delegates to procUnpin.
func sync_runtime_procUnpin() {
	procUnpin()
}
7952
7953
7954
// sync_atomic_runtime_procPin is the entry point for sync/atomic's
// procPin (presumably wired up via go:linkname — the directive is not
// visible in this excerpt). It simply delegates to procPin.
func sync_atomic_runtime_procPin() int {
	return procPin()
}
7958
7959
7960
// sync_atomic_runtime_procUnpin is the entry point for sync/atomic's
// procUnpin (presumably wired up via go:linkname — the directive is
// not visible in this excerpt). It simply delegates to procUnpin.
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}
7964
7965
7966
7967
7968
// internal_sync_runtime_canSpin reports whether it is worthwhile for a
// sync mutex to keep spinning at spin iteration i.
// sync.Mutex is cooperative, so spinning is conservative: spin only a
// few times (i < active_spin), only on a multicore machine, only when
// there is at least one other P that is neither idle nor spinning, and
// only when the current P's local run queue is empty (so spinning does
// not delay runnable work).
func internal_sync_runtime_canSpin(i int) bool {
	if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		// Local work is runnable; yield to the scheduler instead of spinning.
		return false
	}
	return true
}
7983
7984
7985
// internal_sync_runtime_doSpin performs one round of active spinning
// by executing active_spin_cnt iterations of procyield.
func internal_sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}
7989
7990
7991
7992
7993
7994
7995
7996
7997
7998
7999
8000
8001
8002
8003
// sync_runtime_canSpin is a legacy-named delegate to
// internal_sync_runtime_canSpin, kept so external linkname users keep
// working (the go:linkname directive is not visible in this excerpt).
func sync_runtime_canSpin(i int) bool {
	return internal_sync_runtime_canSpin(i)
}
8007
8008
8009
8010
8011
8012
8013
8014
8015
8016
8017
8018
8019
// sync_runtime_doSpin is a legacy-named delegate to
// internal_sync_runtime_doSpin, kept so external linkname users keep
// working (the go:linkname directive is not visible in this excerpt).
func sync_runtime_doSpin() {
	internal_sync_runtime_doSpin()
}
8023
// stealOrder is the global randomized ordering used when searching
// other Ps' run queues for work to steal.
var stealOrder randomOrder
8025
8026
8027
8028
8029
// randomOrder (with randomEnum) enumerates the integers [0, count) in
// pseudo-random orders without repetition, used for randomized work
// stealing. It relies on the fact that stepping through Z/count by any
// increment coprime to count visits every residue exactly once.
type randomOrder struct {
	count    uint32   // size of the set being enumerated
	coprimes []uint32 // all values in [1, count] coprime to count
}
8034
// randomEnum is a single enumeration over [0, count) produced by
// randomOrder.start.
type randomEnum struct {
	i     uint32 // number of elements yielded so far
	count uint32 // total number of elements
	pos   uint32 // current position
	inc   uint32 // step, coprime to count
}
8041
8042 func (ord *randomOrder) reset(count uint32) {
8043 ord.count = count
8044 ord.coprimes = ord.coprimes[:0]
8045 for i := uint32(1); i <= count; i++ {
8046 if gcd(i, count) == 1 {
8047 ord.coprimes = append(ord.coprimes, i)
8048 }
8049 }
8050 }
8051
8052 func (ord *randomOrder) start(i uint32) randomEnum {
8053 return randomEnum{
8054 count: ord.count,
8055 pos: i % ord.count,
8056 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
8057 }
8058 }
8059
8060 func (enum *randomEnum) done() bool {
8061 return enum.i == enum.count
8062 }
8063
8064 func (enum *randomEnum) next() {
8065 enum.i++
8066 enum.pos = (enum.pos + enum.inc) % enum.count
8067 }
8068
8069 func (enum *randomEnum) position() uint32 {
8070 return enum.pos
8071 }
8072
// gcd returns the greatest common divisor of a and b using the
// iterative Euclidean algorithm. gcd(x, 0) == x, so gcd(0, 0) == 0.
func gcd(a, b uint32) uint32 {
	x, y := a, b
	for y != 0 {
		x, y = y, x%y
	}
	return x
}
8079
8080
8081
// An initTask represents one package's set of init functions, laid out
// by the linker: this 8-byte header is immediately followed in memory
// by nfns function pointers (see doInit1, which reads them via
// unsafe pointer arithmetic).
type initTask struct {
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done (per doInit1)
	nfns  uint32 // number of init functions following this header
	// followed by nfns function addresses, one per init function to run
}
8087
8088
8089
// inittrace accumulates init-tracing state and allocation statistics
// while init tracing is active (enabled when debug.inittrace != 0).
var inittrace tracestat
8091
// tracestat holds the bookkeeping for init tracing: whether tracing is
// on, which goroutine runs init, and allocation counters that doInit1
// snapshots before/after each task to report per-package costs.
type tracestat struct {
	active bool   // init tracing activation status
	id     uint64 // init goroutine id
	allocs uint64 // heap allocations
	bytes  uint64 // heap allocated bytes
}
8098
8099 func doInit(ts []*initTask) {
8100 for _, t := range ts {
8101 doInit1(t)
8102 }
8103 }
8104
// doInit1 runs the init functions of a single initTask, guarding
// against recursive initialization and, when init tracing is active,
// printing a per-package line with timing and allocation deltas.
func doInit1(t *initTask) {
	switch t.state {
	case 2: // fully initialized
		return
	case 1: // initialization in progress
		throw("recursive call during initialization - linker skew")
	default: // not initialized yet
		t.state = 1 // initialization in progress

		var (
			start  int64
			before tracestat
		)

		if inittrace.active {
			start = nanotime()
			// Snapshot the allocation stats so deltas can be reported below.
			before = inittrace
		}

		if t.nfns == 0 {
			// The linker should never emit a task with no functions.
			throw("inittask with no functions")
		}

		// The function pointers are laid out immediately after the
		// 8-byte (state + nfns) header of the initTask.
		firstFunc := add(unsafe.Pointer(t), 8)
		for i := uint32(0); i < t.nfns; i++ {
			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
			// Reinterpret the stored address as a func() value and call it.
			f := *(*func())(unsafe.Pointer(&p))
			f()
		}

		if inittrace.active {
			end := nanotime()
			// Snapshot the stats again to compute this task's deltas.
			after := inittrace

			// Use the first init function's PC to recover the package path.
			f := *(*func())(unsafe.Pointer(&firstFunc))
			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))

			var sbuf [24]byte
			print("init ", pkg, " @")
			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
			print("\n")
		}

		t.state = 2 // initialization done
	}
}
8157
View as plain text