Source file
src/runtime/proc.go
package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/exithook"
	"internal/runtime/sys"
	"internal/strconv"
	"internal/stringslite"
	"unsafe"
)

// modinfo holds the module/build information embedded in the binary by the
// go command.
var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.
var (
	m0           m
	g0           g
	mcache0      *mcache
	raceprocctx0 uintptr
	raceFiniLock mutex
)

// runtime_inittasks is the list of the runtime package's init tasks,
// populated by the linker.
var runtime_inittasks []*initTask

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// initSigmask is the signal mask used by newly created Ms before minit runs.
var initSigmask sigset

// The main goroutine.
func main() {
	mp := getg().m

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	mp.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Decimal values are used so the limits read nicely in overflow messages.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// An upper limit for max stack size. Used to avoid random crashes
	// if the stack size limit is wildly overridden.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the world started.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks) // Must be before defer.

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()
	defaultGOMAXPROCSUpdateEnable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		// Set the x_crosscall2_ptr C function pointer variable point to crosscall2.
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the initializing tasks. Depending on build mode this
	// list can arrive a few different ways, but it will always
	// contain the init tasks computed by the linker for all the
	// packages in the program (excluding those added at runtime
	// by package plugin).
	last := lastmoduledatap
	for m := &firstmoduledata; true; m = m.next {
		doInit(m.inittasks)
		if m == last {
			break
		}
	}

	// Disable init tracing after main init done to avoid collecting
	// statistics in malloc and newproc.
	inittrace.active = false

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		if GOARCH == "wasm" {
			// Library mode on Wasm: pause instead of returning, so the
			// host can keep calling exported functions. pause should
			// not return.
			pause(sys.GetCallerSP() - 16)
			panic("unreachable")
		}
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()

	// Run exit hooks and the leak checker now if ASAN is enabled and the
	// program may keep running C or exported-Go code after main returns
	// (c-archive, c-shared, or active cgo callbacks).
	exitHooksRun := false
	if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		runExitHooks(0)
		exitHooksRun = true
		lsandoleakcheck()
	}

	// If a panic is in flight on another goroutine, give it a chance to
	// finish printing its trace and exit before main exits.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	if !exitHooksRun {
		runExitHooks(0)
	}
	if raceenabled {
		racefini()
	}

	exit(0)
	for {
		// Crash with a nil dereference if exit somehow returns.
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}

	// See the comment on leak checking in runtime.main.
	if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
		lsandoleakcheck()
	}
}

func init() {
	exithook.Gosched = Gosched
	exithook.Goid = func() uint64 { return getg().goid }
	exithook.Throw = throw
}

func runExitHooks(code int) {
	exithook.Run(code)
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}

// goschedguarded yields the processor like Gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
func goschedguarded() {
	mcall(goschedguarded_m)
}

// goschedIfBusy yields the processor like Gosched, but only does so if
// the goroutine has been asked to preempt or there are no idle Ps.
// Otherwise there is free idle time and yielding is unnecessary.
func goschedIfBusy() {
	gp := getg()
	// Call gosched if gp.preempt is set; we may be in a tight loop that
	// doesn't otherwise yield.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}

// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack. If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between the call
// to gopark and the call to unlockf. The wait reason is displayed in stack
// traces and heap dumps; reasons should be unique and descriptive.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// goparkunlock puts the current goroutine into a waiting state and unlocks
// the lock by calling parkunlock_c on the system stack.
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}

// goready marks gp ready to run, queuing it on the current P.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}

// acquireSudog allocates a sudog from the per-P cache, refilling from the
// global cache (or the heap) as needed.
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from the central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem.get() != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

// releaseSudog returns s to the per-P cache, spilling half of a full cache
// to the global cache.
func releaseSudog(s *sudog) {
	if s.elem.get() != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c.get() != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of the local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// called from assembly.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

// badmorestackg0 reports a stack overflow on the g0 (scheduler) stack,
// dumping a traceback on the crash stack when that is supported.
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include pc and sp in stack trace
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}

// badmorestackgsignal reports a stack overflow on the signal-handling stack.
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}

func badctxt() {
	throw("ctxt != 0")
}
// gcrash is a fake g that can be used when crashing due to bad
// stack conditions.
620 var gcrash g
621
622 var crashingG atomic.Pointer[g]
// switchToCrashStack switches to the dedicated crash stack (gcrash) and calls
// fn, with special handling of concurrent and recursive cases. fn must not
// return.
632 func switchToCrashStack(fn func()) {
633 me := getg()
634 if crashingG.CompareAndSwapNoWB(nil, me) {
635 switchToCrashStack0(fn)
636 abort()
637 }
638 if crashingG.Load() == me {
639
640 writeErrStr("fatal: recursive switchToCrashStack\n")
641 abort()
642 }
643
644 usleep_no_g(100)
645 writeErrStr("fatal: concurrent switchToCrashStack\n")
646 abort()
647 }
// crashStackImplemented reports whether switching to the dedicated crash
// stack is supported on this platform.
652 const crashStackImplemented = GOOS != "windows"
653
654
655 func switchToCrashStack0(fn func())
656
657 func lockedOSThread() bool {
658 gp := getg()
659 return gp.lockedm != 0 && gp.m.lockedg != 0
660 }
661
662 var (
// allgs contains all Gs ever created (including dead Gs), and thus
// never shrinks.
//
// Access via the slice is protected by allglock or stop-the-world.
669 allglock mutex
670 allgs []*g
// allglen and allgptr are atomic variables that contain len(allgs) and
// &allgs[0] respectively, for use by readers that cannot take allglock
// (see atomicAllG). Writes are protected by allglock; allgptr is updated
// before allglen, so readers never observe a length larger than the array.
684 allglen uintptr
685 allgptr **g
686 )
687
688 func allgadd(gp *g) {
689 if readgstatus(gp) == _Gidle {
690 throw("allgadd: bad status Gidle")
691 }
692
693 lock(&allglock)
694 allgs = append(allgs, gp)
695 if &allgs[0] != allgptr {
696 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
697 }
698 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
699 unlock(&allglock)
700 }
// allGsSnapshot returns a snapshot of the slice of all Gs.
//
// The world must be stopped or allglock must be held.
705 func allGsSnapshot() []*g {
706 assertWorldStoppedOrLockHeld(&allglock)
707
708
709
710
711
712
713 return allgs[:len(allgs):len(allgs)]
714 }
// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
717 func atomicAllG() (**g, uintptr) {
718 length := atomic.Loaduintptr(&allglen)
719 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
720 return ptr, length
721 }
// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
724 func atomicAllGIndex(ptr **g, i uintptr) *g {
725 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
726 }
727
// forEachG calls fn on every G from allgs.
//
// forEachG takes a lock to exclude concurrent addition of new Gs.
731 func forEachG(fn func(gp *g)) {
732 lock(&allglock)
733 for _, gp := range allgs {
734 fn(gp)
735 }
736 unlock(&allglock)
737 }
738
// forEachGRace calls fn on every G from allgs.
//
// Unlike forEachG, it doesn't take a lock and doesn't exclude addition of
// new Gs.
743 func forEachGRace(fn func(gp *g)) {
744 ptr, length := atomicAllG()
745 for i := uintptr(0); i < length; i++ {
746 gp := atomicAllGIndex(ptr, i)
747 fn(gp)
748 }
749 return
750 }
751
752 const (
// Number of goroutine ids to grab from sched.goidgen to the local per-P
// cache at once. 16 seems to provide enough amortization, but other than
// that it's mostly an arbitrary number.
755 _GoidCacheBatch = 16
756 )
// cpuinit sets up CPU feature flags and calls internal/cpu.Initialize.
// env should be the complete value of the GODEBUG environment variable.
760 func cpuinit(env string) {
761 cpu.Initialize(env)
762
763
764
765 switch GOARCH {
766 case "386", "amd64":
767 x86HasAVX = cpu.X86.HasAVX
768 x86HasFMA = cpu.X86.HasFMA
769 x86HasPOPCNT = cpu.X86.HasPOPCNT
770 x86HasSSE41 = cpu.X86.HasSSE41
771
772 case "arm":
773 armHasVFPv4 = cpu.ARM.HasVFPv4
774
775 case "arm64":
776 arm64HasATOMICS = cpu.ARM64.HasATOMICS
777
778 case "loong64":
779 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
780 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
781 loong64HasLSX = cpu.Loong64.HasLSX
782
783 case "riscv64":
784 riscv64HasZbb = cpu.RISCV64.HasZbb
785 }
786 }
787
// getGodebugEarly extracts the GODEBUG environment variable directly from the
// process environment on Unix-like operating systems, along with whether it
// could be read this early (false on other operating systems). It exists so
// GODEBUG can be consulted before much of the runtime is initialized.
793 func getGodebugEarly() (string, bool) {
794 const prefix = "GODEBUG="
795 var env string
796 switch GOOS {
797 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
798
799
800
801 n := int32(0)
802 for argv_index(argv, argc+1+n) != nil {
803 n++
804 }
805
806 for i := int32(0); i < n; i++ {
807 p := argv_index(argv, argc+1+i)
808 s := unsafe.String(p, findnull(p))
809
810 if stringslite.HasPrefix(s, prefix) {
811 env = gostringnocopy(p)[len(prefix):]
812 break
813 }
814 }
815 break
816
817 default:
818 return "", false
819 }
820 return env, true
821 }
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
831 func schedinit() {
832 lockInit(&sched.lock, lockRankSched)
833 lockInit(&sched.sysmonlock, lockRankSysmon)
834 lockInit(&sched.deferlock, lockRankDefer)
835 lockInit(&sched.sudoglock, lockRankSudog)
836 lockInit(&deadlock, lockRankDeadlock)
837 lockInit(&paniclk, lockRankPanic)
838 lockInit(&allglock, lockRankAllg)
839 lockInit(&allpLock, lockRankAllp)
840 lockInit(&reflectOffs.lock, lockRankReflectOffs)
841 lockInit(&finlock, lockRankFin)
842 lockInit(&cpuprof.lock, lockRankCpuprof)
843 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
844 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
845 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
846 traceLockInit()
847
848
849
850 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
851
852 lockVerifyMSize()
853
854 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
855
856
857
858 gp := getg()
859 if raceenabled {
860 gp.racectx, raceprocctx0 = raceinit()
861 }
862
863 sched.maxmcount = 10000
864 crashFD.Store(^uintptr(0))
865
866
867 worldStopped()
868
869 godebug, parsedGodebug := getGodebugEarly()
870 if parsedGodebug {
871 parseRuntimeDebugVars(godebug)
872 }
873 ticks.init()
874 moduledataverify()
875 stackinit()
876 randinit()
877 mallocinit()
878 cpuinit(godebug)
879 alginit()
880 mcommoninit(gp.m, -1)
881 modulesinit()
882 typelinksinit()
883 itabsinit()
884 stkobjinit()
885
886 sigsave(&gp.m.sigmask)
887 initSigmask = gp.m.sigmask
888
889 goargs()
890 goenvs()
891 secure()
892 checkfds()
893 if !parsedGodebug {
894
895
896 parseRuntimeDebugVars(gogetenv("GODEBUG"))
897 }
898 finishDebugVarsSetup()
899 gcinit()
900
901
902
903 gcrash.stack = stackalloc(16384)
904 gcrash.stackguard0 = gcrash.stack.lo + 1000
905 gcrash.stackguard1 = gcrash.stack.lo + 1000
906
907
908
909
910
911 if disableMemoryProfiling {
912 MemProfileRate = 0
913 }
914
915
916 mProfStackInit(gp.m)
917 defaultGOMAXPROCSInit()
918
919 lock(&sched.lock)
920 sched.lastpoll.Store(nanotime())
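// Determine the initial GOMAXPROCS: an explicit, positive $GOMAXPROCS
// setting wins; otherwise fall back to the runtime's computed default
// (defaultGOMAXPROCS, derived from the number of usable CPUs).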
921 var procs int32
922 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
923 procs = int32(n)
924 sched.customGOMAXPROCS = true
925 } else {
934 procs = defaultGOMAXPROCS(numCPUStartup)
935 }
936 if procresize(procs) != nil {
937 throw("unknown runnable goroutine during bootstrap")
938 }
939 unlock(&sched.lock)
940
941
942 worldStarted()
943
944 if buildVersion == "" {
945
946
947 buildVersion = "unknown"
948 }
949 if len(modinfo) == 1 {
950
951
952 modinfo = ""
953 }
954 }
955
956 func dumpgstatus(gp *g) {
957 thisg := getg()
958 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
959 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
960 }
// checkmcount checks that the number of threads does not exceed
// sched.maxmcount.
//
// sched.lock must be held.
963 func checkmcount() {
964 assertLockHeld(&sched.lock)
965
966
967
968
969
970
971
972
973
974 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
975 if count > sched.maxmcount {
976 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
977 throw("thread exhaustion")
978 }
979 }
980
// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
985 func mReserveID() int64 {
986 assertLockHeld(&sched.lock)
987
988 if sched.mnext+1 < sched.mnext {
989 throw("runtime: thread ID overflow")
990 }
991 id := sched.mnext
992 sched.mnext++
993 checkmcount()
994 return id
995 }
996
// mcommoninit performs common M initialization. A pre-allocated ID may be
// passed as 'id', or omitted by passing -1.
998 func mcommoninit(mp *m, id int64) {
999 gp := getg()
1000
1001
1002 if gp != gp.m.g0 {
1003 callers(1, mp.createstack[:])
1004 }
1005
1006 lock(&sched.lock)
1007
1008 if id >= 0 {
1009 mp.id = id
1010 } else {
1011 mp.id = mReserveID()
1012 }
1013
1014 mp.self = newMWeakPointer(mp)
1015
1016 mrandinit(mp)
1017
1018 mpreinit(mp)
1019 if mp.gsignal != nil {
1020 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1021 }
1022
1023
1024
1025 mp.alllink = allm
1026
1027
1028
1029 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1030 unlock(&sched.lock)
1031
1032
1033 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1034 mp.cgoCallers = new(cgoCallers)
1035 }
1036 mProfStackInit(mp)
1037 }
1038
// mProfStackInit is used to eagerly initialize stack trace buffers for
// profiling. Lazy allocation would have to deal with reentrancy issues in
// malloc and runtime locks for mLockProfile.
1043 func mProfStackInit(mp *m) {
1044 if debug.profstackdepth == 0 {
1045
1046
1047 return
1048 }
1049 mp.profStack = makeProfStackFP()
1050 mp.mLockProfile.stack = makeProfStackFP()
1051 }
1052
1053
1054
1055
1056 func makeProfStackFP() []uintptr {
1057
1058
1059
1060
1061
1062
1063 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1064 }
1065
1066
1067
1068 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1069
1070
1071 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1072
1073 func (mp *m) becomeSpinning() {
1074 mp.spinning = true
1075 sched.nmspinning.Add(1)
1076 sched.needspinning.Store(0)
1077 }
1078
// snapshotAllp saves a snapshot of allp on mp so findRunnable can safely
// inspect it after releasing its P, without racing with procresize. The
// snapshot is cleared again with clearAllpSnapshot.
1086 func (mp *m) snapshotAllp() []*p {
1087 mp.allpSnapshot = allp
1088 return mp.allpSnapshot
1089 }
1090
// clearAllpSnapshot clears the snapshot taken by snapshotAllp, so that the
// backing array is not kept alive unnecessarily.
1097 func (mp *m) clearAllpSnapshot() {
1098 mp.allpSnapshot = nil
1099 }
1100
1101 func (mp *m) hasCgoOnStack() bool {
1102 return mp.ncgo > 0 || mp.isextra
1103 }
1104
1105 const (
1106
1107
1108 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1109
1110
1111
1112 osHasLowResClockInt = goos.IsWindows
1113
1114
1115
1116 osHasLowResClock = osHasLowResClockInt > 0
1117 )
1118
1119
1120 func ready(gp *g, traceskip int, next bool) {
1121 status := readgstatus(gp)
1122
1123
1124 mp := acquirem()
1125 if status&^_Gscan != _Gwaiting {
1126 dumpgstatus(gp)
1127 throw("bad g->status in ready")
1128 }
1129
1130
1131 trace := traceAcquire()
1132 casgstatus(gp, _Gwaiting, _Grunnable)
1133 if trace.ok() {
1134 trace.GoUnpark(gp, traceskip)
1135 traceRelease(trace)
1136 }
1137 runqput(mp.p.ptr(), gp, next)
1138 wakep()
1139 releasem(mp)
1140 }
1141
// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
1144 const freezeStopWait = 0x7fffffff
1145
// freezing is set to non-zero if the runtime is trying to freeze the
// world.
1148 var freezing atomic.Bool
1149
// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
1153 func freezetheworld() {
1154 freezing.Store(true)
1155 if debug.dontfreezetheworld > 0 {
// With GODEBUG=dontfreezetheworld=1, leave Ps running instead of
// forcing goroutines to a stop, so a crash traceback can observe them
// in their current state. Just give running goroutines a moment to
// settle before returning.
1180 usleep(1000)
1181 return
1182 }
1183
1184
1185
1186
1187 for i := 0; i < 5; i++ {
1188
1189 sched.stopwait = freezeStopWait
1190 sched.gcwaiting.Store(true)
1191
1192 if !preemptall() {
1193 break
1194 }
1195 usleep(1000)
1196 }
1197
1198 usleep(1000)
1199 preemptall()
1200 usleep(1000)
1201 }
1202
// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus and casfrom_Gscanstatus.
1207 func readgstatus(gp *g) uint32 {
1208 return gp.atomicstatus.Load()
1209 }
1210
// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
1215 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1216 success := false
1217
1218
1219 switch oldval {
1220 default:
1221 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1222 dumpgstatus(gp)
1223 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1224 case _Gscanrunnable,
1225 _Gscanwaiting,
1226 _Gscanrunning,
1227 _Gscansyscall,
1228 _Gscanleaked,
1229 _Gscanpreempted,
1230 _Gscandeadextra:
1231 if newval == oldval&^_Gscan {
1232 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1233 }
1234 }
1235 if !success {
1236 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1237 dumpgstatus(gp)
1238 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1239 }
1240 releaseLockRankAndM(lockRankGscan)
1241 }
1242
// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfrom_Gscanstatus acts like a lock release.
1245 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1246 switch oldval {
1247 case _Grunnable,
1248 _Grunning,
1249 _Gwaiting,
1250 _Gleaked,
1251 _Gsyscall,
1252 _Gdeadextra:
1253 if newval == oldval|_Gscan {
1254 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1255 if r {
1256 acquireLockRankAndM(lockRankGscan)
1257 }
1258 return r
1259
1260 }
1261 }
1262 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1263 throw("bad oldval passed to castogscanstatus")
1264 return false
1265 }
1266
// casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
// various latencies on every transition instead of sampling them.
1269 var casgstatusAlwaysTrack = false
1270
// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
1277 func casgstatus(gp *g, oldval, newval uint32) {
1278 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1279 systemstack(func() {
1280
1281
1282 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1283 throw("casgstatus: bad incoming values")
1284 })
1285 }
1286
1287 lockWithRankMayAcquire(nil, lockRankGscan)
1288
1289
1290 const yieldDelay = 5 * 1000
1291 var nextYield int64
1292
1293
1294
1295 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1296 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1297 systemstack(func() {
1298
1299
1300 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1301 })
1302 }
1303 if i == 0 {
1304 nextYield = nanotime() + yieldDelay
1305 }
1306 if nanotime() < nextYield {
1307 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1308 procyield(1)
1309 }
1310 } else {
1311 osyield()
1312 nextYield = nanotime() + yieldDelay/2
1313 }
1314 }
1315
1316 if gp.bubble != nil {
1317 systemstack(func() {
1318 gp.bubble.changegstatus(gp, oldval, newval)
1319 })
1320 }
1321
1322 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1323
1324
1325 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1326 gp.tracking = true
1327 }
1328 gp.trackingSeq++
1329 }
1330 if !gp.tracking {
1331 return
1332 }
1333
1334
1335
1336
1337
1338
1339 switch oldval {
1340 case _Grunnable:
1341
1342
1343
1344 now := nanotime()
1345 gp.runnableTime += now - gp.trackingStamp
1346 gp.trackingStamp = 0
1347 case _Gwaiting:
1348 if !gp.waitreason.isMutexWait() {
1349
1350 break
1351 }
1352
1353
1354
1355
1356
1357 now := nanotime()
1358 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1359 gp.trackingStamp = 0
1360 }
1361 switch newval {
1362 case _Gwaiting:
1363 if !gp.waitreason.isMutexWait() {
1364
1365 break
1366 }
1367
1368 now := nanotime()
1369 gp.trackingStamp = now
1370 case _Grunnable:
1371
1372
1373 now := nanotime()
1374 gp.trackingStamp = now
1375 case _Grunning:
1376
1377
1378
1379 gp.tracking = false
1380 sched.timeToRun.record(gp.runnableTime)
1381 gp.runnableTime = 0
1382 }
1383 }
1384
// casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
//
// Use this over casgstatus when possible to ensure that a wait reason is set.
1388 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1389
1390 gp.waitreason = reason
1391 casgstatus(gp, old, _Gwaiting)
1392 }
1393
// casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the
// wait reason. The wait reason must be a valid isWaitingForSuspendG wait
// reason.
//
// Use this over casgstatus when possible to ensure that a wait reason is set.
1401 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1402 if !reason.isWaitingForSuspendG() {
1403 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1404 }
1405 casGToWaiting(gp, old, reason)
1406 }
1407
1408
1409
1410
1411
1412 func casGToPreemptScan(gp *g, old, new uint32) {
1413 if old != _Grunning || new != _Gscan|_Gpreempted {
1414 throw("bad g transition")
1415 }
1416 acquireLockRankAndM(lockRankGscan)
1417 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1418 }
1419
1420
1421
1422
1423
1424
1425 }
1426
// casGFromPreempted attempts to transition gp from _Gpreempted to _Gwaiting.
// If successful, the caller is responsible for re-scheduling gp.
1430 func casGFromPreempted(gp *g, old, new uint32) bool {
1431 if old != _Gpreempted || new != _Gwaiting {
1432 throw("bad g transition")
1433 }
1434 gp.waitreason = waitReasonPreempted
1435 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1436 return false
1437 }
1438 if bubble := gp.bubble; bubble != nil {
1439 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1440 }
1441 return true
1442 }
1443
1444
1445 type stwReason uint8
// Reasons to stop-the-world.
//
// Avoid reusing reasons and add new ones instead.
1450 const (
1451 stwUnknown stwReason = iota
1452 stwGCMarkTerm
1453 stwGCSweepTerm
1454 stwWriteHeapDump
1455 stwGoroutineProfile
1456 stwGoroutineProfileCleanup
1457 stwAllGoroutinesStack
1458 stwReadMemStats
1459 stwAllThreadsSyscall
1460 stwGOMAXPROCS
1461 stwStartTrace
1462 stwStopTrace
1463 stwForTestCountPagesInUse
1464 stwForTestReadMetricsSlow
1465 stwForTestReadMemStatsSlow
1466 stwForTestPageCachePagesLeaked
1467 stwForTestResetDebugLog
1468 )
1469
1470 func (r stwReason) String() string {
1471 return stwReasonStrings[r]
1472 }
1473
1474 func (r stwReason) isGC() bool {
1475 return r == stwGCMarkTerm || r == stwGCSweepTerm
1476 }
1477
1478
1479
1480
1481 var stwReasonStrings = [...]string{
1482 stwUnknown: "unknown",
1483 stwGCMarkTerm: "GC mark termination",
1484 stwGCSweepTerm: "GC sweep termination",
1485 stwWriteHeapDump: "write heap dump",
1486 stwGoroutineProfile: "goroutine profile",
1487 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1488 stwAllGoroutinesStack: "all goroutines stack trace",
1489 stwReadMemStats: "read mem stats",
1490 stwAllThreadsSyscall: "AllThreadsSyscall",
1491 stwGOMAXPROCS: "GOMAXPROCS",
1492 stwStartTrace: "start trace",
1493 stwStopTrace: "stop trace",
1494 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1495 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1496 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1497 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1498 stwForTestResetDebugLog: "ResetDebugLog (test)",
1499 }
1500
// worldStop provides context from the stop-the-world required by the
// start-the-world.
1503 type worldStop struct {
1504 reason stwReason
1505 startedStopping int64
1506 finishedStopping int64
1507 stoppingCPUTime int64
1508 }
1509
// Temporary variable for stopTheWorld, when it can't write to the stack.
//
// Protected by worldsema.
1513 var stopTheWorldContext worldStop
1514
// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points and recording reason as the reason for the
// stop. On return, only the current goroutine's P is running. stopTheWorld
// must not be called from a system stack and the caller must not hold
// worldsema. The caller must call startTheWorld when other work should be
// done.
//
// Returns the STW context, to be passed to startTheWorld.
1532 func stopTheWorld(reason stwReason) worldStop {
1533 semacquire(&worldsema)
1534 gp := getg()
1535 gp.m.preemptoff = reason.String()
1536 systemstack(func() {
1537 stopTheWorldContext = stopTheWorldWithSema(reason)
1538 })
1539 return stopTheWorldContext
1540 }
1541
// startTheWorld undoes the effects of stopTheWorld.
//
// w must be the worldStop returned by stopTheWorld.
1545 func startTheWorld(w worldStop) {
1546 systemstack(func() { startTheWorldWithSema(0, w) })
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563 mp := acquirem()
1564 mp.preemptoff = ""
1565 semrelease1(&worldsema, true, 0)
1566 releasem(mp)
1567 }
1568
// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running. It also blocks a GC from starting
// until startTheWorldGC is called.
1572 func stopTheWorldGC(reason stwReason) worldStop {
1573 semacquire(&gcsema)
1574 return stopTheWorld(reason)
1575 }
1576
// startTheWorldGC undoes the effects of stopTheWorldGC.
1580 func startTheWorldGC(w worldStop) {
1581 startTheWorld(w)
1582 semrelease(&gcsema)
1583 }
1584
1585
1586 var worldsema uint32 = 1
1587
// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
1594 var gcsema uint32 = 1
1595
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should stopTheWorldWithSema on the system
// stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
1628 func stopTheWorldWithSema(reason stwReason) worldStop {
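// In outline, the stop proceeds as follows (see the code below): mark this
// goroutine as waiting and emit an STW trace event; set sched.stopwait and
// sched.gcwaiting and preempt all running goroutines; stop this M's own P,
// Ps blocked in syscalls, and idle Ps directly; then sleep/re-preempt until
// the remaining Ps park themselves (stopwait reaches zero) and verify that
// every P is in _Pgcstop.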
1641 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1642
1643 trace := traceAcquire()
1644 if trace.ok() {
1645 trace.STWStart(reason)
1646 traceRelease(trace)
1647 }
1648 gp := getg()
1649
1650
1651
1652 if gp.m.locks > 0 {
1653 throw("stopTheWorld: holding locks")
1654 }
1655
1656 lock(&sched.lock)
1657 start := nanotime()
1658 sched.stopwait = gomaxprocs
1659 sched.gcwaiting.Store(true)
1660 preemptall()
1661
1662
1663 gp.m.p.ptr().status = _Pgcstop
1664 gp.m.p.ptr().gcStopTime = start
1665 sched.stopwait--
1666
1667
1668 for _, pp := range allp {
1669 if thread, ok := setBlockOnExitSyscall(pp); ok {
1670 thread.gcstopP()
1671 thread.resume()
1672 }
1673 }
1674
1675
1676 now := nanotime()
1677 for {
1678 pp, _ := pidleget(now)
1679 if pp == nil {
1680 break
1681 }
1682 pp.status = _Pgcstop
1683 pp.gcStopTime = nanotime()
1684 sched.stopwait--
1685 }
1686 wait := sched.stopwait > 0
1687 unlock(&sched.lock)
1688
1689
1690 if wait {
1691 for {
1692
1693 if notetsleep(&sched.stopnote, 100*1000) {
1694 noteclear(&sched.stopnote)
1695 break
1696 }
1697 preemptall()
1698 }
1699 }
1700
1701 finish := nanotime()
1702 startTime := finish - start
1703 if reason.isGC() {
1704 sched.stwStoppingTimeGC.record(startTime)
1705 } else {
1706 sched.stwStoppingTimeOther.record(startTime)
1707 }
1708
1709
1710
1711
1712
1713 stoppingCPUTime := int64(0)
1714 bad := ""
1715 if sched.stopwait != 0 {
1716 bad = "stopTheWorld: not stopped (stopwait != 0)"
1717 } else {
1718 for _, pp := range allp {
1719 if pp.status != _Pgcstop {
1720 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1721 }
1722 if pp.gcStopTime == 0 && bad == "" {
1723 bad = "stopTheWorld: broken CPU time accounting"
1724 }
1725 stoppingCPUTime += finish - pp.gcStopTime
1726 pp.gcStopTime = 0
1727 }
1728 }
1729 if freezing.Load() {
1730
1731
1732
1733
1734 lock(&deadlock)
1735 lock(&deadlock)
1736 }
1737 if bad != "" {
1738 throw(bad)
1739 }
1740
1741 worldStopped()
1742
1743
1744 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1745
1746 return worldStop{
1747 reason: reason,
1748 startedStopping: start,
1749 finishedStopping: finish,
1750 stoppingCPUTime: stoppingCPUTime,
1751 }
1752 }
1753
// startTheWorldWithSema releases the Ps stopped by stopTheWorldWithSema and
// returns the time at which the world was started. If now is non-zero it is
// used as the current time instead of reading the clock; w is the worldStop
// returned by the corresponding stop.
1760 func startTheWorldWithSema(now int64, w worldStop) int64 {
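// In outline: inject any goroutines made runnable by a non-blocking netpoll,
// apply a pending GOMAXPROCS change via procresize, clear gcwaiting and wake
// sysmon, wake or create an M for every P that has work, record the total
// STW time, and finally wakep in case there is excess runnable work.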
1761 assertWorldStopped()
1762
1763 mp := acquirem()
1764 if netpollinited() {
1765 list, delta := netpoll(0)
1766 injectglist(&list)
1767 netpollAdjustWaiters(delta)
1768 }
1769 lock(&sched.lock)
1770
1771 procs := gomaxprocs
1772 if newprocs != 0 {
1773 procs = newprocs
1774 newprocs = 0
1775 }
1776 p1 := procresize(procs)
1777 sched.gcwaiting.Store(false)
1778 if sched.sysmonwait.Load() {
1779 sched.sysmonwait.Store(false)
1780 notewakeup(&sched.sysmonnote)
1781 }
1782 unlock(&sched.lock)
1783
1784 worldStarted()
1785
1786 for p1 != nil {
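// procresize returned a list of Ps with local work; wake the M parked on
// each one (handing it the P via nextp), or create a new M if the P has
// none.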
1787 p := p1
1788 p1 = p1.link.ptr()
1789 if p.m != 0 {
1790 mp := p.m.ptr()
1791 p.m = 0
1792 if mp.nextp != 0 {
1793 throw("startTheWorld: inconsistent mp->nextp")
1794 }
1795 mp.nextp.set(p)
1796 notewakeup(&mp.park)
1797 } else {
1798
1799 newm(nil, p, -1)
1800 }
1801 }
1802
1803
1804 if now == 0 {
1805 now = nanotime()
1806 }
1807 totalTime := now - w.startedStopping
1808 if w.reason.isGC() {
1809 sched.stwTotalTimeGC.record(totalTime)
1810 } else {
1811 sched.stwTotalTimeOther.record(totalTime)
1812 }
1813 trace := traceAcquire()
1814 if trace.ok() {
1815 trace.STWDone()
1816 traceRelease(trace)
1817 }
1818
1819
1820
1821
1822 wakep()
1823
1824 releasem(mp)
1825
1826 return now
1827 }
// usesLibcall indicates whether this runtime performs system calls
// via libcall.
1831 func usesLibcall() bool {
1832 switch GOOS {
1833 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1834 return true
1835 }
1836 return false
1837 }
// mStackIsSystemAllocated indicates whether this runtime starts on a
// system-allocated stack.
1841 func mStackIsSystemAllocated() bool {
1842 switch GOOS {
1843 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1844 return true
1845 }
1846 return false
1847 }
// mstart is the entry-point for new Ms. It is written in assembly, uses ABI0,
// is marked TOPFRAME, and calls mstart0.
1851 func mstart()
1852
// mstart0 is the Go entry-point for new Ms.
// It may run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
1862 func mstart0() {
1863 gp := getg()
1864
1865 osStack := gp.stack.lo == 0
1866 if osStack {
1867
1868
1869
1870
1871
1872
1873
1874
1875 size := gp.stack.hi
1876 if size == 0 {
1877 size = 16384 * sys.StackGuardMultiplier
1878 }
1879 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1880 gp.stack.lo = gp.stack.hi - size + 1024
1881 }
1882
1883
1884 gp.stackguard0 = gp.stack.lo + stackGuard
1885
1886
1887 gp.stackguard1 = gp.stackguard0
1888 mstart1()
1889
1890
1891 if mStackIsSystemAllocated() {
1892
1893
1894
1895 osStack = true
1896 }
1897 mexit(osStack)
1898 }
1899
1900
1901
1902
1903
1904 func mstart1() {
1905 gp := getg()
1906
1907 if gp != gp.m.g0 {
1908 throw("bad runtime·mstart")
1909 }
1910
// Set up m.g0.sched as a label returning to just
// after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
// We're never coming back to mstart1 after we call schedule,
// so other calls can reuse the current frame.
// And goexit0 does a gogo that needs to return from mstart1
// and let mstart0 exit the thread.
1917 gp.sched.g = guintptr(unsafe.Pointer(gp))
1918 gp.sched.pc = sys.GetCallerPC()
1919 gp.sched.sp = sys.GetCallerSP()
1920
1921 asminit()
1922 minit()
1923
1924
1925
1926 if gp.m == &m0 {
1927 mstartm0()
1928 }
1929
1930 if debug.dataindependenttiming == 1 {
1931 sys.EnableDIT()
1932 }
1933
1934 if fn := gp.m.mstartfn; fn != nil {
1935 fn()
1936 }
1937
1938 if gp.m != &m0 {
1939 acquirep(gp.m.nextp.ptr())
1940 gp.m.nextp = 0
1941 }
1942 schedule()
1943 }
1944
// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
1951 func mstartm0() {
1952
1953
1954
1955 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1956 cgoHasExtraM = true
1957 newextram()
1958 }
1959 initsig(false)
1960 }
1961
// mPark causes a thread to park itself, returning once woken.
1965 func mPark() {
1966 gp := getg()
1967 notesleep(&gp.m.park)
1968 noteclear(&gp.m.park)
1969 }
1970
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
1981 func mexit(osStack bool) {
1982 mp := getg().m
1983
1984 if mp == &m0 {
// This is the main thread. Just wedge it.
//
// On Linux, exiting the main thread puts the process
// into a non-waitable zombie state. On Plan 9,
// exiting the main thread unblocks wait even though
// other threads are still running. On Solaris we can
// neither exitThread nor return from mstart. Other
// bad things probably happen on other platforms.
//
// We could try to clean up this M more before wedging
// it, but that complicates signal handling.
1996 handoffp(releasep())
1997 lock(&sched.lock)
1998 sched.nmfreed++
1999 checkdead()
2000 unlock(&sched.lock)
2001 mPark()
2002 throw("locked m0 woke up")
2003 }
2004
2005 sigblock(true)
2006 unminit()
2007
2008
2009 if mp.gsignal != nil {
2010 stackfree(mp.gsignal.stack)
2011 if valgrindenabled {
2012 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2013 mp.gsignal.valgrindStackID = 0
2014 }
2015
2016
2017
2018
2019 mp.gsignal = nil
2020 }
2021
2022
2023 vgetrandomDestroy(mp)
2024
2025
2026
2027 mp.self.clear()
2028
2029
2030 lock(&sched.lock)
2031 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2032 if *pprev == mp {
2033 *pprev = mp.alllink
2034 goto found
2035 }
2036 }
2037 throw("m not found in allm")
2038 found:
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053 mp.freeWait.Store(freeMWait)
2054 mp.freelink = sched.freem
2055 sched.freem = mp
2056 unlock(&sched.lock)
2057
2058 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2059 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2060
2061
2062 handoffp(releasep())
2063
2064
2065
2066
2067
2068 lock(&sched.lock)
2069 sched.nmfreed++
2070 checkdead()
2071 unlock(&sched.lock)
2072
2073 if GOOS == "darwin" || GOOS == "ios" {
2074
2075
2076 if mp.signalPending.Load() != 0 {
2077 pendingPreemptSignals.Add(-1)
2078 }
2079 }
2080
2081
2082
2083 mdestroy(mp)
2084
2085 if osStack {
2086
2087 mp.freeWait.Store(freeMRef)
2088
2089
2090
2091 return
2092 }
2093
2094
2095
2096
2097
2098 exitThread(&mp.freeWait)
2099 }
2100
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema. fn must not refer to any
// part of the current goroutine's stack, since the GC may move it.
2111 func forEachP(reason waitReason, fn func(*p)) {
2112 systemstack(func() {
2113 gp := getg().m.curg
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125 casGToWaitingForSuspendG(gp, _Grunning, reason)
2126 forEachPInternal(fn)
2127 casgstatus(gp, _Gwaiting, _Grunning)
2128 })
2129 }
2130
// forEachPInternal calls fn(p) for every P p when p reaches a GC safe point.
// It is the internal implementation of forEachP.
//
// The caller must hold worldsema and either must ensure that a GC is not
// running or otherwise be in a non-preemptible state (e.g., on the
// system stack).
2140 func forEachPInternal(fn func(*p)) {
2141 mp := acquirem()
2142 pp := getg().m.p.ptr()
2143
2144 lock(&sched.lock)
2145 if sched.safePointWait != 0 {
2146 throw("forEachP: sched.safePointWait != 0")
2147 }
2148 sched.safePointWait = gomaxprocs - 1
2149 sched.safePointFn = fn
2150
2151
2152 for _, p2 := range allp {
2153 if p2 != pp {
2154 atomic.Store(&p2.runSafePointFn, 1)
2155 }
2156 }
2157 preemptall()
2158
2159
2160
2161
2162
2163
2164
2165 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2166 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2167 fn(p)
2168 sched.safePointWait--
2169 }
2170 }
2171
2172 wait := sched.safePointWait > 0
2173 unlock(&sched.lock)
2174
2175
2176 fn(pp)
2177
2178
2179
2180 for _, p2 := range allp {
2181 if atomic.Load(&p2.runSafePointFn) != 1 {
2182
2183 continue
2184 }
2185 if thread, ok := setBlockOnExitSyscall(p2); ok {
2186 thread.takeP()
2187 thread.resume()
2188 handoffp(p2)
2189 }
2190 }
2191
2192
2193 if wait {
2194 for {
2195
2196
2197
2198
2199 if notetsleep(&sched.safePointNote, 100*1000) {
2200 noteclear(&sched.safePointNote)
2201 break
2202 }
2203 preemptall()
2204 }
2205 }
2206 if sched.safePointWait != 0 {
2207 throw("forEachP: not done")
2208 }
2209 for _, p2 := range allp {
2210 if p2.runSafePointFn != 0 {
2211 throw("forEachP: P did not run fn")
2212 }
2213 }
2214
2215 lock(&sched.lock)
2216 sched.safePointFn = nil
2217 unlock(&sched.lock)
2218 releasem(mp)
2219 }
2220
// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//	    runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
2232 func runSafePointFn() {
2233 p := getg().m.p.ptr()
2234
2235
2236
2237 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2238 return
2239 }
2240 sched.safePointFn(p)
2241 lock(&sched.lock)
2242 sched.safePointWait--
2243 if sched.safePointWait == 0 {
2244 notewakeup(&sched.safePointNote)
2245 }
2246 unlock(&sched.lock)
2247 }
2248
2249
2250
2251
2252 var cgoThreadStart unsafe.Pointer
2253
2254 type cgothreadstart struct {
2255 g guintptr
2256 tls *uint64
2257 fn unsafe.Pointer
2258 }
2259
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows pp.
2269 func allocm(pp *p, fn func(), id int64) *m {
2270 allocmLock.rlock()
2271
2272
2273
2274
2275 acquirem()
2276
2277 gp := getg()
2278 if gp.m.p == 0 {
2279 acquirep(pp)
2280 }
2281
2282
2283
2284 if sched.freem != nil {
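// Free the g0 stacks of exited Ms parked on sched.freem whose threads are
// done with them; this may free up a stack that this new M can reuse. Ms
// still waiting on their stack (freeWait == freeMWait) stay on the list.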
2285 lock(&sched.lock)
2286 var newList *m
2287 for freem := sched.freem; freem != nil; {
2288
2289 wait := freem.freeWait.Load()
2290 if wait == freeMWait {
2291 next := freem.freelink
2292 freem.freelink = newList
2293 newList = freem
2294 freem = next
2295 continue
2296 }
2297
2298
2299
2300 if traceEnabled() || traceShuttingDown() {
2301 traceThreadDestroy(freem)
2302 }
2303
2304
2305
2306 if wait == freeMStack {
2307
2308
2309
2310 systemstack(func() {
2311 stackfree(freem.g0.stack)
2312 if valgrindenabled {
2313 valgrindDeregisterStack(freem.g0.valgrindStackID)
2314 freem.g0.valgrindStackID = 0
2315 }
2316 })
2317 }
2318 freem = freem.freelink
2319 }
2320 sched.freem = newList
2321 unlock(&sched.lock)
2322 }
2323
2324 mp := &new(mPadded).m
2325 mp.mstartfn = fn
2326 mcommoninit(mp, id)
2327
2328
2329
2330 if iscgo || mStackIsSystemAllocated() {
2331 mp.g0 = malg(-1)
2332 } else {
2333 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2334 }
2335 mp.g0.m = mp
2336
2337 if pp == gp.m.p.ptr() {
2338 releasep()
2339 }
2340
2341 releasem(gp.m)
2342 allocmLock.runlock()
2343 return mp
2344 }
2345
// needm is called when a cgo callback happens on a thread without an m
// (a thread not created by Go). In this case, needm is expected to find
// an m to use and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below), needm is
// limited in what routines it can call. In particular it can only call
// nosplit functions and cannot do any scheduling that requires an m.
//
// To avoid heavy lifting here, there is a list of pre-allocated extra
// M's that can be taken. The list head is accessed with an exchange
// that doubles as a simple spin lock usable even without an m (see
// lockextra). To make sure an M is always available to be taken, the
// runtime maintains the invariant that the list holds one more M than
// needed: if needm takes the last one, cgocallbackg later creates a
// spare and puts it back (see mp.needextram below).
//
// Each extra M also has a g0 and a curg that are pressed into service
// as the scheduling stack and current goroutine for the duration of the
// cgo callback.
//
// dropm puts the m back on the list: when the callback is done on
// non-pthread platforms, or when the C thread exits on pthread
// platforms.
//
// The signal argument indicates whether we're called from a signal
// handler.
2385 func needm(signal bool) {
2386 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2387
2388
2389
2390
2391
2392
2393 writeErrStr("fatal error: cgo callback before cgo call\n")
2394 exit(1)
2395 }
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405 var sigmask sigset
2406 sigsave(&sigmask)
2407 sigblock(false)
2408
2409
2410
2411
2412 mp, last := getExtraM()
// Set needextram when we've just emptied the list,
// so that the eventual call into cgocallbackg will
// allocate a new m for the extra list. We delay the
// allocation until then so that it can be done
// after exitsyscall makes sure it is okay to be
// running at all (that is, there's no garbage collection
// running right now).
2421 mp.needextram = last
2422
2423
2424 mp.sigmask = sigmask
2425
2426
2427
2428 osSetupTLS(mp)
2429
2430
2431
2432 setg(mp.g0)
2433 sp := sys.GetCallerSP()
2434 callbackUpdateSystemStack(mp, sp, signal)
2435
2436
2437
2438
2439 mp.isExtraInC = false
2440
2441
2442 asminit()
2443 minit()
2444
2445
2446
2447
2448
2449
2450 var trace traceLocker
2451 if !signal {
2452 trace = traceAcquire()
2453 }
2454
2455
2456 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2457 sched.ngsys.Add(-1)
2458
2459
2460
2461 if !signal {
2462 if trace.ok() {
2463 trace.GoCreateSyscall(mp.curg)
2464 traceRelease(trace)
2465 }
2466 }
2467 mp.isExtraInSig = signal
2468 }
2469
// needAndBindM is a wrapper for needm that also binds the M to the current
// C thread (via cgoBindM) when the pthread key has been created, so that
// dropm runs from the pthread key destructor when the thread exits.
2473 func needAndBindM() {
2474 needm(false)
2475
2476 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2477 cgoBindM()
2478 }
2479 }
2480
// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
2484 func newextram() {
2485 c := extraMWaiters.Swap(0)
2486 if c > 0 {
2487 for i := uint32(0); i < c; i++ {
2488 oneNewExtraM()
2489 }
2490 } else if extraMLength.Load() == 0 {
2491
2492 oneNewExtraM()
2493 }
2494 }
2495
2496
2497 func oneNewExtraM() {
2498
2499
2500
2501
2502
2503 mp := allocm(nil, nil, -1)
2504 gp := malg(4096)
2505 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2506 gp.sched.sp = gp.stack.hi
2507 gp.sched.sp -= 4 * goarch.PtrSize
2508 gp.sched.lr = 0
2509 gp.sched.g = guintptr(unsafe.Pointer(gp))
2510 gp.syscallpc = gp.sched.pc
2511 gp.syscallsp = gp.sched.sp
2512 gp.stktopsp = gp.sched.sp
2513
2514
2515
2516 casgstatus(gp, _Gidle, _Gdeadextra)
2517 gp.m = mp
2518 mp.curg = gp
2519 mp.isextra = true
2520
2521 mp.isExtraInC = true
2522 mp.lockedInt++
2523 mp.lockedg.set(gp)
2524 gp.lockedm.set(mp)
2525 gp.goid = sched.goidgen.Add(1)
2526 if raceenabled {
2527 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2528 }
2529
2530 allgadd(gp)
2531
2532
2533
2534
2535
2536 sched.ngsys.Add(1)
2537
2538
2539 addExtraM(mp)
2540 }
2541
2542
// dropm puts the current m back onto the extra list.
//
// 1. On systems without pthreads, like Windows, dropm is called when a
// cgo callback has called needm but is now done with the callback and
// returning back into the non-Go thread.
//
// The main expense here is the call to signalstack to release the m's
// signal stack, and then the call to needm on the next callback from
// this thread. It is tempting to try to save the m for next time, but
// there are too many cases: the thread might exit, or do more non-Go
// work before the next callback.
//
// 2. On systems with pthreads, dropm is called while a non-Go thread is
// exiting, via the pthread key destructor registered by cgoBindM.
//
// This may run with m.p == nil, so write barriers are not allowed.
2575 func dropm() {
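// In outline: return curg to the dead-extra state, emit trace teardown
// events, block signals and unminit, clear the g0 stack bounds (the C
// thread's stack may be reused by a different thread later), push the M
// back on the extra list, and restore the saved signal mask.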
2579 mp := getg().m
2580
2581
2582
2583
2584
2585 var trace traceLocker
2586 if !mp.isExtraInSig {
2587 trace = traceAcquire()
2588 }
2589
2590
2591 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2592 mp.curg.preemptStop = false
2593 sched.ngsys.Add(1)
2594 decGSyscallNoP(mp)
2595
2596 if !mp.isExtraInSig {
2597 if trace.ok() {
2598 trace.GoDestroySyscall()
2599 traceRelease(trace)
2600 }
2601 }
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616 mp.syscalltick--
2617
2618
2619
2620 mp.curg.trace.reset()
2621
2622
2623
2624
2625 if traceEnabled() || traceShuttingDown() {
2626
2627
2628
2629
2630
2631
2632
2633 lock(&sched.lock)
2634 traceThreadDestroy(mp)
2635 unlock(&sched.lock)
2636 }
2637 mp.isExtraInSig = false
2638
2639
2640
2641
2642
2643 sigmask := mp.sigmask
2644 sigblock(false)
2645 unminit()
2646
2647 setg(nil)
2648
2649
2650
2651 g0 := mp.g0
2652 g0.stack.hi = 0
2653 g0.stack.lo = 0
2654 g0.stackguard0 = 0
2655 g0.stackguard1 = 0
2656 mp.g0StackAccurate = false
2657
2658 putExtraM(mp)
2659
2660 msigrestore(sigmask)
2661 }
2662
// cgoBindM is called once from needAndBindM. It stores the g0 of the
// current m into the thread-specific value of the pthread key created by
// C code, so that the key's destructor can call dropm when the C thread
// exits. This is how extra Ms on pthread platforms are released.
2683 func cgoBindM() {
2684 if GOOS == "windows" || GOOS == "plan9" {
2685 fatal("bindm in unexpected GOOS")
2686 }
2687 g := getg()
2688 if g.m.g0 != g {
2689 fatal("the current g is not g0")
2690 }
2691 if _cgo_bindm != nil {
2692 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2693 }
2694 }
2695
2696
// getm returns the pointer to the current m as a uintptr; it is a helper
// for EnsureDropM.
2707 func getm() uintptr {
2708 return uintptr(unsafe.Pointer(getg().m))
2709 }
2710
2711 var (
2712
2713
2714
2715
2716
2717
2718 extraM atomic.Uintptr
2719
2720 extraMLength atomic.Uint32
2721
2722 extraMWaiters atomic.Uint32
2723
2724
2725 extraMInUse atomic.Uint32
2726 )
2727
2728
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extraM. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
2735 func lockextra(nilokay bool) *m {
2736 const locked = 1
2737
2738 incr := false
2739 for {
2740 old := extraM.Load()
2741 if old == locked {
2742 osyield_no_g()
2743 continue
2744 }
2745 if old == 0 && !nilokay {
2746 if !incr {
2747
2748
2749
2750 extraMWaiters.Add(1)
2751 incr = true
2752 }
2753 usleep_no_g(1)
2754 continue
2755 }
2756 if extraM.CompareAndSwap(old, locked) {
2757 return (*m)(unsafe.Pointer(old))
2758 }
2759 osyield_no_g()
2760 continue
2761 }
2762 }
2763
2764
2765 func unlockextra(mp *m, delta int32) {
2766 extraMLength.Add(delta)
2767 extraM.Store(uintptr(unsafe.Pointer(mp)))
2768 }
2769
// Return an M from the extra M list. Returns last == true if the list becomes
// empty because of this call.
//
// Spins waiting for an extra M, so caller must ensure that the list always
// has or will soon have at least one M.
2777 func getExtraM() (mp *m, last bool) {
2778 mp = lockextra(false)
2779 extraMInUse.Add(1)
2780 unlockextra(mp.schedlink.ptr(), -1)
2781 return mp, mp.schedlink.ptr() == nil
2782 }
2783
2784
2785
2786
2787
2788 func putExtraM(mp *m) {
2789 extraMInUse.Add(-1)
2790 addExtraM(mp)
2791 }
2792
2793
2794
2795
2796 func addExtraM(mp *m) {
2797 mnext := lockextra(true)
2798 mp.schedlink.set(mnext)
2799 unlockextra(mp, 1)
2800 }
2801
2802 var (
// allocmLock is locked for read when creating new Ms in allocm and their
// addition to allm. Thus acquiring this lock for write blocks the
// creation of new Ms.
2806 allocmLock rwmutex
2807
// execLock serializes exec and clone to avoid bugs or unspecified
// behaviour around exec'ing while creating/destroying threads. See
// issue #19546.
2811 execLock rwmutex
2812 )
2813
2814
2815
2816 const (
2817 failthreadcreate = "runtime: failed to create new OS thread\n"
2818 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2819 )
2820
// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
// start an OS thread.
2824 var newmHandoff struct {
2825 lock mutex
2826
2827
2828
2829 newm muintptr
2830
2831
2832
2833 waiting bool
2834 wake note
2835
2836
2837
2838
2839 haveTemplateThread uint32
2840 }
2841
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
//
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
2849 func newm(fn func(), pp *p, id int64) {
2850
// allocm adds a new M to allm, but they do not start until created by
// the OS in newm1 or the template thread.
//
// doAllThreadsSyscall requires that every M in allm will eventually
// start and be signal-able, even with a STW.
//
// Disable preemption here until we start the thread to ensure that
// newm is not preempted between allocm and starting the new thread,
// ensuring that anything added to allm is guaranteed to eventually
// start.
2860 acquirem()
2861
2862 mp := allocm(pp, fn, id)
2863 mp.nextp.set(pp)
2864 mp.sigmask = initSigmask
2865 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
// We're on a locked M or a thread that may have been
// started by C. The kernel state of this thread may
// be strange (the user may have locked it for that
// purpose). We don't want to clone that into another
// thread. Instead, ask a known-good thread to create
// the thread for us.
2877 lock(&newmHandoff.lock)
2878 if newmHandoff.haveTemplateThread == 0 {
2879 throw("on a locked thread with no template thread")
2880 }
2881 mp.schedlink = newmHandoff.newm
2882 newmHandoff.newm.set(mp)
2883 if newmHandoff.waiting {
2884 newmHandoff.waiting = false
2885 notewakeup(&newmHandoff.wake)
2886 }
2887 unlock(&newmHandoff.lock)
2888
2889
2890
2891 releasem(getg().m)
2892 return
2893 }
2894 newm1(mp)
2895 releasem(getg().m)
2896 }
2897
2898 func newm1(mp *m) {
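// newm1 creates the OS thread for mp: via _cgo_thread_start when cgo is in
// use (so foreign code sees a properly initialized thread), otherwise via
// newosproc. execLock is read-held across thread creation to avoid racing
// with exec.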
2899 if iscgo {
2900 var ts cgothreadstart
2901 if _cgo_thread_start == nil {
2902 throw("_cgo_thread_start missing")
2903 }
2904 ts.g.set(mp.g0)
2905 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2906 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2907 if msanenabled {
2908 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2909 }
2910 if asanenabled {
2911 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2912 }
2913 execLock.rlock()
2914 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2915 execLock.runlock()
2916 return
2917 }
2918 execLock.rlock()
2919 newosproc(mp)
2920 execLock.runlock()
2921 }
2922
// startTemplateThread starts the template thread if it is not already
// running.
//
// The calling thread must itself be in a known-good state.
2927 func startTemplateThread() {
2928 if GOARCH == "wasm" {
2929 return
2930 }
2931
2932
2933
2934 mp := acquirem()
2935 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2936 releasem(mp)
2937 return
2938 }
2939 newm(templateThread, nil, -1)
2940 releasem(mp)
2941 }
2942
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
//
// Many programs never need this, so templateThread is started lazily
// when we first enter a state that might lead to running on a thread
// in an unknown state.
//
// templateThread runs on an M without a P, so it must not have write
// barriers.
2955 func templateThread() {
2956 lock(&sched.lock)
2957 sched.nmsys++
2958 checkdead()
2959 unlock(&sched.lock)
2960
2961 for {
2962 lock(&newmHandoff.lock)
2963 for newmHandoff.newm != 0 {
2964 newm := newmHandoff.newm.ptr()
2965 newmHandoff.newm = 0
2966 unlock(&newmHandoff.lock)
2967 for newm != nil {
2968 next := newm.schedlink.ptr()
2969 newm.schedlink = 0
2970 newm1(newm)
2971 newm = next
2972 }
2973 lock(&newmHandoff.lock)
2974 }
2975 newmHandoff.waiting = true
2976 noteclear(&newmHandoff.wake)
2977 unlock(&newmHandoff.lock)
2978 notesleep(&newmHandoff.wake)
2979 }
2980 }
2981
2982
2983
2984 func stopm() {
2985 gp := getg()
2986
2987 if gp.m.locks != 0 {
2988 throw("stopm holding locks")
2989 }
2990 if gp.m.p != 0 {
2991 throw("stopm holding p")
2992 }
2993 if gp.m.spinning {
2994 throw("stopm spinning")
2995 }
2996
2997 lock(&sched.lock)
2998 mput(gp.m)
2999 unlock(&sched.lock)
3000 mPark()
3001 acquirep(gp.m.nextp.ptr())
3002 gp.m.nextp = 0
3003 }
3004
3005 func mspinning() {
3006
3007 getg().m.spinning = true
3008 }
3009
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide
// a P. startm will set m.spinning in the newly started M.
//
// Callers passing a non-nil P must call from a non-preemptible context. See
// comment on acquirem below.
//
// Argument lockheld indicates whether the caller already acquired the
// scheduler lock. Callers holding the lock when making the call must pass
// true. The lock might be temporarily dropped, but will be reacquired before
// returning.
3027 func startm(pp *p, spinning, lockheld bool) {
// Disable preemption.
//
// Every owned P must have an owner that will eventually stop it in the
// event of a GC stop request. startm takes transient ownership of a P
// (either from argument or pidleget below) and transfers ownership to
// a started M, which will be responsible for performing the stop.
//
// Preemption must be disabled during this transient ownership,
// otherwise the P this is running on may enter GC stop while still
// holding the transient P, leaving that P in limbo and deadlocking the
// STW.
//
// Callers passing a non-nil P must already be in non-preemptible
// context, otherwise such preemption could occur on function entry to
// startm. Callers passing a nil P may be preemptible, so we have to
// disable preemption before acquiring a P from pidleget below.
3044 mp := acquirem()
3045 if !lockheld {
3046 lock(&sched.lock)
3047 }
3048 if pp == nil {
3049 if spinning {
3050
3051
3052
3053 throw("startm: P required for spinning=true")
3054 }
3055 pp, _ = pidleget(0)
3056 if pp == nil {
3057 if !lockheld {
3058 unlock(&sched.lock)
3059 }
3060 releasem(mp)
3061 return
3062 }
3063 }
3064 nmp := mget()
3065 if nmp == nil {
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080 id := mReserveID()
3081 unlock(&sched.lock)
3082
3083 var fn func()
3084 if spinning {
3085
3086 fn = mspinning
3087 }
3088 newm(fn, pp, id)
3089
3090 if lockheld {
3091 lock(&sched.lock)
3092 }
3093
3094
3095 releasem(mp)
3096 return
3097 }
3098 if !lockheld {
3099 unlock(&sched.lock)
3100 }
3101 if nmp.spinning {
3102 throw("startm: m is spinning")
3103 }
3104 if nmp.nextp != 0 {
3105 throw("startm: m has p")
3106 }
3107 if spinning && !runqempty(pp) {
3108 throw("startm: p has runnable gs")
3109 }
3110
3111 nmp.spinning = spinning
3112 nmp.nextp.set(pp)
3113 notewakeup(&nmp.park)
3114
3115
3116 releasem(mp)
3117 }
3118
// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
3123 func handoffp(pp *p) {
3124
3125
3126
3127
3128 if !runqempty(pp) || !sched.runq.empty() {
3129 startm(pp, false, false)
3130 return
3131 }
3132
3133 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3134 startm(pp, false, false)
3135 return
3136 }
3137
3138 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3139 startm(pp, false, false)
3140 return
3141 }
3142
3143
3144 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3145 sched.needspinning.Store(0)
3146 startm(pp, true, false)
3147 return
3148 }
3149 lock(&sched.lock)
3150 if sched.gcwaiting.Load() {
3151 pp.status = _Pgcstop
3152 pp.gcStopTime = nanotime()
3153 sched.stopwait--
3154 if sched.stopwait == 0 {
3155 notewakeup(&sched.stopnote)
3156 }
3157 unlock(&sched.lock)
3158 return
3159 }
3160 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3161 sched.safePointFn(pp)
3162 sched.safePointWait--
3163 if sched.safePointWait == 0 {
3164 notewakeup(&sched.safePointNote)
3165 }
3166 }
3167 if !sched.runq.empty() {
3168 unlock(&sched.lock)
3169 startm(pp, false, false)
3170 return
3171 }
3172
3173
3174 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3175 unlock(&sched.lock)
3176 startm(pp, false, false)
3177 return
3178 }
3179
3180
3181
3182 when := pp.timers.wakeTime()
3183 pidleput(pp, 0)
3184 unlock(&sched.lock)
3185
3186 if when != 0 {
3187 wakeNetPoller(when)
3188 }
3189 }
3190
3191
// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
3204 func wakep() {
3205
3206
3207 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3208 return
3209 }
3210
3211
3212
3213
3214
3215
3216 mp := acquirem()
3217
3218 var pp *p
3219 lock(&sched.lock)
3220 pp, _ = pidlegetSpinning(0)
3221 if pp == nil {
3222 if sched.nmspinning.Add(-1) < 0 {
3223 throw("wakep: negative nmspinning")
3224 }
3225 unlock(&sched.lock)
3226 releasem(mp)
3227 return
3228 }
3229
3230
3231
3232
3233 unlock(&sched.lock)
3234
3235 startm(pp, true, false)
3236
3237 releasem(mp)
3238 }
3239
3240
3241
3242 func stoplockedm() {
3243 gp := getg()
3244
3245 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3246 throw("stoplockedm: inconsistent locking")
3247 }
3248 if gp.m.p != 0 {
3249
3250 pp := releasep()
3251 handoffp(pp)
3252 }
3253 incidlelocked(1)
3254
3255 mPark()
3256 status := readgstatus(gp.m.lockedg.ptr())
3257 if status&^_Gscan != _Grunnable {
3258 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3259 dumpgstatus(gp.m.lockedg.ptr())
3260 throw("stoplockedm: not runnable")
3261 }
3262 acquirep(gp.m.nextp.ptr())
3263 gp.m.nextp = 0
3264 }
3265
3266
3267
3268
3269
3270 func startlockedm(gp *g) {
3271 mp := gp.lockedm.ptr()
3272 if mp == getg().m {
3273 throw("startlockedm: locked to me")
3274 }
3275 if mp.nextp != 0 {
3276 throw("startlockedm: m has p")
3277 }
3278
3279 incidlelocked(-1)
3280 pp := releasep()
3281 mp.nextp.set(pp)
3282 notewakeup(&mp.park)
3283 stopm()
3284 }
3285
3286
3287
3288 func gcstopm() {
3289 gp := getg()
3290
3291 if !sched.gcwaiting.Load() {
3292 throw("gcstopm: not waiting for gc")
3293 }
3294 if gp.m.spinning {
3295 gp.m.spinning = false
3296
3297
3298 if sched.nmspinning.Add(-1) < 0 {
3299 throw("gcstopm: negative nmspinning")
3300 }
3301 }
3302 pp := releasep()
3303 lock(&sched.lock)
3304 pp.status = _Pgcstop
3305 pp.gcStopTime = nanotime()
3306 sched.stopwait--
3307 if sched.stopwait == 0 {
3308 notewakeup(&sched.stopnote)
3309 }
3310 unlock(&sched.lock)
3311 stopm()
3312 }
3313
// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.
3323 func execute(gp *g, inheritTime bool) {
3324 mp := getg().m
3325
3326 if goroutineProfile.active {
3327
3328
3329
3330 tryRecordGoroutineProfile(gp, nil, osyield)
3331 }
3332
3333
3334 mp.curg = gp
3335 gp.m = mp
3336 gp.syncSafePoint = false
3337 casgstatus(gp, _Grunnable, _Grunning)
3338 gp.waitsince = 0
3339 gp.preempt = false
3340 gp.stackguard0 = gp.stack.lo + stackGuard
3341 if !inheritTime {
3342 mp.p.ptr().schedtick++
3343 }
3344
3345
3346 hz := sched.profilehz
3347 if mp.profilehz != hz {
3348 setThreadCPUProfiler(hz)
3349 }
3350
3351 trace := traceAcquire()
3352 if trace.ok() {
3353 trace.GoStart()
3354 traceRelease(trace)
3355 }
3356
3357 gogo(&gp.sched)
3358 }
3359
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.
3364 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
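// In outline, the search below proceeds roughly as follows: drain
// STW/safe-point work; run timers; prefer the trace reader and GC mark
// workers; occasionally take from the global queue for fairness; then try
// the local run queue, the global queue, a non-blocking netpoll, and work
// stealing; finally release the P and park, re-checking all sources once
// more before blocking in netpoll.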
3365 mp := getg().m
3366
3367
3368
3369
3370
3371 top:
3372
3373
3374
3375 mp.clearAllpSnapshot()
3376
3377 pp := mp.p.ptr()
3378 if sched.gcwaiting.Load() {
3379 gcstopm()
3380 goto top
3381 }
3382 if pp.runSafePointFn != 0 {
3383 runSafePointFn()
3384 }
3385
3386
3387
3388
3389
3390 now, pollUntil, _ := pp.timers.check(0, nil)
3391
3392
3393 if traceEnabled() || traceShuttingDown() {
3394 gp := traceReader()
3395 if gp != nil {
3396 trace := traceAcquire()
3397 casgstatus(gp, _Gwaiting, _Grunnable)
3398 if trace.ok() {
3399 trace.GoUnpark(gp, 0)
3400 traceRelease(trace)
3401 }
3402 return gp, false, true
3403 }
3404 }
3405
3406
3407 if gcBlackenEnabled != 0 {
3408 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3409 if gp != nil {
3410 return gp, false, true
3411 }
3412 now = tnow
3413 }
3414
3415
3416
3417
3418 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3419 lock(&sched.lock)
3420 gp := globrunqget()
3421 unlock(&sched.lock)
3422 if gp != nil {
3423 return gp, false, false
3424 }
3425 }
3426
3427
3428 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3429 if gp := wakefing(); gp != nil {
3430 ready(gp, 0, true)
3431 }
3432 }
3433
3434
3435 if gcCleanups.needsWake() {
3436 gcCleanups.wake()
3437 }
3438
3439 if *cgo_yield != nil {
3440 asmcgocall(*cgo_yield, nil)
3441 }
3442
3443
3444 if gp, inheritTime := runqget(pp); gp != nil {
3445 return gp, inheritTime, false
3446 }
3447
3448
3449 if !sched.runq.empty() {
3450 lock(&sched.lock)
3451 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3452 unlock(&sched.lock)
3453 if gp != nil {
3454 if runqputbatch(pp, &q); !q.empty() {
3455 throw("Couldn't put Gs into empty local runq")
3456 }
3457 return gp, false, false
3458 }
3459 }
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3471 list, delta := netpoll(0)
3472 sched.pollingNet.Store(0)
3473 if !list.empty() {
3474 gp := list.pop()
3475 injectglist(&list)
3476 netpollAdjustWaiters(delta)
3477 trace := traceAcquire()
3478 casgstatus(gp, _Gwaiting, _Grunnable)
3479 if trace.ok() {
3480 trace.GoUnpark(gp, 0)
3481 traceRelease(trace)
3482 }
3483 return gp, false, false
3484 }
3485 }
3486
// Spinning Ms: steal work from other Ps.
3492 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
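// Limit the number of spinning Ms to half the number of busy Ps.
// This is necessary to prevent excessive CPU consumption when
// GOMAXPROCS>>1 but the program parallelism is low.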
3493 if !mp.spinning {
3494 mp.becomeSpinning()
3495 }
3496
3497 gp, inheritTime, tnow, w, newWork := stealWork(now)
3498 if gp != nil {
3499
3500 return gp, inheritTime, false
3501 }
3502 if newWork {
3503
3504
3505 goto top
3506 }
3507
3508 now = tnow
3509 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3510
3511 pollUntil = w
3512 }
3513 }
3514
3515
3516
3517
3518
3519 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3520 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3521 if node != nil {
3522 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3523 gp := node.gp.ptr()
3524
3525 trace := traceAcquire()
3526 casgstatus(gp, _Gwaiting, _Grunnable)
3527 if trace.ok() {
3528 trace.GoUnpark(gp, 0)
3529 traceRelease(trace)
3530 }
3531 return gp, false, false
3532 }
3533 gcController.removeIdleMarkWorker()
3534 }
3535
3536
3537
3538
3539
3540 gp, otherReady := beforeIdle(now, pollUntil)
3541 if gp != nil {
3542 trace := traceAcquire()
3543 casgstatus(gp, _Gwaiting, _Grunnable)
3544 if trace.ok() {
3545 trace.GoUnpark(gp, 0)
3546 traceRelease(trace)
3547 }
3548 return gp, false, false
3549 }
3550 if otherReady {
3551 goto top
3552 }
3553
// Before we drop our P, make a snapshot of the allp slice,
// which can change underfoot once we no longer block
// safe-points. We don't need to snapshot the contents because
// everything up to cap(allp) is immutable.
3562 allpSnapshot := mp.snapshotAllp()
3563
3564
3565 idlepMaskSnapshot := idlepMask
3566 timerpMaskSnapshot := timerpMask
3567
3568
3569 lock(&sched.lock)
3570 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3571 unlock(&sched.lock)
3572 goto top
3573 }
3574 if !sched.runq.empty() {
3575 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3576 unlock(&sched.lock)
3577 if gp == nil {
3578 throw("global runq empty with non-zero runqsize")
3579 }
3580 if runqputbatch(pp, &q); !q.empty() {
3581 throw("Couldn't put Gs into empty local runq")
3582 }
3583 return gp, false, false
3584 }
3585 if !mp.spinning && sched.needspinning.Load() == 1 {
3586
3587 mp.becomeSpinning()
3588 unlock(&sched.lock)
3589 goto top
3590 }
3591 if releasep() != pp {
3592 throw("findRunnable: wrong p")
3593 }
3594 now = pidleput(pp, now)
3595 unlock(&sched.lock)
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633 wasSpinning := mp.spinning
3634 if mp.spinning {
3635 mp.spinning = false
3636 if sched.nmspinning.Add(-1) < 0 {
3637 throw("findRunnable: negative nmspinning")
3638 }
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651 lock(&sched.lock)
3652 if !sched.runq.empty() {
3653 pp, _ := pidlegetSpinning(0)
3654 if pp != nil {
3655 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3656 unlock(&sched.lock)
3657 if gp == nil {
3658 throw("global runq empty with non-zero runqsize")
3659 }
3660 if runqputbatch(pp, &q); !q.empty() {
3661 throw("Couldn't put Gs into empty local runq")
3662 }
3663 acquirep(pp)
3664 mp.becomeSpinning()
3665 return gp, false, false
3666 }
3667 }
3668 unlock(&sched.lock)
3669
3670 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3671 if pp != nil {
3672 acquirep(pp)
3673 mp.becomeSpinning()
3674 goto top
3675 }
3676
3677
3678 pp, gp := checkIdleGCNoP()
3679 if pp != nil {
3680 acquirep(pp)
3681 mp.becomeSpinning()
3682
3683
3684 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3685 trace := traceAcquire()
3686 casgstatus(gp, _Gwaiting, _Grunnable)
3687 if trace.ok() {
3688 trace.GoUnpark(gp, 0)
3689 traceRelease(trace)
3690 }
3691 return gp, false, false
3692 }
3693
3694
3695
3696
3697
3698
3699
3700 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3701 }
3702
3703
3704
3705
3706
3707 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3708 sched.pollUntil.Store(pollUntil)
3709 if mp.p != 0 {
3710 throw("findRunnable: netpoll with p")
3711 }
3712 if mp.spinning {
3713 throw("findRunnable: netpoll with spinning")
3714 }
3715 delay := int64(-1)
3716 if pollUntil != 0 {
3717 if now == 0 {
3718 now = nanotime()
3719 }
3720 delay = pollUntil - now
3721 if delay < 0 {
3722 delay = 0
3723 }
3724 }
3725 if faketime != 0 {
3726
3727 delay = 0
3728 }
3729 list, delta := netpoll(delay)
3730
3731 now = nanotime()
3732 sched.pollUntil.Store(0)
3733 sched.lastpoll.Store(now)
3734 if faketime != 0 && list.empty() {
3735
3736
3737 stopm()
3738 goto top
3739 }
3740 lock(&sched.lock)
3741 pp, _ := pidleget(now)
3742 unlock(&sched.lock)
3743 if pp == nil {
3744 injectglist(&list)
3745 netpollAdjustWaiters(delta)
3746 } else {
3747 acquirep(pp)
3748 if !list.empty() {
3749 gp := list.pop()
3750 injectglist(&list)
3751 netpollAdjustWaiters(delta)
3752 trace := traceAcquire()
3753 casgstatus(gp, _Gwaiting, _Grunnable)
3754 if trace.ok() {
3755 trace.GoUnpark(gp, 0)
3756 traceRelease(trace)
3757 }
3758 return gp, false, false
3759 }
3760 if wasSpinning {
3761 mp.becomeSpinning()
3762 }
3763 goto top
3764 }
3765 } else if pollUntil != 0 && netpollinited() {
3766 pollerPollUntil := sched.pollUntil.Load()
3767 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3768 netpollBreak()
3769 }
3770 }
3771 stopm()
3772 goto top
3773 }
3774
3775
3776
3777
3778
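// pollWork reports whether there is non-background work this P could
// be doing right now: something on the global or local run queue, or
// goroutines made ready by a non-blocking netpoll check (which are
// injected into the scheduler as a side effect of the check).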
3779 func pollWork() bool {
3780 if !sched.runq.empty() {
3781 return true
3782 }
3783 p := getg().m.p.ptr()
3784 if !runqempty(p) {
3785 return true
3786 }
3787 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3788 if list, delta := netpoll(0); !list.empty() {
3789 injectglist(&list)
3790 netpollAdjustWaiters(delta)
3791 return true
3792 }
3793 }
3794 return false
3795 }
3796
3797
3798
3799
3800
3801
3802
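// stealWork attempts to steal a runnable goroutine or expired timer
// from any P. It makes stealTries passes over all Ps in random order;
// only on the final pass does it also run ready timers and try to
// steal a victim's runnext slot.
//
// If newWork is true, new work may have been readied (for example by
// running stolen timers) and the caller should retry. rnow and
// pollUntil return an updated now and the earliest timer to poll for.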
3803 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3804 pp := getg().m.p.ptr()
3805
3806 ranTimer := false
3807
3808 const stealTries = 4
3809 for i := 0; i < stealTries; i++ {
3810 stealTimersOrRunNextG := i == stealTries-1
3811
3812 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3813 if sched.gcwaiting.Load() {
3814
3815 return nil, false, now, pollUntil, true
3816 }
3817 p2 := allp[enum.position()]
3818 if pp == p2 {
3819 continue
3820 }
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3836 tnow, w, ran := p2.timers.check(now, nil)
3837 now = tnow
3838 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3839 pollUntil = w
3840 }
3841 if ran {
3842
3843
3844
3845
3846
3847
3848
3849
3850 if gp, inheritTime := runqget(pp); gp != nil {
3851 return gp, inheritTime, now, pollUntil, ranTimer
3852 }
3853 ranTimer = true
3854 }
3855 }
3856
3857
3858 if !idlepMask.read(enum.position()) {
3859 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3860 return gp, false, now, pollUntil, ranTimer
3861 }
3862 }
3863 }
3864 }
3865
3866
3867
3868
3869 return nil, false, now, pollUntil, ranTimer
3870 }
3871
3872
3873
3874
3875
3876
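// checkRunqsNoP is called by findRunnable after this M has released
// its P. If any P in the snapshot has local work, it takes a P off
// the idle list and returns it so the caller can acquire it, become
// spinning again, and retry stealing. Returns nil if no P has work or
// no idle P is available.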
3877 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3878 for id, p2 := range allpSnapshot {
3879 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3880 lock(&sched.lock)
3881 pp, _ := pidlegetSpinning(0)
3882 if pp == nil {
3883
3884 unlock(&sched.lock)
3885 return nil
3886 }
3887 unlock(&sched.lock)
3888 return pp
3889 }
3890 }
3891
3892
3893 return nil
3894 }
3895
3896
3897
3898
3899 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3900 for id, p2 := range allpSnapshot {
3901 if timerpMaskSnapshot.read(uint32(id)) {
3902 w := p2.timers.wakeTime()
3903 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3904 pollUntil = w
3905 }
3906 }
3907 }
3908
3909 return pollUntil
3910 }
3911
3912
3913
3914
3915
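// checkIdleGCNoP checks whether an idle-priority GC mark worker should
// run and, if so, reserves both an idle P and a background mark worker
// G for it. Called without a P, so it re-checks the GC state under
// sched.lock after taking the P. On failure it undoes any reservation
// and returns nil, nil.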
3916 func checkIdleGCNoP() (*p, *g) {
3917
3918
3919
3920
3921
3922
3923 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3924 return nil, nil
3925 }
3926 if !gcShouldScheduleWorker(nil) {
3927 return nil, nil
3928 }
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947 lock(&sched.lock)
3948 pp, now := pidlegetSpinning(0)
3949 if pp == nil {
3950 unlock(&sched.lock)
3951 return nil, nil
3952 }
3953
3954
3955 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3956 pidleput(pp, now)
3957 unlock(&sched.lock)
3958 return nil, nil
3959 }
3960
3961 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3962 if node == nil {
3963 pidleput(pp, now)
3964 unlock(&sched.lock)
3965 gcController.removeIdleMarkWorker()
3966 return nil, nil
3967 }
3968
3969 unlock(&sched.lock)
3970
3971 return pp, node.gp.ptr()
3972 }
3973
3974
3975
3976
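// wakeNetPoller makes sure some thread will notice a timer that must
// fire at time when. If a thread is currently blocked in netpoll, it
// interrupts the poll (unless the poller already plans to wake early
// enough); otherwise it wakes an idle P so findRunnable can observe
// the new timer.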
3977 func wakeNetPoller(when int64) {
3978 if sched.lastpoll.Load() == 0 {
3979
3980
3981
3982
3983 pollerPollUntil := sched.pollUntil.Load()
3984 if pollerPollUntil == 0 || pollerPollUntil > when {
3985 netpollBreak()
3986 }
3987 } else {
3988
3989
3990 if GOOS != "plan9" {
3991 wakep()
3992 }
3993 }
3994 }
3995
3996 func resetspinning() {
3997 gp := getg()
3998 if !gp.m.spinning {
3999 throw("resetspinning: not a spinning m")
4000 }
4001 gp.m.spinning = false
4002 nmspinning := sched.nmspinning.Add(-1)
4003 if nmspinning < 0 {
4004 throw("findRunnable: negative nmspinning")
4005 }
4006
4007
4008
4009 wakep()
4010 }
4011
4012
4013
4014
4015
4016
4017
4018
4019
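// injectglist marks every G on glist runnable, adds them to run
// queues, and clears glist. Without a current P, the whole batch goes
// on the global queue and idle Ps/Ms are started to absorb it. With a
// P, one G per idle P is pushed to the global queue so other Ps can
// pick them up immediately, and the remainder goes on the local run
// queue, spilling back to the global queue if the local queue fills.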
4020 func injectglist(glist *gList) {
4021 if glist.empty() {
4022 return
4023 }
4024
4025
4026
4027 var tail *g
4028 trace := traceAcquire()
4029 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4030 tail = gp
4031 casgstatus(gp, _Gwaiting, _Grunnable)
4032 if trace.ok() {
4033 trace.GoUnpark(gp, 0)
4034 }
4035 }
4036 if trace.ok() {
4037 traceRelease(trace)
4038 }
4039
4040
4041 q := gQueue{glist.head, tail.guintptr(), glist.size}
4042 *glist = gList{}
4043
4044 startIdle := func(n int32) {
4045 for ; n > 0; n-- {
4046 mp := acquirem()
4047 lock(&sched.lock)
4048
4049 pp, _ := pidlegetSpinning(0)
4050 if pp == nil {
4051 unlock(&sched.lock)
4052 releasem(mp)
4053 break
4054 }
4055
4056 startm(pp, false, true)
4057 unlock(&sched.lock)
4058 releasem(mp)
4059 }
4060 }
4061
4062 pp := getg().m.p.ptr()
4063 if pp == nil {
4064 n := q.size
4065 lock(&sched.lock)
4066 globrunqputbatch(&q)
4067 unlock(&sched.lock)
4068 startIdle(n)
4069 return
4070 }
4071
4072 var globq gQueue
4073 npidle := sched.npidle.Load()
4074 for ; npidle > 0 && !q.empty(); npidle-- {
4075 g := q.pop()
4076 globq.pushBack(g)
4077 }
4078 if !globq.empty() {
4079 n := globq.size
4080 lock(&sched.lock)
4081 globrunqputbatch(&globq)
4082 unlock(&sched.lock)
4083 startIdle(n)
4084 }
4085
4086 if runqputbatch(pp, &q); !q.empty() {
4087 lock(&sched.lock)
4088 globrunqputbatch(&q)
4089 unlock(&sched.lock)
4090 }
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105 wakep()
4106 }
4107
4108
4109
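// schedule performs one round of scheduling on the current M: find a
// runnable goroutine and execute it. It never returns; execute jumps
// directly into the chosen goroutine.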
4110 func schedule() {
4111 mp := getg().m
4112
4113 if mp.locks != 0 {
4114 throw("schedule: holding locks")
4115 }
4116
4117 if mp.lockedg != 0 {
4118 stoplockedm()
4119 execute(mp.lockedg.ptr(), false)
4120 }
4121
4122
4123
4124 if mp.incgo {
4125 throw("schedule: in cgo")
4126 }
4127
4128 top:
4129 pp := mp.p.ptr()
4130 pp.preempt = false
4131
4132
4133
4134
4135 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4136 throw("schedule: spinning with local work")
4137 }
4138
4139 gp, inheritTime, tryWakeP := findRunnable()
4140
4141
4142 pp = mp.p.ptr()
4143
4144
4145
4146
4147 mp.clearAllpSnapshot()
4148
4149
4150
4151
4152
4153
4154
4155
4156 gcController.releaseNextGCMarkWorker(pp)
4157
4158 if debug.dontfreezetheworld > 0 && freezing.Load() {
4159
4160
4161
4162
4163
4164
4165
4166 lock(&deadlock)
4167 lock(&deadlock)
4168 }
4169
4170
4171
4172
4173 if mp.spinning {
4174 resetspinning()
4175 }
4176
4177 if sched.disable.user && !schedEnabled(gp) {
4178
4179
4180
4181 lock(&sched.lock)
4182 if schedEnabled(gp) {
4183
4184
4185 unlock(&sched.lock)
4186 } else {
4187 sched.disable.runnable.pushBack(gp)
4188 unlock(&sched.lock)
4189 goto top
4190 }
4191 }
4192
4193
4194
4195 if tryWakeP {
4196 wakep()
4197 }
4198 if gp.lockedm != 0 {
4199
4200
4201 startlockedm(gp)
4202 goto top
4203 }
4204
4205 execute(gp, inheritTime)
4206 }
4207
4208
4209
4210
4211
4212
4213
4214
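// dropg removes the association between the current M and its current
// goroutine (m.curg), clearing both directions of the link. The caller
// must already have put curg into an appropriate non-running state.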
4215 func dropg() {
4216 gp := getg()
4217
4218 setMNoWB(&gp.m.curg.m, nil)
4219 setGNoWB(&gp.m.curg, nil)
4220 }
4221
4222 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4223 unlock((*mutex)(lock))
4224 return true
4225 }
4226
4227
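// park_m is the continuation of gopark, running on g0. It moves gp
// from _Grunning to _Gwaiting and drops it from the M, then invokes
// the waitunlockf callback (typically unlocking the lock passed to
// gopark). If the callback returns false, the goroutine is made
// runnable again and resumed immediately; otherwise the M calls
// schedule to find other work.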
4228 func park_m(gp *g) {
4229 mp := getg().m
4230
4231 trace := traceAcquire()
4232
4233
4234
4235
4236
4237 bubble := gp.bubble
4238 if bubble != nil {
4239 bubble.incActive()
4240 }
4241
4242 if trace.ok() {
4243
4244
4245
4246 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4247 }
4248
4249
4250 casgstatus(gp, _Grunning, _Gwaiting)
4251 if trace.ok() {
4252 traceRelease(trace)
4253 }
4254
4255 dropg()
4256
4257 if fn := mp.waitunlockf; fn != nil {
4258 ok := fn(gp, mp.waitlock)
4259 mp.waitunlockf = nil
4260 mp.waitlock = nil
4261 if !ok {
4262 trace := traceAcquire()
4263 casgstatus(gp, _Gwaiting, _Grunnable)
4264 if bubble != nil {
4265 bubble.decActive()
4266 }
4267 if trace.ok() {
4268 trace.GoUnpark(gp, 2)
4269 traceRelease(trace)
4270 }
4271 execute(gp, true)
4272 }
4273 }
4274
4275 if bubble != nil {
4276 bubble.decActive()
4277 }
4278
4279 schedule()
4280 }
4281
4282 func goschedImpl(gp *g, preempted bool) {
4283 pp := gp.m.p.ptr()
4284 trace := traceAcquire()
4285 status := readgstatus(gp)
4286 if status&^_Gscan != _Grunning {
4287 dumpgstatus(gp)
4288 throw("bad g status")
4289 }
4290 if trace.ok() {
4291
4292
4293
4294 if preempted {
4295 trace.GoPreempt()
4296 } else {
4297 trace.GoSched()
4298 }
4299 }
4300 casgstatus(gp, _Grunning, _Grunnable)
4301 if trace.ok() {
4302 traceRelease(trace)
4303 }
4304
4305 dropg()
4306 if preempted && sched.gcwaiting.Load() {
4307
4308
4309 runqput(pp, gp, true)
4310 } else {
4311 lock(&sched.lock)
4312 globrunqput(gp)
4313 unlock(&sched.lock)
4314 }
4315
4316 if mainStarted {
4317 wakep()
4318 }
4319
4320 schedule()
4321 }
4322
4323
4324 func gosched_m(gp *g) {
4325 goschedImpl(gp, false)
4326 }
4327
4328
4329 func goschedguarded_m(gp *g) {
4330 if !canPreemptM(gp.m) {
4331 gogo(&gp.sched)
4332 }
4333 goschedImpl(gp, false)
4334 }
4335
4336 func gopreempt_m(gp *g) {
4337 goschedImpl(gp, true)
4338 }
4339
4340
4341
4342
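// preemptPark parks gp at a synchronous preemption point, leaving it
// in _Gpreempted, and sends the M back into the scheduler. The
// goroutine stays parked until whoever requested the preemption
// (typically the GC, via suspendG) makes it runnable again.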
4343 func preemptPark(gp *g) {
4344 status := readgstatus(gp)
4345 if status&^_Gscan != _Grunning {
4346 dumpgstatus(gp)
4347 throw("bad g status")
4348 }
4349
4350 if gp.asyncSafePoint {
4351
4352
4353
4354 f := findfunc(gp.sched.pc)
4355 if !f.valid() {
4356 throw("preempt at unknown pc")
4357 }
4358 if f.flag&abi.FuncFlagSPWrite != 0 {
4359 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4360 throw("preempt SPWRITE")
4361 }
4362 }
4363
4364
4365
4366
4367
4368
4369
4370 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4371 dropg()
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393 trace := traceAcquire()
4394 if trace.ok() {
4395 trace.GoPark(traceBlockPreempted, 0)
4396 }
4397 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4398 if trace.ok() {
4399 traceRelease(trace)
4400 }
4401 schedule()
4402 }
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418 func goyield() {
4419 checkTimeouts()
4420 mcall(goyield_m)
4421 }
4422
4423 func goyield_m(gp *g) {
4424 trace := traceAcquire()
4425 pp := gp.m.p.ptr()
4426 if trace.ok() {
4427
4428
4429
4430 trace.GoPreempt()
4431 }
4432 casgstatus(gp, _Grunning, _Grunnable)
4433 if trace.ok() {
4434 traceRelease(trace)
4435 }
4436 dropg()
4437 runqput(pp, gp, false)
4438 schedule()
4439 }
4440
4441
4442 func goexit1() {
4443 if raceenabled {
4444 if gp := getg(); gp.bubble != nil {
4445 racereleasemergeg(gp, gp.bubble.raceaddr())
4446 }
4447 racegoend()
4448 }
4449 trace := traceAcquire()
4450 if trace.ok() {
4451 trace.GoEnd()
4452 traceRelease(trace)
4453 }
4454 mcall(goexit0)
4455 }
4456
4457
4458 func goexit0(gp *g) {
4459 if goexperiment.RuntimeSecret && gp.secret > 0 {
4460
4461
4462 memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4463
4464
4465 }
4466 gdestroy(gp)
4467 schedule()
4468 }
4469
4470 func gdestroy(gp *g) {
4471 mp := getg().m
4472 pp := mp.p.ptr()
4473
4474 casgstatus(gp, _Grunning, _Gdead)
4475 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4476 if isSystemGoroutine(gp, false) {
4477 sched.ngsys.Add(-1)
4478 }
4479 gp.m = nil
4480 locked := gp.lockedm != 0
4481 gp.lockedm = 0
4482 mp.lockedg = 0
4483 gp.preemptStop = false
4484 gp.paniconfault = false
4485 gp._defer = nil
4486 gp._panic = nil
4487 gp.writebuf = nil
4488 gp.waitreason = waitReasonZero
4489 gp.param = nil
4490 gp.labels = nil
4491 gp.timer = nil
4492 gp.bubble = nil
4493 gp.fipsOnlyBypass = false
4494 gp.secret = 0
4495
4496 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4497
4498
4499
4500 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4501 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4502 gcController.bgScanCredit.Add(scanCredit)
4503 gp.gcAssistBytes = 0
4504 }
4505
4506 dropg()
4507
4508 if GOARCH == "wasm" {
4509 gfput(pp, gp)
4510 return
4511 }
4512
4513 if locked && mp.lockedInt != 0 {
4514 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4515 if mp.isextra {
4516 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4517 }
4518 throw("exited a goroutine internally locked to the OS thread")
4519 }
4520 gfput(pp, gp)
4521 if locked {
4522
4523
4524
4525
4526
4527
4528 if GOOS != "plan9" {
4529 gogo(&mp.g0.sched)
4530 } else {
4531
4532
4533 mp.lockedExt = 0
4534 }
4535 }
4536 }
4537
4538
4539
4540
4541
4542
4543
4544
4545
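// save writes the current execution context (pc, sp, and frame
// pointer) into gp.sched so the goroutine can later be resumed with
// gogo. It must not be used on the g0 or gsignal stacks; it clears
// sched.lr and requires sched.ctxt to already be nil.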
4546 func save(pc, sp, bp uintptr) {
4547 gp := getg()
4548
4549 if gp == gp.m.g0 || gp == gp.m.gsignal {
4550
4551
4552
4553
4554
4555 throw("save on system g not allowed")
4556 }
4557
4558 gp.sched.pc = pc
4559 gp.sched.sp = sp
4560 gp.sched.lr = 0
4561 gp.sched.bp = bp
4562
4563
4564
4565 if gp.sched.ctxt != nil {
4566 badctxt()
4567 }
4568 }
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593
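// reentersyscall is the common syscall entry path; entersyscall calls
// it with the caller's pc, sp, and frame pointer. It records the
// syscall frame in gp.sched and gp.syscall*, disables preemption and
// stack growth (stackPreempt, throwsplit), and moves the G into
// _Gsyscall while leaving the P attached so exitsyscall can try to
// reclaim it cheaply. The context is re-saved after each systemstack
// call because systemstack itself clobbers g.sched.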
4594 func reentersyscall(pc, sp, bp uintptr) {
4595 gp := getg()
4596
4597
4598
4599 gp.m.locks++
4600
4601
4602
4603
4604
4605 gp.stackguard0 = stackPreempt
4606 gp.throwsplit = true
4607
4608
4609 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4610
4611 pp := gp.m.p.ptr()
4612 if pp.runSafePointFn != 0 {
4613
4614 systemstack(runSafePointFn)
4615 }
4616 gp.m.oldp.set(pp)
4617
4618
4619 save(pc, sp, bp)
4620 gp.syscallsp = sp
4621 gp.syscallpc = pc
4622 gp.syscallbp = bp
4623
4624
4625 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4626 systemstack(func() {
4627 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4628 throw("entersyscall")
4629 })
4630 }
4631 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4632 systemstack(func() {
4633 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4634 throw("entersyscall")
4635 })
4636 }
4637 trace := traceAcquire()
4638 if trace.ok() {
4639
4640
4641
4642
4643 systemstack(func() {
4644 trace.GoSysCall()
4645 })
4646
4647 save(pc, sp, bp)
4648 }
4649 if sched.gcwaiting.Load() {
4650
4651
4652
4653 systemstack(func() {
4654 entersyscallHandleGCWait(trace)
4655 })
4656
4657 save(pc, sp, bp)
4658 }
4659
4660
4661
4662
4663
4664 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4665 casgstatus(gp, _Grunning, _Gsyscall)
4666 }
4667 if staticLockRanking {
4668
4669 save(pc, sp, bp)
4670 }
4671 if trace.ok() {
4672
4673
4674
4675 traceRelease(trace)
4676 }
4677 if sched.sysmonwait.Load() {
4678 systemstack(entersyscallWakeSysmon)
4679
4680 save(pc, sp, bp)
4681 }
4682 gp.m.locks--
4683 }
4684
4685
4686
4687
4688 const debugExtendGrunningNoP = false
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704 func entersyscall() {
4705
4706
4707
4708
4709 fp := getcallerfp()
4710 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4711 }
4712
4713 func entersyscallWakeSysmon() {
4714 lock(&sched.lock)
4715 if sched.sysmonwait.Load() {
4716 sched.sysmonwait.Store(false)
4717 notewakeup(&sched.sysmonnote)
4718 }
4719 unlock(&sched.lock)
4720 }
4721
4722 func entersyscallHandleGCWait(trace traceLocker) {
4723 gp := getg()
4724
4725 lock(&sched.lock)
4726 if sched.stopwait > 0 {
4727
4728 pp := gp.m.p.ptr()
4729 pp.m = 0
4730 gp.m.p = 0
4731 atomic.Store(&pp.status, _Pgcstop)
4732
4733 if trace.ok() {
4734 trace.ProcStop(pp)
4735 }
4736 addGSyscallNoP(gp.m)
4737 pp.gcStopTime = nanotime()
4738 pp.syscalltick++
4739 if sched.stopwait--; sched.stopwait == 0 {
4740 notewakeup(&sched.stopnote)
4741 }
4742 }
4743 unlock(&sched.lock)
4744 }
4745
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
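// entersyscallblock is like entersyscall, but for calls that are
// expected to block for a long time: it hands the P off to another M
// immediately (handoffp) instead of leaving it attached and letting
// sysmon retake it later.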
4758 func entersyscallblock() {
4759 gp := getg()
4760
4761 gp.m.locks++
4762 gp.throwsplit = true
4763 gp.stackguard0 = stackPreempt
4764 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4765 gp.m.p.ptr().syscalltick++
4766
4767 addGSyscallNoP(gp.m)
4768
4769
4770 pc := sys.GetCallerPC()
4771 sp := sys.GetCallerSP()
4772 bp := getcallerfp()
4773 save(pc, sp, bp)
4774 gp.syscallsp = gp.sched.sp
4775 gp.syscallpc = gp.sched.pc
4776 gp.syscallbp = gp.sched.bp
4777 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4778 sp1 := sp
4779 sp2 := gp.sched.sp
4780 sp3 := gp.syscallsp
4781 systemstack(func() {
4782 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4783 throw("entersyscallblock")
4784 })
4785 }
4786
4787
4788
4789
4790
4791
4792 trace := traceAcquire()
4793 systemstack(func() {
4794 if trace.ok() {
4795 trace.GoSysCall()
4796 }
4797 handoffp(releasep())
4798 })
4799
4800
4801
4802 if debugExtendGrunningNoP {
4803 usleep(10)
4804 }
4805 casgstatus(gp, _Grunning, _Gsyscall)
4806 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4807 systemstack(func() {
4808 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4809 throw("entersyscallblock")
4810 })
4811 }
4812 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4813 systemstack(func() {
4814 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4815 throw("entersyscallblock")
4816 })
4817 }
4818 if trace.ok() {
4819 systemstack(func() {
4820 traceRelease(trace)
4821 })
4822 }
4823
4824
4825 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4826
4827 gp.m.locks--
4828 }
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
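// exitsyscall is called as a syscall returns. It moves the G back to
// _Grunning and tries to keep running on a P: if the M still owns its
// P it continues on it; otherwise it tries to re-acquire the P it left
// in entersyscall, or any idle P. If no P can be found, the goroutine
// is queued and the M parks via mcall(exitsyscallNoP).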
4850 func exitsyscall() {
4851 gp := getg()
4852
4853 gp.m.locks++
4854 if sys.GetCallerSP() > gp.syscallsp {
4855 throw("exitsyscall: syscall frame is no longer valid")
4856 }
4857 gp.waitsince = 0
4858
4859 if sched.stopwait == freezeStopWait {
4860
4861
4862
4863 systemstack(func() {
4864 lock(&deadlock)
4865 lock(&deadlock)
4866 })
4867 }
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4881 casgstatus(gp, _Gsyscall, _Grunning)
4882 }
4883
4884
4885
4886
4887 if debugExtendGrunningNoP {
4888 usleep(10)
4889 }
4890
4891
4892 oldp := gp.m.oldp.ptr()
4893 gp.m.oldp.set(nil)
4894
4895
4896 pp := gp.m.p.ptr()
4897 if pp != nil {
4898
4899 if trace := traceAcquire(); trace.ok() {
4900 systemstack(func() {
4901
4902
4903
4904
4905
4906
4907
4908
4909 if pp.syscalltick == gp.m.syscalltick {
4910 trace.GoSysExit(false)
4911 } else {
4912
4913
4914
4915
4916 trace.ProcSteal(pp)
4917 trace.ProcStart()
4918 trace.GoSysExit(true)
4919 trace.GoStart()
4920 }
4921 traceRelease(trace)
4922 })
4923 }
4924 } else {
4925
4926 systemstack(func() {
4927
4928 if pp := exitsyscallTryGetP(oldp); pp != nil {
4929
4930 acquirepNoTrace(pp)
4931
4932
4933 if trace := traceAcquire(); trace.ok() {
4934 trace.ProcStart()
4935 trace.GoSysExit(true)
4936 trace.GoStart()
4937 traceRelease(trace)
4938 }
4939 }
4940 })
4941 pp = gp.m.p.ptr()
4942 }
4943
4944
4945 if pp != nil {
4946 if goroutineProfile.active {
4947
4948
4949
4950 systemstack(func() {
4951 tryRecordGoroutineProfileWB(gp)
4952 })
4953 }
4954
4955
4956 pp.syscalltick++
4957
4958
4959
4960 gp.syscallsp = 0
4961 gp.m.locks--
4962 if gp.preempt {
4963
4964 gp.stackguard0 = stackPreempt
4965 } else {
4966
4967 gp.stackguard0 = gp.stack.lo + stackGuard
4968 }
4969 gp.throwsplit = false
4970
4971 if sched.disable.user && !schedEnabled(gp) {
4972
4973 Gosched()
4974 }
4975 return
4976 }
4977
4978 gp.m.locks--
4979
4980
4981 mcall(exitsyscallNoP)
4982
4983
4984
4985
4986
4987
4988
4989 gp.syscallsp = 0
4990 gp.m.p.ptr().syscalltick++
4991 gp.throwsplit = false
4992 }
4993
4994
4995
4996
4997
4998
4999
5000 func exitsyscallTryGetP(oldp *p) *p {
5001
5002 if oldp != nil {
5003 if thread, ok := setBlockOnExitSyscall(oldp); ok {
5004 thread.takeP()
5005 addGSyscallNoP(thread.mp)
5006 thread.resume()
5007 return oldp
5008 }
5009 }
5010
5011
5012 if sched.pidle != 0 {
5013 lock(&sched.lock)
5014 pp, _ := pidleget(0)
5015 if pp != nil && sched.sysmonwait.Load() {
5016 sched.sysmonwait.Store(false)
5017 notewakeup(&sched.sysmonnote)
5018 }
5019 unlock(&sched.lock)
5020 if pp != nil {
5021 decGSyscallNoP(getg().m)
5022 return pp
5023 }
5024 }
5025 return nil
5026 }
5027
5028
5029
5030
5031
5032
5033
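// exitsyscallNoP is the slow path of exitsyscall, running on g0, for
// when no P could be acquired directly. It makes gp runnable and then
// either grabs an idle P and runs gp on it immediately, or puts gp on
// the global run queue and stops this M.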
5034 func exitsyscallNoP(gp *g) {
5035 traceExitingSyscall()
5036 trace := traceAcquire()
5037 casgstatus(gp, _Grunning, _Grunnable)
5038 traceExitedSyscall()
5039 if trace.ok() {
5040
5041
5042
5043
5044 trace.GoSysExit(true)
5045 traceRelease(trace)
5046 }
5047 decGSyscallNoP(getg().m)
5048 dropg()
5049 lock(&sched.lock)
5050 var pp *p
5051 if schedEnabled(gp) {
5052 pp, _ = pidleget(0)
5053 }
5054 var locked bool
5055 if pp == nil {
5056 globrunqput(gp)
5057
5058
5059
5060
5061
5062
5063 locked = gp.lockedm != 0
5064 } else if sched.sysmonwait.Load() {
5065 sched.sysmonwait.Store(false)
5066 notewakeup(&sched.sysmonnote)
5067 }
5068 unlock(&sched.lock)
5069 if pp != nil {
5070 acquirep(pp)
5071 execute(gp, false)
5072 }
5073 if locked {
5074
5075
5076
5077
5078 stoplockedm()
5079 execute(gp, false)
5080 }
5081 stopm()
5082 schedule()
5083 }
5084
5085
5086
5087
5088
5089
5090
5091 func addGSyscallNoP(mp *m) {
5092
5093
5094
5095 if !mp.isExtraInC {
5096
5097
5098
5099
5100
5101 sched.nGsyscallNoP.Add(1)
5102 }
5103 }
5104
5105
5106
5107
5108
5109
5110
5111 func decGSyscallNoP(mp *m) {
5112
5113
5114
5115 if !mp.isExtraInC {
5116 sched.nGsyscallNoP.Add(-1)
5117 }
5118 }
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132 func syscall_runtime_BeforeFork() {
5133 gp := getg().m.curg
5134
5135
5136
5137
5138 gp.m.locks++
5139 sigsave(&gp.m.sigmask)
5140 sigblock(false)
5141
5142
5143
5144
5145
5146 gp.stackguard0 = stackFork
5147 }
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161 func syscall_runtime_AfterFork() {
5162 gp := getg().m.curg
5163
5164
5165 gp.stackguard0 = gp.stack.lo + stackGuard
5166
5167 msigrestore(gp.m.sigmask)
5168
5169 gp.m.locks--
5170 }
5171
5172
5173
5174 var inForkedChild bool
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195 func syscall_runtime_AfterForkInChild() {
5196
5197
5198
5199
5200 inForkedChild = true
5201
5202 clearSignalHandlers()
5203
5204
5205
5206 msigrestore(getg().m.sigmask)
5207
5208 inForkedChild = false
5209 }
5210
5211
5212
5213
5214 var pendingPreemptSignals atomic.Int32
5215
5216
5217
5218
5219 func syscall_runtime_BeforeExec() {
5220
5221 execLock.lock()
5222
5223
5224
5225 if GOOS == "darwin" || GOOS == "ios" {
5226 for pendingPreemptSignals.Load() > 0 {
5227 osyield()
5228 }
5229 }
5230 }
5231
5232
5233
5234
5235 func syscall_runtime_AfterExec() {
5236 execLock.unlock()
5237 }
5238
5239
5240 func malg(stacksize int32) *g {
5241 newg := new(g)
5242 if stacksize >= 0 {
5243 stacksize = round2(stackSystem + stacksize)
5244 systemstack(func() {
5245 newg.stack = stackalloc(uint32(stacksize))
5246 if valgrindenabled {
5247 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5248 }
5249 })
5250 newg.stackguard0 = newg.stack.lo + stackGuard
5251 newg.stackguard1 = ^uintptr(0)
5252
5253
5254 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5255 }
5256 return newg
5257 }
5258
5259
5260
5261
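// newproc creates a new goroutine running fn and puts it on the
// current P's run queue; it is what a "go" statement compiles into.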
5262 func newproc(fn *funcval) {
5263 gp := getg()
5264 if goexperiment.RuntimeSecret && gp.secret > 0 {
5265 panic("goroutine spawned while running in secret mode")
5266 }
5267
5268 pc := sys.GetCallerPC()
5269 systemstack(func() {
5270 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5271
5272 pp := getg().m.p.ptr()
5273 runqput(pp, newg, true)
5274
5275 if mainStarted {
5276 wakep()
5277 }
5278 })
5279 }
5280
5281
5282
5283
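// newproc1 allocates a new g (or reuses one from the free list)
// running fn, sets up its initial stack and sched context so it starts
// at fn and returns into goexit, assigns it a goid, and returns it in
// _Grunnable, or in _Gwaiting with the given waitreason if parked is
// true. The caller is responsible for queueing the new g.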
5284 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5285 if fn == nil {
5286 fatal("go of nil func value")
5287 }
5288
5289 mp := acquirem()
5290 pp := mp.p.ptr()
5291 newg := gfget(pp)
5292 if newg == nil {
5293 newg = malg(stackMin)
5294 casgstatus(newg, _Gidle, _Gdead)
5295 allgadd(newg)
5296 }
5297 if newg.stack.hi == 0 {
5298 throw("newproc1: newg missing stack")
5299 }
5300
5301 if readgstatus(newg) != _Gdead {
5302 throw("newproc1: new g is not Gdead")
5303 }
5304
5305 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5306 totalSize = alignUp(totalSize, sys.StackAlign)
5307 sp := newg.stack.hi - totalSize
5308 if usesLR {
5309
5310 *(*uintptr)(unsafe.Pointer(sp)) = 0
5311 prepGoExitFrame(sp)
5312 }
5313 if GOARCH == "arm64" {
5314
5315 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5316 }
5317
5318 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5319 newg.sched.sp = sp
5320 newg.stktopsp = sp
5321 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5322 newg.sched.g = guintptr(unsafe.Pointer(newg))
5323 gostartcallfn(&newg.sched, fn)
5324 newg.parentGoid = callergp.goid
5325 newg.gopc = callerpc
5326 newg.ancestors = saveAncestors(callergp)
5327 newg.startpc = fn.fn
5328 newg.runningCleanups.Store(false)
5329 if isSystemGoroutine(newg, false) {
5330 sched.ngsys.Add(1)
5331 } else {
5332
5333 newg.bubble = callergp.bubble
5334 if mp.curg != nil {
5335 newg.labels = mp.curg.labels
5336 }
5337 if goroutineProfile.active {
5338
5339
5340
5341
5342
5343 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5344 }
5345 }
5346
5347 newg.trackingSeq = uint8(cheaprand())
5348 if newg.trackingSeq%gTrackingPeriod == 0 {
5349 newg.tracking = true
5350 }
5351 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5352
5353
5354
5355 trace := traceAcquire()
5356 var status uint32 = _Grunnable
5357 if parked {
5358 status = _Gwaiting
5359 newg.waitreason = waitreason
5360 }
5361 if pp.goidcache == pp.goidcacheend {
5362
5363
5364
5365 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5366 pp.goidcache -= _GoidCacheBatch - 1
5367 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5368 }
5369 newg.goid = pp.goidcache
5370 casgstatus(newg, _Gdead, status)
5371 pp.goidcache++
5372 newg.trace.reset()
5373 if trace.ok() {
5374 trace.GoCreate(newg, newg.startpc, parked)
5375 traceRelease(trace)
5376 }
5377
5378
5379 newg.fipsOnlyBypass = callergp.fipsOnlyBypass
5380
5381
5382 if raceenabled {
5383 newg.racectx = racegostart(callerpc)
5384 newg.raceignore = 0
5385 if newg.labels != nil {
5386
5387
5388 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5389 }
5390 }
5391 pp.goroutinesCreated++
5392 releasem(mp)
5393
5394 return newg
5395 }
5396
5397
5398
5399
5400 func saveAncestors(callergp *g) *[]ancestorInfo {
5401
5402 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5403 return nil
5404 }
5405 var callerAncestors []ancestorInfo
5406 if callergp.ancestors != nil {
5407 callerAncestors = *callergp.ancestors
5408 }
5409 n := int32(len(callerAncestors)) + 1
5410 if n > debug.tracebackancestors {
5411 n = debug.tracebackancestors
5412 }
5413 ancestors := make([]ancestorInfo, n)
5414 copy(ancestors[1:], callerAncestors)
5415
5416 var pcs [tracebackInnerFrames]uintptr
5417 npcs := gcallers(callergp, 0, pcs[:])
5418 ipcs := make([]uintptr, npcs)
5419 copy(ipcs, pcs[:])
5420 ancestors[0] = ancestorInfo{
5421 pcs: ipcs,
5422 goid: callergp.goid,
5423 gopc: callergp.gopc,
5424 }
5425
5426 ancestorsp := new([]ancestorInfo)
5427 *ancestorsp = ancestors
5428 return ancestorsp
5429 }
5430
5431
5432
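// gfput puts a dead g on the per-P free list for reuse, first freeing
// its stack if it is not the standard starting size. Once the local
// list exceeds 64 entries, gs are moved to the global free lists until
// only 32 remain locally.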
5433 func gfput(pp *p, gp *g) {
5434 if readgstatus(gp) != _Gdead {
5435 throw("gfput: bad status (not Gdead)")
5436 }
5437
5438 stksize := gp.stack.hi - gp.stack.lo
5439
5440 if stksize != uintptr(startingStackSize) {
5441
5442 stackfree(gp.stack)
5443 gp.stack.lo = 0
5444 gp.stack.hi = 0
5445 gp.stackguard0 = 0
5446 if valgrindenabled {
5447 valgrindDeregisterStack(gp.valgrindStackID)
5448 gp.valgrindStackID = 0
5449 }
5450 }
5451
5452 pp.gFree.push(gp)
5453 if pp.gFree.size >= 64 {
5454 var (
5455 stackQ gQueue
5456 noStackQ gQueue
5457 )
5458 for pp.gFree.size >= 32 {
5459 gp := pp.gFree.pop()
5460 if gp.stack.lo == 0 {
5461 noStackQ.push(gp)
5462 } else {
5463 stackQ.push(gp)
5464 }
5465 }
5466 lock(&sched.gFree.lock)
5467 sched.gFree.noStack.pushAll(noStackQ)
5468 sched.gFree.stack.pushAll(stackQ)
5469 unlock(&sched.gFree.lock)
5470 }
5471 }
5472
5473
5474
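// gfget takes a dead g off the per-P free list, refilling the local
// list from the global lists if needed. It frees or allocates the g's
// stack as necessary so the returned g always carries a stack of
// startingStackSize. Returns nil if no free g is available.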
5475 func gfget(pp *p) *g {
5476 retry:
5477 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5478 lock(&sched.gFree.lock)
5479
5480 for pp.gFree.size < 32 {
5481
5482 gp := sched.gFree.stack.pop()
5483 if gp == nil {
5484 gp = sched.gFree.noStack.pop()
5485 if gp == nil {
5486 break
5487 }
5488 }
5489 pp.gFree.push(gp)
5490 }
5491 unlock(&sched.gFree.lock)
5492 goto retry
5493 }
5494 gp := pp.gFree.pop()
5495 if gp == nil {
5496 return nil
5497 }
5498 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5499
5500
5501
5502 systemstack(func() {
5503 stackfree(gp.stack)
5504 gp.stack.lo = 0
5505 gp.stack.hi = 0
5506 gp.stackguard0 = 0
5507 if valgrindenabled {
5508 valgrindDeregisterStack(gp.valgrindStackID)
5509 gp.valgrindStackID = 0
5510 }
5511 })
5512 }
5513 if gp.stack.lo == 0 {
5514
5515 systemstack(func() {
5516 gp.stack = stackalloc(startingStackSize)
5517 if valgrindenabled {
5518 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5519 }
5520 })
5521 gp.stackguard0 = gp.stack.lo + stackGuard
5522 } else {
5523 if raceenabled {
5524 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5525 }
5526 if msanenabled {
5527 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5528 }
5529 if asanenabled {
5530 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5531 }
5532 }
5533 return gp
5534 }
5535
5536
5537 func gfpurge(pp *p) {
5538 var (
5539 stackQ gQueue
5540 noStackQ gQueue
5541 )
5542 for !pp.gFree.empty() {
5543 gp := pp.gFree.pop()
5544 if gp.stack.lo == 0 {
5545 noStackQ.push(gp)
5546 } else {
5547 stackQ.push(gp)
5548 }
5549 }
5550 lock(&sched.gFree.lock)
5551 sched.gFree.noStack.pushAll(noStackQ)
5552 sched.gFree.stack.pushAll(stackQ)
5553 unlock(&sched.gFree.lock)
5554 }
5555
5556
5557 func Breakpoint() {
5558 breakpoint()
5559 }
5560
5561
5562
5563
5564
5565
5566 func dolockOSThread() {
5567 if GOARCH == "wasm" {
5568 return
5569 }
5570 gp := getg()
5571 gp.m.lockedg.set(gp)
5572 gp.lockedm.set(gp.m)
5573 }
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591 func LockOSThread() {
5592 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5593
5594
5595
5596 startTemplateThread()
5597 }
5598 gp := getg()
5599 gp.m.lockedExt++
5600 if gp.m.lockedExt == 0 {
5601 gp.m.lockedExt--
5602 panic("LockOSThread nesting overflow")
5603 }
5604 dolockOSThread()
5605 }
5606
5607
5608 func lockOSThread() {
5609 getg().m.lockedInt++
5610 dolockOSThread()
5611 }
5612
5613
5614
5615
5616
5617
5618 func dounlockOSThread() {
5619 if GOARCH == "wasm" {
5620 return
5621 }
5622 gp := getg()
5623 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5624 return
5625 }
5626 gp.m.lockedg = 0
5627 gp.lockedm = 0
5628 }
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644 func UnlockOSThread() {
5645 gp := getg()
5646 if gp.m.lockedExt == 0 {
5647 return
5648 }
5649 gp.m.lockedExt--
5650 dounlockOSThread()
5651 }
5652
5653
5654 func unlockOSThread() {
5655 gp := getg()
5656 if gp.m.lockedInt == 0 {
5657 systemstack(badunlockosthread)
5658 }
5659 gp.m.lockedInt--
5660 dounlockOSThread()
5661 }
5662
5663 func badunlockosthread() {
5664 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5665 }
5666
5667 func gcount(includeSys bool) int32 {
5668 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5669 if !includeSys {
5670 n -= sched.ngsys.Load()
5671 }
5672 for _, pp := range allp {
5673 n -= pp.gFree.size
5674 }
5675
5676
5677
5678 if n < 1 {
5679 n = 1
5680 }
5681 return n
5682 }
5683
5684
5685
5686
5687
5688 func goroutineleakcount() int {
5689 return work.goroutineLeak.count
5690 }
5691
5692 func mcount() int32 {
5693 return int32(sched.mnext - sched.nmfreed)
5694 }
5695
5696 var prof struct {
5697 signalLock atomic.Uint32
5698
5699
5700
5701 hz atomic.Int32
5702 }
5703
5704 func _System() { _System() }
5705 func _ExternalCode() { _ExternalCode() }
5706 func _LostExternalCode() { _LostExternalCode() }
5707 func _GC() { _GC() }
5708 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5709 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5710 func _VDSO() { _VDSO() }
5711
5712
5713
5714
5715
5716 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5717 if prof.hz.Load() == 0 {
5718 return
5719 }
5720
5721
5722
5723
5724 if mp != nil && mp.profilehz == 0 {
5725 return
5726 }
5727
5728
5729
5730
5731
5732
5733
5734 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5735 if f := findfunc(pc); f.valid() {
5736 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5737 cpuprof.lostAtomic++
5738 return
5739 }
5740 }
5741 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5742
5743
5744
5745 cpuprof.lostAtomic++
5746 return
5747 }
5748 }
5749
5750
5751
5752
5753
5754
5755
5756 getg().m.mallocing++
5757
5758 var u unwinder
5759 var stk [maxCPUProfStack]uintptr
5760 n := 0
5761 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5762 cgoOff := 0
5763
5764
5765
5766
5767
5768 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5769 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5770 cgoOff++
5771 }
5772 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5773 mp.cgoCallers[0] = 0
5774 }
5775
5776
5777 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5778 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5779
5780
5781 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5782 } else if mp != nil && mp.vdsoSP != 0 {
5783
5784
5785 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5786 } else {
5787 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5788 }
5789 n += tracebackPCs(&u, 0, stk[n:])
5790
5791 if n <= 0 {
5792
5793
5794 n = 2
5795 if inVDSOPage(pc) {
5796 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5797 } else if pc > firstmoduledata.etext {
5798
5799 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5800 }
5801 stk[0] = pc
5802 if mp.preemptoff != "" {
5803 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5804 } else {
5805 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5806 }
5807 }
5808
5809 if prof.hz.Load() != 0 {
5810
5811
5812
5813 var tagPtr *unsafe.Pointer
5814 if gp != nil && gp.m != nil && gp.m.curg != nil {
5815 tagPtr = &gp.m.curg.labels
5816 }
5817 cpuprof.add(tagPtr, stk[:n])
5818
5819 gprof := gp
5820 var mp *m
5821 var pp *p
5822 if gp != nil && gp.m != nil {
5823 if gp.m.curg != nil {
5824 gprof = gp.m.curg
5825 }
5826 mp = gp.m
5827 pp = gp.m.p.ptr()
5828 }
5829 traceCPUSample(gprof, mp, pp, stk[:n])
5830 }
5831 getg().m.mallocing--
5832 }
5833
5834
5835
5836 func setcpuprofilerate(hz int32) {
5837
5838 if hz < 0 {
5839 hz = 0
5840 }
5841
5842
5843
5844 gp := getg()
5845 gp.m.locks++
5846
5847
5848
5849
5850 setThreadCPUProfiler(0)
5851
5852 for !prof.signalLock.CompareAndSwap(0, 1) {
5853 osyield()
5854 }
5855 if prof.hz.Load() != hz {
5856 setProcessCPUProfiler(hz)
5857 prof.hz.Store(hz)
5858 }
5859 prof.signalLock.Store(0)
5860
5861 lock(&sched.lock)
5862 sched.profilehz = hz
5863 unlock(&sched.lock)
5864
5865 if hz != 0 {
5866 setThreadCPUProfiler(hz)
5867 }
5868
5869 gp.m.locks--
5870 }
5871
5872
5873
5874 func (pp *p) init(id int32) {
5875 pp.id = id
5876 pp.gcw.id = id
5877 pp.status = _Pgcstop
5878 pp.sudogcache = pp.sudogbuf[:0]
5879 pp.deferpool = pp.deferpoolbuf[:0]
5880 pp.wbBuf.reset()
5881 if pp.mcache == nil {
5882 if id == 0 {
5883 if mcache0 == nil {
5884 throw("missing mcache?")
5885 }
5886
5887
5888 pp.mcache = mcache0
5889 } else {
5890 pp.mcache = allocmcache()
5891 }
5892 }
5893 if raceenabled && pp.raceprocctx == 0 {
5894 if id == 0 {
5895 pp.raceprocctx = raceprocctx0
5896 raceprocctx0 = 0
5897 } else {
5898 pp.raceprocctx = raceproccreate()
5899 }
5900 }
5901 lockInit(&pp.timers.mu, lockRankTimers)
5902
5903
5904
5905 timerpMask.set(id)
5906
5907
5908 idlepMask.clear(id)
5909 }
5910
5911
5912
5913
5914
5915 func (pp *p) destroy() {
5916 assertLockHeld(&sched.lock)
5917 assertWorldStopped()
5918
5919
5920 for pp.runqhead != pp.runqtail {
5921
5922 pp.runqtail--
5923 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5924
5925 globrunqputhead(gp)
5926 }
5927 if pp.runnext != 0 {
5928 globrunqputhead(pp.runnext.ptr())
5929 pp.runnext = 0
5930 }
5931
5932
5933 getg().m.p.ptr().timers.take(&pp.timers)
5934
5935
5936
5937 if phase := gcphase; phase != _GCoff {
5938 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5939 throw("P destroyed while GC is running")
5940 }
5941
5942 pp.gcw.spanq.destroy()
5943
5944 clear(pp.sudogbuf[:])
5945 pp.sudogcache = pp.sudogbuf[:0]
5946 pp.pinnerCache = nil
5947 clear(pp.deferpoolbuf[:])
5948 pp.deferpool = pp.deferpoolbuf[:0]
5949 systemstack(func() {
5950 for i := 0; i < pp.mspancache.len; i++ {
5951
5952 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5953 }
5954 pp.mspancache.len = 0
5955 lock(&mheap_.lock)
5956 pp.pcache.flush(&mheap_.pages)
5957 unlock(&mheap_.lock)
5958 })
5959 freemcache(pp.mcache)
5960 pp.mcache = nil
5961 gfpurge(pp)
5962 if raceenabled {
5963 if pp.timers.raceCtx != 0 {
5964
5965
5966
5967
5968
5969 mp := getg().m
5970 phold := mp.p.ptr()
5971 mp.p.set(pp)
5972
5973 racectxend(pp.timers.raceCtx)
5974 pp.timers.raceCtx = 0
5975
5976 mp.p.set(phold)
5977 }
5978 raceprocdestroy(pp.raceprocctx)
5979 pp.raceprocctx = 0
5980 }
5981 pp.gcAssistTime = 0
5982 gcCleanups.queued += pp.cleanupsQueued
5983 pp.cleanupsQueued = 0
5984 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5985 pp.goroutinesCreated = 0
5986 pp.xRegs.free()
5987 pp.status = _Pdead
5988 }
5989
5990
5991
5992
5993
5994
5995
5996
5997
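// procresize changes the number of Ps to nprocs, growing or shrinking
// allp and the idle/timer masks as needed. sched.lock must be held and
// the world must be stopped. It returns, linked through p.link, the Ps
// that have local work and therefore need to be scheduled by the
// caller.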
5998 func procresize(nprocs int32) *p {
5999 assertLockHeld(&sched.lock)
6000 assertWorldStopped()
6001
6002 old := gomaxprocs
6003 if old < 0 || nprocs <= 0 {
6004 throw("procresize: invalid arg")
6005 }
6006 trace := traceAcquire()
6007 if trace.ok() {
6008 trace.Gomaxprocs(nprocs)
6009 traceRelease(trace)
6010 }
6011
6012
6013 now := nanotime()
6014 if sched.procresizetime != 0 {
6015 sched.totaltime += int64(old) * (now - sched.procresizetime)
6016 }
6017 sched.procresizetime = now
6018
6019
6020 if nprocs > int32(len(allp)) {
6021
6022
6023 lock(&allpLock)
6024 if nprocs <= int32(cap(allp)) {
6025 allp = allp[:nprocs]
6026 } else {
6027 nallp := make([]*p, nprocs)
6028
6029
6030 copy(nallp, allp[:cap(allp)])
6031 allp = nallp
6032 }
6033
6034 idlepMask = idlepMask.resize(nprocs)
6035 timerpMask = timerpMask.resize(nprocs)
6036 work.spanqMask = work.spanqMask.resize(nprocs)
6037 unlock(&allpLock)
6038 }
6039
6040
6041 for i := old; i < nprocs; i++ {
6042 pp := allp[i]
6043 if pp == nil {
6044 pp = new(p)
6045 }
6046 pp.init(i)
6047 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
6048 }
6049
6050 gp := getg()
6051 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
6052
6053 gp.m.p.ptr().status = _Prunning
6054 gp.m.p.ptr().mcache.prepareForSweep()
6055 } else {
6056
6057
6058
6059
6060
6061 if gp.m.p != 0 {
6062 trace := traceAcquire()
6063 if trace.ok() {
6064
6065
6066
6067 trace.GoSched()
6068 trace.ProcStop(gp.m.p.ptr())
6069 traceRelease(trace)
6070 }
6071 gp.m.p.ptr().m = 0
6072 }
6073 gp.m.p = 0
6074 pp := allp[0]
6075 pp.m = 0
6076 pp.status = _Pidle
6077 acquirep(pp)
6078 trace := traceAcquire()
6079 if trace.ok() {
6080 trace.GoStart()
6081 traceRelease(trace)
6082 }
6083 }
6084
6085
6086 mcache0 = nil
6087
6088
6089 for i := nprocs; i < old; i++ {
6090 pp := allp[i]
6091 pp.destroy()
6092
6093 }
6094
6095
6096 if int32(len(allp)) != nprocs {
6097 lock(&allpLock)
6098 allp = allp[:nprocs]
6099 idlepMask = idlepMask.resize(nprocs)
6100 timerpMask = timerpMask.resize(nprocs)
6101 work.spanqMask = work.spanqMask.resize(nprocs)
6102 unlock(&allpLock)
6103 }
6104
6105
6106 var runnablePs *p
6107 var runnablePsNeedM *p
6108 var idlePs *p
6109 for i := nprocs - 1; i >= 0; i-- {
6110 pp := allp[i]
6111 if gp.m.p.ptr() == pp {
6112 continue
6113 }
6114 pp.status = _Pidle
6115 if runqempty(pp) {
6116 pp.link.set(idlePs)
6117 idlePs = pp
6118 continue
6119 }
6120
6121
6122
6123
6124
6125
6126
6127
6128 var mp *m
6129 if oldm := pp.oldm.get(); oldm != nil {
6130
6131 mp = mgetSpecific(oldm)
6132 }
6133 if mp == nil {
6134
6135 pp.link.set(runnablePsNeedM)
6136 runnablePsNeedM = pp
6137 continue
6138 }
6139 pp.m.set(mp)
6140 pp.link.set(runnablePs)
6141 runnablePs = pp
6142 }
6143
6144
6145 for runnablePsNeedM != nil {
6146 pp := runnablePsNeedM
6147 runnablePsNeedM = pp.link.ptr()
6148
6149 mp := mget()
6150 pp.m.set(mp)
6151 pp.link.set(runnablePs)
6152 runnablePs = pp
6153 }
6154
6155
6156
6157
6158
6159
6160
6161
6162
6163
6164
6165
6166
6167
6168
6169
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179 if gcBlackenEnabled != 0 {
6180 for idlePs != nil {
6181 pp := idlePs
6182
6183 ok, _ := gcController.assignWaitingGCWorker(pp, now)
6184 if !ok {
6185
6186 break
6187 }
6188
6189
6190
6191
6192
6193
6194
6195
6196 idlePs = pp.link.ptr()
6197 mp := mget()
6198 pp.m.set(mp)
6199 pp.link.set(runnablePs)
6200 runnablePs = pp
6201 }
6202 }
6203
6204
6205 for idlePs != nil {
6206 pp := idlePs
6207 idlePs = pp.link.ptr()
6208 pidleput(pp, now)
6209 }
6210
6211 stealOrder.reset(uint32(nprocs))
6212 var int32p *int32 = &gomaxprocs
6213 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6214 if old != nprocs {
6215
6216 gcCPULimiter.resetCapacity(now, nprocs)
6217 }
6218 return runnablePs
6219 }
6220
6221
6222
6223
6224
6225
6226
6227 func acquirep(pp *p) {
6228
6229 acquirepNoTrace(pp)
6230
6231
6232 trace := traceAcquire()
6233 if trace.ok() {
6234 trace.ProcStart()
6235 traceRelease(trace)
6236 }
6237 }
6238
6239
6240
6241
6242 func acquirepNoTrace(pp *p) {
6243
6244 wirep(pp)
6245
6246
6247
6248
6249
6250
6251 pp.oldm = pp.m.ptr().self
6252
6253
6254
6255 pp.mcache.prepareForSweep()
6256 }
6257
6258
6259
6260
6261
6262
6263
6264 func wirep(pp *p) {
6265 gp := getg()
6266
6267 if gp.m.p != 0 {
6268
6269
6270 systemstack(func() {
6271 throw("wirep: already in go")
6272 })
6273 }
6274 if pp.m != 0 || pp.status != _Pidle {
6275
6276
6277 systemstack(func() {
6278 id := int64(0)
6279 if pp.m != 0 {
6280 id = pp.m.ptr().id
6281 }
6282 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6283 throw("wirep: invalid p state")
6284 })
6285 }
6286 gp.m.p.set(pp)
6287 pp.m.set(gp.m)
6288 pp.status = _Prunning
6289 }
6290
6291
6292 func releasep() *p {
6293 trace := traceAcquire()
6294 if trace.ok() {
6295 trace.ProcStop(getg().m.p.ptr())
6296 traceRelease(trace)
6297 }
6298 return releasepNoTrace()
6299 }
6300
6301
6302 func releasepNoTrace() *p {
6303 gp := getg()
6304
6305 if gp.m.p == 0 {
6306 throw("releasep: invalid arg")
6307 }
6308 pp := gp.m.p.ptr()
6309 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6310 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6311 throw("releasep: invalid p state")
6312 }
6313
6314
6315 gcController.releaseNextGCMarkWorker(pp)
6316
6317 gp.m.p = 0
6318 pp.m = 0
6319 pp.status = _Pidle
6320 return pp
6321 }
6322
6323 func incidlelocked(v int32) {
6324 lock(&sched.lock)
6325 sched.nmidlelocked += v
6326 if v > 0 {
6327 checkdead()
6328 }
6329 unlock(&sched.lock)
6330 }
6331
6332
6333
6334
6335 func checkdead() {
6336 assertLockHeld(&sched.lock)
6337
6338
6339
6340
6341
6342
6343 if (islibrary || isarchive) && GOARCH != "wasm" {
6344 return
6345 }
6346
6347
6348
6349
6350
6351 if panicking.Load() > 0 {
6352 return
6353 }
6354
6355
6356
6357
6358
6359 var run0 int32
6360 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6361 run0 = 1
6362 }
6363
6364 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6365 if run > run0 {
6366 return
6367 }
6368 if run < 0 {
6369 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6370 unlock(&sched.lock)
6371 throw("checkdead: inconsistent counts")
6372 }
6373
6374 grunning := 0
6375 forEachG(func(gp *g) {
6376 if isSystemGoroutine(gp, false) {
6377 return
6378 }
6379 s := readgstatus(gp)
6380 switch s &^ _Gscan {
6381 case _Gwaiting,
6382 _Gpreempted:
6383 grunning++
6384 case _Grunnable,
6385 _Grunning,
6386 _Gsyscall:
6387 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6388 unlock(&sched.lock)
6389 throw("checkdead: runnable g")
6390 }
6391 })
6392 if grunning == 0 {
6393 unlock(&sched.lock)
6394 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6395 }
6396
6397
6398 if faketime != 0 {
6399 if when := timeSleepUntil(); when < maxWhen {
6400 faketime = when
6401
6402
6403 pp, _ := pidleget(faketime)
6404 if pp == nil {
6405
6406
6407 unlock(&sched.lock)
6408 throw("checkdead: no p for timer")
6409 }
6410 mp := mget()
6411 if mp == nil {
6412
6413
6414 unlock(&sched.lock)
6415 throw("checkdead: no m for timer")
6416 }
6417
6418
6419
6420 sched.nmspinning.Add(1)
6421 mp.spinning = true
6422 mp.nextp.set(pp)
6423 notewakeup(&mp.park)
6424 return
6425 }
6426 }
6427
6428
6429 for _, pp := range allp {
6430 if len(pp.timers.heap) > 0 {
6431 return
6432 }
6433 }
6434
6435 unlock(&sched.lock)
6436 fatal("all goroutines are asleep - deadlock!")
6437 }
6438
6439
6440
6441
6442
6443
6444 var forcegcperiod int64 = 2 * 60 * 1e9
6445
6446
6447
6448
6449 const haveSysmon = GOARCH != "wasm"
6450
6451
6452
6453
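// sysmon is the system monitor. It runs on its own M, without a P, in
// a loop with an adaptive delay (20µs, doubling when idle, capped at
// 10ms). Each iteration it polls the network if nothing else has for
// over 10ms, retakes Ps stuck in syscalls or running too long, kicks
// off forced GC and the scavenger when due, re-checks GOMAXPROCS if
// updatemaxprocs is enabled, and emits scheduler traces if requested.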
6454 func sysmon() {
6455 lock(&sched.lock)
6456 sched.nmsys++
6457 checkdead()
6458 unlock(&sched.lock)
6459
6460 lastgomaxprocs := int64(0)
6461 lasttrace := int64(0)
6462 idle := 0
6463 delay := uint32(0)
6464
6465 for {
6466 if idle == 0 {
6467 delay = 20
6468 } else if idle > 50 {
6469 delay *= 2
6470 }
6471 if delay > 10*1000 {
6472 delay = 10 * 1000
6473 }
6474 usleep(delay)
6475
6476
6477
6478
6479
6480
6481
6482
6483
6484
6485
6486
6487
6488
6489
6490
6491 now := nanotime()
6492 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6493 lock(&sched.lock)
6494 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6495 syscallWake := false
6496 next := timeSleepUntil()
6497 if next > now {
6498 sched.sysmonwait.Store(true)
6499 unlock(&sched.lock)
6500
6501
6502 sleep := forcegcperiod / 2
6503 if next-now < sleep {
6504 sleep = next - now
6505 }
6506 shouldRelax := sleep >= osRelaxMinNS
6507 if shouldRelax {
6508 osRelax(true)
6509 }
6510 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6511 if shouldRelax {
6512 osRelax(false)
6513 }
6514 lock(&sched.lock)
6515 sched.sysmonwait.Store(false)
6516 noteclear(&sched.sysmonnote)
6517 }
6518 if syscallWake {
6519 idle = 0
6520 delay = 20
6521 }
6522 }
6523 unlock(&sched.lock)
6524 }
6525
6526 lock(&sched.sysmonlock)
6527
6528
6529 now = nanotime()
6530
6531
6532 if *cgo_yield != nil {
6533 asmcgocall(*cgo_yield, nil)
6534 }
6535
6536 lastpoll := sched.lastpoll.Load()
6537 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6538 sched.lastpoll.CompareAndSwap(lastpoll, now)
6539 list, delta := netpoll(0)
6540 if !list.empty() {
6541
6542
6543
6544
6545
6546
6547
6548 incidlelocked(-1)
6549 injectglist(&list)
6550 incidlelocked(1)
6551 netpollAdjustWaiters(delta)
6552 }
6553 }
6554
6555 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6556 sysmonUpdateGOMAXPROCS()
6557 lastgomaxprocs = now
6558 }
6559 if scavenger.sysmonWake.Load() != 0 {
6560
6561 scavenger.wake()
6562 }
6563
6564
6565 if retake(now) != 0 {
6566 idle = 0
6567 } else {
6568 idle++
6569 }
6570
6571 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6572 lock(&forcegc.lock)
6573 forcegc.idle.Store(false)
6574 var list gList
6575 list.push(forcegc.g)
6576 injectglist(&list)
6577 unlock(&forcegc.lock)
6578 }
6579 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6580 lasttrace = now
6581 schedtrace(debug.scheddetail > 0)
6582 }
6583 unlock(&sched.sysmonlock)
6584 }
6585 }
6586
6587 type sysmontick struct {
6588 schedtick uint32
6589 syscalltick uint32
6590 schedwhen int64
6591 syscallwhen int64
6592 }
6593
6594
6595
6596 const forcePreemptNS = 10 * 1000 * 1000
6597
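// retake is called by sysmon to reclaim Ps that are blocked in a
// syscall or have been running the same goroutine for more than
// forcePreemptNS. Long-running Ps get a preemption request; Ps blocked
// in syscalls are taken from their M and handed off, unless they have
// no local work, other spinning or idle Ps exist, and the syscall is
// recent. It returns the number of Ps retaken.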
6598 func retake(now int64) uint32 {
6599 n := 0
6600
6601
6602 lock(&allpLock)
6603
6604
6605
6606 for i := 0; i < len(allp); i++ {
6607
6608
6609
6610
6611
6612
6613
6614
6615 pp := allp[i]
6616 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6617
6618
6619 continue
6620 }
6621 pd := &pp.sysmontick
6622 sysretake := false
6623
6624
6625
6626
6627
6628 schedt := int64(pp.schedtick)
6629 if int64(pd.schedtick) != schedt {
6630 pd.schedtick = uint32(schedt)
6631 pd.schedwhen = now
6632 } else if pd.schedwhen+forcePreemptNS <= now {
6633 preemptone(pp)
6634
6635
6636
6637
6638 sysretake = true
6639 }
6640
6641
6642 unlock(&allpLock)
6643
6644
6645
6646
6647
6648
6649
6650
6651 incidlelocked(-1)
6652
6653
6654 thread, ok := setBlockOnExitSyscall(pp)
6655 if !ok {
6656
6657 goto done
6658 }
6659
6660
6661 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6662 pd.syscalltick = uint32(syst)
6663 pd.syscallwhen = now
6664 thread.resume()
6665 goto done
6666 }
6667
6668
6669
6670
6671 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6672 thread.resume()
6673 goto done
6674 }
6675
6676
6677
6678 thread.takeP()
6679 thread.resume()
6680 n++
6681
6682
6683 handoffp(pp)
6684
6685
6686
6687 done:
6688 incidlelocked(1)
6689 lock(&allpLock)
6690 }
6691 unlock(&allpLock)
6692 return uint32(n)
6693 }
6694
6695
6696
6697 type syscallingThread struct {
6698 gp *g
6699 mp *m
6700 pp *p
6701 status uint32
6702 }
6703
6704
6705
6706
6707
6708
6709
6710
6711
6712
6713
6714
6715
6716
6717
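// setBlockOnExitSyscall tries to pin a P whose M is currently sitting
// in a syscall so the P can be inspected or taken away safely. It sets
// the _Gscan bit on the syscalling G, which keeps that G from
// completing its syscall exit until resume clears the bit. It reports
// whether the pin succeeded; on success the caller must eventually
// call resume on the returned thread.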
6718 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6719 if pp.status != _Prunning {
6720 return syscallingThread{}, false
6721 }
6722
6723
6724
6725
6726
6727
6728
6729
6730
6731
6732
6733 mp := pp.m.ptr()
6734 if mp == nil {
6735
6736 return syscallingThread{}, false
6737 }
6738 gp := mp.curg
6739 if gp == nil {
6740
6741 return syscallingThread{}, false
6742 }
6743 status := readgstatus(gp) &^ _Gscan
6744
6745
6746
6747
6748 if status != _Gsyscall && status != _Gdeadextra {
6749
6750 return syscallingThread{}, false
6751 }
6752 if !castogscanstatus(gp, status, status|_Gscan) {
6753
6754 return syscallingThread{}, false
6755 }
6756 if gp.m != mp || gp.m.p.ptr() != pp {
6757
6758 casfrom_Gscanstatus(gp, status|_Gscan, status)
6759 return syscallingThread{}, false
6760 }
6761 return syscallingThread{gp, mp, pp, status}, true
6762 }
6763
6764
6765
6766
6767
6768 func (s syscallingThread) gcstopP() {
6769 assertLockHeld(&sched.lock)
6770
6771 s.releaseP(_Pgcstop)
6772 s.pp.gcStopTime = nanotime()
6773 sched.stopwait--
6774 }
6775
6776
6777
6778 func (s syscallingThread) takeP() {
6779 s.releaseP(_Pidle)
6780 }
6781
6782
6783
6784
6785 func (s syscallingThread) releaseP(state uint32) {
6786 if state != _Pidle && state != _Pgcstop {
6787 throw("attempted to release P into a bad state")
6788 }
6789 trace := traceAcquire()
6790 s.pp.m = 0
6791 s.mp.p = 0
6792 atomic.Store(&s.pp.status, state)
6793 if trace.ok() {
6794 trace.ProcSteal(s.pp)
6795 traceRelease(trace)
6796 }
6797 addGSyscallNoP(s.mp)
6798 s.pp.syscalltick++
6799 }
6800
6801
6802 func (s syscallingThread) resume() {
6803 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6804 }
6805
6806
6807
6808
6809
6810
6811 func preemptall() bool {
6812 res := false
6813 for _, pp := range allp {
6814 if pp.status != _Prunning {
6815 continue
6816 }
6817 if preemptone(pp) {
6818 res = true
6819 }
6820 }
6821 return res
6822 }
6823
6824
6825
6826
6827
6828
6829
6830
6831
6832
6833
6834 func preemptone(pp *p) bool {
6835 mp := pp.m.ptr()
6836 if mp == nil || mp == getg().m {
6837 return false
6838 }
6839 gp := mp.curg
6840 if gp == nil || gp == mp.g0 {
6841 return false
6842 }
6843 if readgstatus(gp)&^_Gscan == _Gsyscall {
6844
6845 return false
6846 }
6847
6848 gp.preempt = true
6849
6850
6851
6852
6853
6854 gp.stackguard0 = stackPreempt
6855
6856
6857 if preemptMSupported && debug.asyncpreemptoff == 0 {
6858 pp.preempt = true
6859 preemptM(mp)
6860 }
6861
6862 return true
6863 }
6864
6865 var starttime int64
6866
6867 func schedtrace(detailed bool) {
6868 now := nanotime()
6869 if starttime == 0 {
6870 starttime = now
6871 }
6872
6873 lock(&sched.lock)
6874 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6875 if detailed {
6876 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6877 }
6878
6879
6880
6881 for i, pp := range allp {
6882 h := atomic.Load(&pp.runqhead)
6883 t := atomic.Load(&pp.runqtail)
6884 if detailed {
6885 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6886 mp := pp.m.ptr()
6887 if mp != nil {
6888 print(mp.id)
6889 } else {
6890 print("nil")
6891 }
6892 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6893 } else {
6894
6895
6896 print(" ")
6897 if i == 0 {
6898 print("[ ")
6899 }
6900 print(t - h)
6901 if i == len(allp)-1 {
6902 print(" ]")
6903 }
6904 }
6905 }
6906
6907 if !detailed {
6908
6909 print(" schedticks=[ ")
6910 for _, pp := range allp {
6911 print(pp.schedtick)
6912 print(" ")
6913 }
6914 print("]\n")
6915 }
6916
6917 if !detailed {
6918 unlock(&sched.lock)
6919 return
6920 }
6921
6922 for mp := allm; mp != nil; mp = mp.alllink {
6923 pp := mp.p.ptr()
6924 print(" M", mp.id, ": p=")
6925 if pp != nil {
6926 print(pp.id)
6927 } else {
6928 print("nil")
6929 }
6930 print(" curg=")
6931 if mp.curg != nil {
6932 print(mp.curg.goid)
6933 } else {
6934 print("nil")
6935 }
6936 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6937 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6938 print(lockedg.goid)
6939 } else {
6940 print("nil")
6941 }
6942 print("\n")
6943 }
6944
6945 forEachG(func(gp *g) {
6946 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6947 if gp.m != nil {
6948 print(gp.m.id)
6949 } else {
6950 print("nil")
6951 }
6952 print(" lockedm=")
6953 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6954 print(lockedm.id)
6955 } else {
6956 print("nil")
6957 }
6958 print("\n")
6959 })
6960 unlock(&sched.lock)
6961 }
6962
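// updateMaxProcsGState is the handshake state between sysmon, which computes
// a new default GOMAXPROCS value, and the goroutine that applies it.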
6963 type updateMaxProcsGState struct {
6964 lock mutex
6965 g *g
6966 idle atomic.Bool
6967
6968
6969 procs int32
6970 }
6971
6972 var (
6973
6974
6975 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6976
6977
6978
6979 updateMaxProcsG updateMaxProcsGState
6980
6981
6982
6983
6984
6985
6986
6987
6988
6989
6990
6991
6992
6993
6994
6995
6996
6997
6998
6999
7000
7001
7002
7003
7004
7005
7006
7007
7008
7009
7010
7011
7012
7013
7014
7015
7016
7017
7018
7019
7020
7021
7022
7023
7024
7025
7026
7027
7028 computeMaxProcsLock mutex
7029 )
7030
7031
7032
7033
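// defaultGOMAXPROCSUpdateEnable starts the background goroutine that keeps
// GOMAXPROCS in sync with the runtime's default value. When the
// updatemaxprocs GODEBUG setting is 0, automatic updates stay disabled and
// the non-default setting is counted instead.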
7034 func defaultGOMAXPROCSUpdateEnable() {
7035 if debug.updatemaxprocs == 0 {
7036
7037
7038
7039
7040
7041
7042
7043
7044
7045
7046
7047 updatemaxprocs.IncNonDefault()
7048 return
7049 }
7050
7051 go updateMaxProcsGoroutine()
7052 }
7053
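// updateMaxProcsGoroutine parks until sysmon hands it a new GOMAXPROCS value,
// then stops the world, re-checks that GOMAXPROCS has not been set explicitly
// in the meantime, stages the new value in newprocs, and restarts the world,
// which applies it. It exits once a custom GOMAXPROCS is observed.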
7054 func updateMaxProcsGoroutine() {
7055 updateMaxProcsG.g = getg()
7056 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
7057 for {
7058 lock(&updateMaxProcsG.lock)
7059 if updateMaxProcsG.idle.Load() {
7060 throw("updateMaxProcsGoroutine: phase error")
7061 }
7062 updateMaxProcsG.idle.Store(true)
7063 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
7064
7065
7066 stw := stopTheWorldGC(stwGOMAXPROCS)
7067
7068
7069 lock(&sched.lock)
7070 custom := sched.customGOMAXPROCS
7071 unlock(&sched.lock)
7072 if custom {
7073 startTheWorldGC(stw)
7074 return
7075 }
7076
7077
7078
7079
7080
7081 newprocs = updateMaxProcsG.procs
7082 lock(&sched.lock)
7083 sched.customGOMAXPROCS = false
7084 unlock(&sched.lock)
7085
7086 startTheWorldGC(stw)
7087 }
7088 }
7089
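// sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS and, if the value
// differs from the current setting and no explicit GOMAXPROCS call has
// overridden it, wakes updateMaxProcsGoroutine to apply the change.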
7090 func sysmonUpdateGOMAXPROCS() {
7091
7092 lock(&computeMaxProcsLock)
7093
7094
7095 lock(&sched.lock)
7096 custom := sched.customGOMAXPROCS
7097 curr := gomaxprocs
7098 unlock(&sched.lock)
7099 if custom {
7100 unlock(&computeMaxProcsLock)
7101 return
7102 }
7103
7104
7105 procs := defaultGOMAXPROCS(0)
7106 unlock(&computeMaxProcsLock)
7107 if procs == curr {
7108
7109 return
7110 }
7111
7112
7113
7114
7115 if updateMaxProcsG.idle.Load() {
7116 lock(&updateMaxProcsG.lock)
7117 updateMaxProcsG.procs = procs
7118 updateMaxProcsG.idle.Store(false)
7119 var list gList
7120 list.push(updateMaxProcsG.g)
7121 injectglist(&list)
7122 unlock(&updateMaxProcsG.lock)
7123 }
7124 }
7125
7126
7127
7128
7129
7130
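// schedEnableUser enables or disables scheduling of user goroutines. It does
// not stop user goroutines that are already running. When re-enabling, the
// parked runnable user goroutines are pushed onto the global run queue and
// idle Ps are started to run them.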
7131 func schedEnableUser(enable bool) {
7132 lock(&sched.lock)
7133 if sched.disable.user == !enable {
7134 unlock(&sched.lock)
7135 return
7136 }
7137 sched.disable.user = !enable
7138 if enable {
7139 n := sched.disable.runnable.size
7140 globrunqputbatch(&sched.disable.runnable)
7141 unlock(&sched.lock)
7142 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7143 startm(nil, false, false)
7144 }
7145 } else {
7146 unlock(&sched.lock)
7147 }
7148 }
7149
7150
7151
7152
7153
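// schedEnabled reports whether gp should be allowed to run: when user
// scheduling is disabled, only system goroutines may run. sched.lock must be
// held.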
7154 func schedEnabled(gp *g) bool {
7155 assertLockHeld(&sched.lock)
7156
7157 if sched.disable.user {
7158 return isSystemGoroutine(gp, true)
7159 }
7160 return true
7161 }
7162
7163
7164
7165
7166
7167
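// mput puts mp on the idle M list. sched.lock must be held.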
7168 func mput(mp *m) {
7169 assertLockHeld(&sched.lock)
7170
7171 sched.midle.push(unsafe.Pointer(mp))
7172 sched.nmidle++
7173 checkdead()
7174 }
7175
7176
7177
7178
7179
7180
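// mget takes an M off the idle list, or returns nil if the list is empty.
// sched.lock must be held.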
7181 func mget() *m {
7182 assertLockHeld(&sched.lock)
7183
7184 mp := (*m)(sched.midle.pop())
7185 if mp != nil {
7186 sched.nmidle--
7187 }
7188 return mp
7189 }
7190
7191
7192
7193
7194
7195
7196
7197
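// mgetSpecific removes a specific M from the idle list, returning nil if mp
// is not on the list. sched.lock must be held.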
7198 func mgetSpecific(mp *m) *m {
7199 assertLockHeld(&sched.lock)
7200
7201 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7202
7203 return nil
7204 }
7205
7206 sched.midle.remove(unsafe.Pointer(mp))
7207 sched.nmidle--
7208
7209 return mp
7210 }
7211
7212
7213
7214
7215
7216
7217 func globrunqput(gp *g) {
7218 assertLockHeld(&sched.lock)
7219
7220 sched.runq.pushBack(gp)
7221 }
7222
7223
7224
7225
7226
7227
7228 func globrunqputhead(gp *g) {
7229 assertLockHeld(&sched.lock)
7230
7231 sched.runq.push(gp)
7232 }
7233
7234
7235
7236
7237
7238
7239
7240 func globrunqputbatch(batch *gQueue) {
7241 assertLockHeld(&sched.lock)
7242
7243 sched.runq.pushBackAll(*batch)
7244 *batch = gQueue{}
7245 }
7246
7247
7248
7249 func globrunqget() *g {
7250 assertLockHeld(&sched.lock)
7251
7252 if sched.runq.size == 0 {
7253 return nil
7254 }
7255
7256 return sched.runq.pop()
7257 }
7258
7259
7260
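// globrunqgetbatch removes up to n goroutines from the head of the global
// runnable queue, returning the first directly and the remainder in q. n is
// capped so a single P does not drain the entire queue. sched.lock must be
// held.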
7261 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7262 assertLockHeld(&sched.lock)
7263
7264 if sched.runq.size == 0 {
7265 return
7266 }
7267
7268 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7269
7270 gp = sched.runq.pop()
7271 n--
7272
7273 for ; n > 0; n-- {
7274 gp1 := sched.runq.pop()
7275 q.pushBack(gp1)
7276 }
7277 return
7278 }
7279
7280
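// pMask is an atomic bitmap with one bit per P, indexed by P id (used for
// idlepMask and timerpMask).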
7281 type pMask []uint32
7282
7283
7284 func (p pMask) read(id uint32) bool {
7285 word := id / 32
7286 mask := uint32(1) << (id % 32)
7287 return (atomic.Load(&p[word]) & mask) != 0
7288 }
7289
7290
7291 func (p pMask) set(id int32) {
7292 word := id / 32
7293 mask := uint32(1) << (id % 32)
7294 atomic.Or(&p[word], mask)
7295 }
7296
7297
7298 func (p pMask) clear(id int32) {
7299 word := id / 32
7300 mask := uint32(1) << (id % 32)
7301 atomic.And(&p[word], ^mask)
7302 }
7303
7304
7305 func (p pMask) any() bool {
7306 for i := range p {
7307 if atomic.Load(&p[i]) != 0 {
7308 return true
7309 }
7310 }
7311 return false
7312 }
7313
7314
7315
7316
7317
7318 func (p pMask) resize(nprocs int32) pMask {
7319 maskWords := (nprocs + 31) / 32
7320
7321 if maskWords <= int32(cap(p)) {
7322 return p[:maskWords]
7323 }
7324 newMask := make([]uint32, maskWords)
7325
7326 copy(newMask, p)
7327 return newMask
7328 }
7329
7330
7331
7332
7333
7334
7335
7336
7337
7338
7339
7340
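// pidleput puts pp on the idle P list, updating the idle and timer masks and
// the CPU limiter bookkeeping. now should be nanotime() or 0; the (possibly
// freshly read) time is returned for reuse. sched.lock must be held and pp's
// run queue must be empty.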
7341 func pidleput(pp *p, now int64) int64 {
7342 assertLockHeld(&sched.lock)
7343
7344 if !runqempty(pp) {
7345 throw("pidleput: P has non-empty run queue")
7346 }
7347 if now == 0 {
7348 now = nanotime()
7349 }
7350 if pp.timers.len.Load() == 0 {
7351 timerpMask.clear(pp.id)
7352 }
7353 idlepMask.set(pp.id)
7354 pp.link = sched.pidle
7355 sched.pidle.set(pp)
7356 sched.npidle.Add(1)
7357 if !pp.limiterEvent.start(limiterEventIdle, now) {
7358 throw("must be able to track idle limiter event")
7359 }
7360 return now
7361 }
7362
7363
7364
7365
7366
7367
7368
7369
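// pidleget tries to take a P off the idle list, undoing pidleput's mask and
// limiter bookkeeping. It returns nil if the list is empty. sched.lock must
// be held.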
7370 func pidleget(now int64) (*p, int64) {
7371 assertLockHeld(&sched.lock)
7372
7373 pp := sched.pidle.ptr()
7374 if pp != nil {
7375
7376 if now == 0 {
7377 now = nanotime()
7378 }
7379 timerpMask.set(pp.id)
7380 idlepMask.clear(pp.id)
7381 sched.pidle = pp.link
7382 sched.npidle.Add(-1)
7383 pp.limiterEvent.stop(limiterEventIdle, now)
7384 }
7385 return pp, now
7386 }
7387
7388
7389
7390
7391
7392
7393
7394
7395
7396
7397
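// pidlegetSpinning is pidleget for callers that intend to run a spinning M on
// the returned P. If no P is available it sets sched.needspinning to signal
// that a spinning M is wanted.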
7398 func pidlegetSpinning(now int64) (*p, int64) {
7399 assertLockHeld(&sched.lock)
7400
7401 pp, now := pidleget(now)
7402 if pp == nil {
7403
7404
7405
7406 sched.needspinning.Store(1)
7407 return nil, now
7408 }
7409
7410 return pp, now
7411 }
7412
7413
7414
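// runqempty reports whether pp has no goroutines in its local run queue,
// including runnext. Re-reading runqtail guards against racing with a runqput
// that is moving a goroutine between the queue and runnext.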
7415 func runqempty(pp *p) bool {
7416
7417
7418
7419
7420 for {
7421 head := atomic.Load(&pp.runqhead)
7422 tail := atomic.Load(&pp.runqtail)
7423 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7424 if tail == atomic.Load(&pp.runqtail) {
7425 return head == tail && runnext == 0
7426 }
7427 }
7428 }
7429
7430
7431
7432
7433
7434
7435
7436
7437
7438
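// randomizeScheduler perturbs scheduling decisions under the race detector to
// shake out latent assumptions about scheduling order.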
7439 const randomizeScheduler = raceenabled
7440
7441
7442
7443
7444
7445
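// runqput tries to put gp on the local runnable queue. If next is true it
// goes into pp.runnext (displacing any previous runnext into the queue);
// otherwise it is added at the tail. If the local queue is full, half of it
// plus gp are pushed to the global queue via runqputslow. Executed only by
// the owner P.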
7446 func runqput(pp *p, gp *g, next bool) {
7447 if !haveSysmon && next {
7448
7449
7450
7451
7452
7453
7454
7455
7456 next = false
7457 }
7458 if randomizeScheduler && next && randn(2) == 0 {
7459 next = false
7460 }
7461
7462 if next {
7463 retryNext:
7464 oldnext := pp.runnext
7465 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7466 goto retryNext
7467 }
7468 if oldnext == 0 {
7469 return
7470 }
7471
7472 gp = oldnext.ptr()
7473 }
7474
7475 retry:
7476 h := atomic.LoadAcq(&pp.runqhead)
7477 t := pp.runqtail
7478 if t-h < uint32(len(pp.runq)) {
7479 pp.runq[t%uint32(len(pp.runq))].set(gp)
7480 atomic.StoreRel(&pp.runqtail, t+1)
7481 return
7482 }
7483 if runqputslow(pp, gp, h, t) {
7484 return
7485 }
7486
7487 goto retry
7488 }
7489
7490
7491
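// runqputslow is runqput's slow path: it moves half of pp's local queue plus
// gp to the global runnable queue. It reports false if the queue changed
// underneath it and the caller should retry.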
7492 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7493 var batch [len(pp.runq)/2 + 1]*g
7494
7495
7496 n := t - h
7497 n = n / 2
7498 if n != uint32(len(pp.runq)/2) {
7499 throw("runqputslow: queue is not full")
7500 }
7501 for i := uint32(0); i < n; i++ {
7502 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7503 }
7504 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7505 return false
7506 }
7507 batch[n] = gp
7508
7509 if randomizeScheduler {
7510 for i := uint32(1); i <= n; i++ {
7511 j := cheaprandn(i + 1)
7512 batch[i], batch[j] = batch[j], batch[i]
7513 }
7514 }
7515
7516
7517 for i := uint32(0); i < n; i++ {
7518 batch[i].schedlink.set(batch[i+1])
7519 }
7520
7521 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7522
7523
7524 lock(&sched.lock)
7525 globrunqputbatch(&q)
7526 unlock(&sched.lock)
7527 return true
7528 }
7529
7530
7531
7532
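// runqputbatch moves as many goroutines as fit from q onto pp's local run
// queue; any goroutines that do not fit remain in q. Executed only by the
// owner P.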
7533 func runqputbatch(pp *p, q *gQueue) {
7534 if q.empty() {
7535 return
7536 }
7537 h := atomic.LoadAcq(&pp.runqhead)
7538 t := pp.runqtail
7539 n := uint32(0)
7540 for !q.empty() && t-h < uint32(len(pp.runq)) {
7541 gp := q.pop()
7542 pp.runq[t%uint32(len(pp.runq))].set(gp)
7543 t++
7544 n++
7545 }
7546
7547 if randomizeScheduler {
7548 off := func(o uint32) uint32 {
7549 return (pp.runqtail + o) % uint32(len(pp.runq))
7550 }
7551 for i := uint32(1); i < n; i++ {
7552 j := cheaprandn(i + 1)
7553 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7554 }
7555 }
7556
7557 atomic.StoreRel(&pp.runqtail, t)
7558
7559 return
7560 }
7561
7562
7563
7564
7565
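// runqget takes a goroutine from the local runnable queue, preferring
// runnext. inheritTime is true if the goroutine came from runnext and should
// inherit the remainder of the current time slice. Executed only by the
// owner P.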
7566 func runqget(pp *p) (gp *g, inheritTime bool) {
7567
7568 next := pp.runnext
7569
7570
7571
7572 if next != 0 && pp.runnext.cas(next, 0) {
7573 return next.ptr(), true
7574 }
7575
7576 for {
7577 h := atomic.LoadAcq(&pp.runqhead)
7578 t := pp.runqtail
7579 if t == h {
7580 return nil, false
7581 }
7582 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7583 if atomic.CasRel(&pp.runqhead, h, h+1) {
7584 return gp, false
7585 }
7586 }
7587 }
7588
7589
7590
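// runqdrain removes all goroutines from pp's local runnable queue, including
// runnext, and returns them as a gQueue. Executed only by the owner P.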
7591 func runqdrain(pp *p) (drainQ gQueue) {
7592 oldNext := pp.runnext
7593 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7594 drainQ.pushBack(oldNext.ptr())
7595 }
7596
7597 retry:
7598 h := atomic.LoadAcq(&pp.runqhead)
7599 t := pp.runqtail
7600 qn := t - h
7601 if qn == 0 {
7602 return
7603 }
7604 if qn > uint32(len(pp.runq)) {
7605 goto retry
7606 }
7607
7608 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7609 goto retry
7610 }
7611
7612
7613
7614
7615
7616
7617
7618
7619 for i := uint32(0); i < qn; i++ {
7620 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7621 drainQ.pushBack(gp)
7622 }
7623 return
7624 }
7625
7626
7627
7628
7629
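// runqgrab steals roughly half of the goroutines from pp's local runnable
// queue into batch, starting at batchHead, and returns how many were taken.
// It can be executed by any P. If stealRunNextG is set and the queue is
// empty, it may also take pp.runnext, pausing briefly when pp is actively
// running so the owner gets a chance to schedule it first.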
7630 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7631 for {
7632 h := atomic.LoadAcq(&pp.runqhead)
7633 t := atomic.LoadAcq(&pp.runqtail)
7634 n := t - h
7635 n = n - n/2
7636 if n == 0 {
7637 if stealRunNextG {
7638
7639 if next := pp.runnext; next != 0 {
7640 if pp.status == _Prunning {
7641 if mp := pp.m.ptr(); mp != nil {
7642 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
7643
7644
7645
7646
7647
7648
7649
7650
7651
7652
7653
7654
7655
7656
7657
7658
7659
7660
7661
7662 if !osHasLowResTimer {
7663 usleep(3)
7664 } else {
7665
7666
7667
7668 osyield()
7669 }
7670 }
7671 }
7672 }
7673 if !pp.runnext.cas(next, 0) {
7674 continue
7675 }
7676 batch[batchHead%uint32(len(batch))] = next
7677 return 1
7678 }
7679 }
7680 return 0
7681 }
7682 if n > uint32(len(pp.runq)/2) {
7683 continue
7684 }
7685 for i := uint32(0); i < n; i++ {
7686 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7687 batch[(batchHead+i)%uint32(len(batch))] = g
7688 }
7689 if atomic.CasRel(&pp.runqhead, h, h+n) {
7690 return n
7691 }
7692 }
7693 }
7694
7695
7696
7697
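// runqsteal steals half of the goroutines from p2's local runnable queue and
// puts them onto pp's local runnable queue, returning one of the stolen
// goroutines (or nil if there were none).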
7698 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7699 t := pp.runqtail
7700 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7701 if n == 0 {
7702 return nil
7703 }
7704 n--
7705 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7706 if n == 0 {
7707 return gp
7708 }
7709 h := atomic.LoadAcq(&pp.runqhead)
7710 if t-h+n >= uint32(len(pp.runq)) {
7711 throw("runqsteal: runq overflow")
7712 }
7713 atomic.StoreRel(&pp.runqtail, t+n)
7714 return gp
7715 }
7716
7717
7718
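// A gQueue is a FIFO queue of Gs linked through g.schedlink. A G can be on
// only one gQueue or gList at a time.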
7719 type gQueue struct {
7720 head guintptr
7721 tail guintptr
7722 size int32
7723 }
7724
7725
7726 func (q *gQueue) empty() bool {
7727 return q.head == 0
7728 }
7729
7730
7731 func (q *gQueue) push(gp *g) {
7732 gp.schedlink = q.head
7733 q.head.set(gp)
7734 if q.tail == 0 {
7735 q.tail.set(gp)
7736 }
7737 q.size++
7738 }
7739
7740
7741 func (q *gQueue) pushBack(gp *g) {
7742 gp.schedlink = 0
7743 if q.tail != 0 {
7744 q.tail.ptr().schedlink.set(gp)
7745 } else {
7746 q.head.set(gp)
7747 }
7748 q.tail.set(gp)
7749 q.size++
7750 }
7751
7752
7753
7754 func (q *gQueue) pushBackAll(q2 gQueue) {
7755 if q2.tail == 0 {
7756 return
7757 }
7758 q2.tail.ptr().schedlink = 0
7759 if q.tail != 0 {
7760 q.tail.ptr().schedlink = q2.head
7761 } else {
7762 q.head = q2.head
7763 }
7764 q.tail = q2.tail
7765 q.size += q2.size
7766 }
7767
7768
7769
7770 func (q *gQueue) pop() *g {
7771 gp := q.head.ptr()
7772 if gp != nil {
7773 q.head = gp.schedlink
7774 if q.head == 0 {
7775 q.tail = 0
7776 }
7777 q.size--
7778 }
7779 return gp
7780 }
7781
7782
7783 func (q *gQueue) popList() gList {
7784 stack := gList{q.head, q.size}
7785 *q = gQueue{}
7786 return stack
7787 }
7788
7789
7790
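// A gList is a singly linked list of Gs through g.schedlink. A G can be on
// only one gQueue or gList at a time.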
7791 type gList struct {
7792 head guintptr
7793 size int32
7794 }
7795
7796
7797 func (l *gList) empty() bool {
7798 return l.head == 0
7799 }
7800
7801
7802 func (l *gList) push(gp *g) {
7803 gp.schedlink = l.head
7804 l.head.set(gp)
7805 l.size++
7806 }
7807
7808
7809 func (l *gList) pushAll(q gQueue) {
7810 if !q.empty() {
7811 q.tail.ptr().schedlink = l.head
7812 l.head = q.head
7813 l.size += q.size
7814 }
7815 }
7816
7817
7818 func (l *gList) pop() *g {
7819 gp := l.head.ptr()
7820 if gp != nil {
7821 l.head = gp.schedlink
7822 l.size--
7823 }
7824 return gp
7825 }
7826
7827
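// setMaxThreads updates the thread-count limit enforced by checkmcount and
// returns the previous limit; it backs runtime/debug.SetMaxThreads.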
7828 func setMaxThreads(in int) (out int) {
7829 lock(&sched.lock)
7830 out = int(sched.maxmcount)
7831 if in > 0x7fffffff {
7832 sched.maxmcount = 0x7fffffff
7833 } else {
7834 sched.maxmcount = int32(in)
7835 }
7836 checkmcount()
7837 unlock(&sched.lock)
7838 return
7839 }
7840
7841
7842
7843
7844
7845
7846
7847
7848
7849
7850
7851
7852
7853 func procPin() int {
7854 gp := getg()
7855 mp := gp.m
7856
7857 mp.locks++
7858 return int(mp.p.ptr().id)
7859 }
7860
7861
7862
7863
7864
7865
7866
7867
7868
7869
7870
7871
7872
7873 func procUnpin() {
7874 gp := getg()
7875 gp.m.locks--
7876 }
7877
7878
7879
7880 func sync_runtime_procPin() int {
7881 return procPin()
7882 }
7883
7884
7885
7886 func sync_runtime_procUnpin() {
7887 procUnpin()
7888 }
7889
7890
7891
7892 func sync_atomic_runtime_procPin() int {
7893 return procPin()
7894 }
7895
7896
7897
7898 func sync_atomic_runtime_procUnpin() {
7899 procUnpin()
7900 }
7901
7902
7903
7904
7905
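// internal_sync_runtime_canSpin reports whether spinning is worthwhile for a
// sync mutex: only for the first few iterations, only on multicore machines
// with at least one other busy P, and only when the local run queue is empty
// so spinning does not delay runnable work.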
7906 func internal_sync_runtime_canSpin(i int) bool {
7907
7908
7909
7910
7911
7912 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7913 return false
7914 }
7915 if p := getg().m.p.ptr(); !runqempty(p) {
7916 return false
7917 }
7918 return true
7919 }
7920
7921
7922
7923 func internal_sync_runtime_doSpin() {
7924 procyield(active_spin_cnt)
7925 }
7926
7927
7928
7929
7930
7931
7932
7933
7934
7935
7936
7937
7938
7939
7940
7941 func sync_runtime_canSpin(i int) bool {
7942 return internal_sync_runtime_canSpin(i)
7943 }
7944
7945
7946
7947
7948
7949
7950
7951
7952
7953
7954
7955
7956
7957 func sync_runtime_doSpin() {
7958 internal_sync_runtime_doSpin()
7959 }
7960
7961 var stealOrder randomOrder
7962
7963
7964
7965
7966
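// randomOrder and randomEnum produce a pseudo-random enumeration of
// 0..count-1 by stepping with an increment coprime to count, so every value
// is visited exactly once. stealOrder uses this to pick victim Ps for work
// stealing.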
7967 type randomOrder struct {
7968 count uint32
7969 coprimes []uint32
7970 }
7971
7972 type randomEnum struct {
7973 i uint32
7974 count uint32
7975 pos uint32
7976 inc uint32
7977 }
7978
7979 func (ord *randomOrder) reset(count uint32) {
7980 ord.count = count
7981 ord.coprimes = ord.coprimes[:0]
7982 for i := uint32(1); i <= count; i++ {
7983 if gcd(i, count) == 1 {
7984 ord.coprimes = append(ord.coprimes, i)
7985 }
7986 }
7987 }
7988
7989 func (ord *randomOrder) start(i uint32) randomEnum {
7990 return randomEnum{
7991 count: ord.count,
7992 pos: i % ord.count,
7993 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7994 }
7995 }
7996
7997 func (enum *randomEnum) done() bool {
7998 return enum.i == enum.count
7999 }
8000
8001 func (enum *randomEnum) next() {
8002 enum.i++
8003 enum.pos = (enum.pos + enum.inc) % enum.count
8004 }
8005
8006 func (enum *randomEnum) position() uint32 {
8007 return enum.pos
8008 }
8009
8010 func gcd(a, b uint32) uint32 {
8011 for b != 0 {
8012 a, b = b, a%b
8013 }
8014 return a
8015 }
8016
8017
8018
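// An initTask represents one package's init tasks as laid out by the linker:
// a state word (0 = not started, 1 = in progress, 2 = done), a count of init
// functions, and then the function pointers themselves (see doInit1).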
8019 type initTask struct {
8020 state uint32
8021 nfns uint32
8022
8023 }
8024
8025
8026
8027 var inittrace tracestat
8028
8029 type tracestat struct {
8030 active bool
8031 id uint64
8032 allocs uint64
8033 bytes uint64
8034 }
8035
8036 func doInit(ts []*initTask) {
8037 for _, t := range ts {
8038 doInit1(t)
8039 }
8040 }
8041
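// doInit1 runs the init functions of a single initTask, guarding against
// recursive initialization and, when inittrace is active, printing timing and
// allocation statistics for the package.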
8042 func doInit1(t *initTask) {
8043 switch t.state {
8044 case 2:
8045 return
8046 case 1:
8047 throw("recursive call during initialization - linker skew")
8048 default:
8049 t.state = 1
8050
8051 var (
8052 start int64
8053 before tracestat
8054 )
8055
8056 if inittrace.active {
8057 start = nanotime()
8058
8059 before = inittrace
8060 }
8061
8062 if t.nfns == 0 {
8063
8064 throw("inittask with no functions")
8065 }
8066
8067 firstFunc := add(unsafe.Pointer(t), 8)
8068 for i := uint32(0); i < t.nfns; i++ {
8069 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
8070 f := *(*func())(unsafe.Pointer(&p))
8071 f()
8072 }
8073
8074 if inittrace.active {
8075 end := nanotime()
8076
8077 after := inittrace
8078
8079 f := *(*func())(unsafe.Pointer(&firstFunc))
8080 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
8081
8082 var sbuf [24]byte
8083 print("init ", pkg, " @")
8084 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
8085 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
8086 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
8087 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
8088 print("\n")
8089 }
8090
8091 t.state = 2
8092 }
8093 }
8094