Source file
src/runtime/proc.go
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/stringslite"
16 "unsafe"
17 )
18
19
20 var modinfo string
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116 var (
117 m0 m
118 g0 g
119 mcache0 *mcache
120 raceprocctx0 uintptr
121 raceFiniLock mutex
122 )
123
124
125
126 var runtime_inittasks []*initTask
127
128
129
130
131
132 var main_init_done chan bool
133
134 //go:linkname main_main main.main
135 func main_main()
136
137 // mainStarted indicates that the main M has started.
138 var mainStarted bool
139
140 // runtimeInitTime is the nanotime() at which the runtime started.
141 var runtimeInitTime int64
142
143 // Value to use for signal mask for newly created M's.
144 var initSigmask sigset
145
146 // The main goroutine.
147 func main() {
148 mp := getg().m
149
150
151
152 mp.g0.racectx = 0
153
154
155 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
156 // Using decimal instead of binary GB and MB because they look nicer in the stack overflow failure message.
157 if goarch.PtrSize == 8 {
158 maxstacksize = 1000000000
159 } else {
160 maxstacksize = 250000000
161 }
162
163
164
165
166 maxstackceiling = 2 * maxstacksize
167
168 // Allow newproc to start new Ms.
169 mainStarted = true
170
171 if haveSysmon {
172 systemstack(func() {
173 newm(sysmon, nil, -1)
174 })
175 }
176
177
178
179
180
181
182
183 lockOSThread()
184
185 if mp != &m0 {
186 throw("runtime.main not on m0")
187 }
188
189
190
191 runtimeInitTime = nanotime()
192 if runtimeInitTime == 0 {
193 throw("nanotime returning zero")
194 }
195
196 if debug.inittrace != 0 {
197 inittrace.id = getg().goid
198 inittrace.active = true
199 }
200
201 doInit(runtime_inittasks)
202
203
204 needUnlock := true
205 defer func() {
206 if needUnlock {
207 unlockOSThread()
208 }
209 }()
210
211 gcenable()
212
213 main_init_done = make(chan bool)
214 if iscgo {
215 if _cgo_pthread_key_created == nil {
216 throw("_cgo_pthread_key_created missing")
217 }
218
219 if _cgo_thread_start == nil {
220 throw("_cgo_thread_start missing")
221 }
222 if GOOS != "windows" {
223 if _cgo_setenv == nil {
224 throw("_cgo_setenv missing")
225 }
226 if _cgo_unsetenv == nil {
227 throw("_cgo_unsetenv missing")
228 }
229 }
230 if _cgo_notify_runtime_init_done == nil {
231 throw("_cgo_notify_runtime_init_done missing")
232 }
233
234
235 if set_crosscall2 == nil {
236 throw("set_crosscall2 missing")
237 }
238 set_crosscall2()
239
240
241
242 startTemplateThread()
243 cgocall(_cgo_notify_runtime_init_done, nil)
244 }
245
246
247
248
249
250
251
252
253 for m := &firstmoduledata; m != nil; m = m.next {
254 doInit(m.inittasks)
255 }
256
257
258
259 inittrace.active = false
260
261 close(main_init_done)
262
263 needUnlock = false
264 unlockOSThread()
265
266 if isarchive || islibrary {
267
268
269 if GOARCH == "wasm" {
270
271
272
273
274
275
276
277 pause(sys.GetCallerSP() - 16)
278 panic("unreachable")
279 }
280 return
281 }
282 fn := main_main
283 fn()
284
285 exitHooksRun := false
286 if raceenabled {
287 runExitHooks(0)
288 exitHooksRun = true
289 racefini()
290 }
291
292
293
294
295
296
297
298
299 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
300 runExitHooks(0)
301 exitHooksRun = true
302 lsandoleakcheck()
303 }
304
305
306
307
308
309 if runningPanicDefers.Load() != 0 {
310
311 for c := 0; c < 1000; c++ {
312 if runningPanicDefers.Load() == 0 {
313 break
314 }
315 Gosched()
316 }
317 }
318 if panicking.Load() != 0 {
319 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
320 }
321 if !exitHooksRun {
322 runExitHooks(0)
323 }
324
325 exit(0)
326 for {
327 var x *int32
328 *x = 0
329 }
330 }
331
332
333 // os_beforeExit is called from os.Exit(0).
334 //go:linkname os_beforeExit os.runtime_beforeExit
335 func os_beforeExit(exitCode int) {
336 runExitHooks(exitCode)
337 if exitCode == 0 && raceenabled {
338 racefini()
339 }
340
341
342 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
343 lsandoleakcheck()
344 }
345 }
346
347 func init() {
348 exithook.Gosched = Gosched
349 exithook.Goid = func() uint64 { return getg().goid }
350 exithook.Throw = throw
351 }
352
353 func runExitHooks(code int) {
354 exithook.Run(code)
355 }
356
357 // start forcegc helper goroutine
358 func init() {
359 go forcegchelper()
360 }
361
362 func forcegchelper() {
363 forcegc.g = getg()
364 lockInit(&forcegc.lock, lockRankForcegc)
365 for {
366 lock(&forcegc.lock)
367 if forcegc.idle.Load() {
368 throw("forcegc: phase error")
369 }
370 forcegc.idle.Store(true)
371 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
372
373 if debug.gctrace > 0 {
374 println("GC forced")
375 }
376
377 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
378 }
379 }
380
381
382
383 // Gosched yields the processor, allowing other goroutines to run. It does not
384 // suspend the current goroutine, so execution resumes automatically.
385 func Gosched() {
386 checkTimeouts()
387 mcall(gosched_m)
388 }
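// Illustrative sketch (not part of proc.go): user code reaches this path via
// runtime.Gosched, typically to yield cooperatively inside a long-running loop.
// The process helper below is hypothetical.
//
//	for i, item := range items {
//		process(item)             // hypothetical CPU-bound work
//		if i%1024 == 0 {
//			runtime.Gosched() // let other goroutines use this P
//		}
//	}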
389
390
391
392 // goschedguarded yields the processor like gosched, but also checks
393 // for forbidden states and opts out of the yield in those cases.
394 func goschedguarded() {
395 mcall(goschedguarded_m)
396 }
397
398
399
400
401 // goschedIfBusy yields the processor like gosched, but only does so if
402 // there are no idle Ps or if we're on the only P and there's nothing in the run queue.
403 func goschedIfBusy() {
404 gp := getg()
405
406
407 if !gp.preempt && sched.npidle.Load() > 0 {
408 return
409 }
410 mcall(gosched_m)
411 }
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436 // Puts the current goroutine into a waiting state and calls unlockf on the
437 // system stack. If unlockf returns false, the goroutine is resumed.
438 // unlockf must not access this G's stack, as it may be moved between
439 // the call to gopark and the call to unlockf. Reason explains why the
440 // goroutine has been parked; it is shown in stack traces and heap dumps.
441 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
442 if reason != waitReasonSleep {
443 checkTimeouts()
444 }
445 mp := acquirem()
446 gp := mp.curg
447 status := readgstatus(gp)
448 if status != _Grunning && status != _Gscanrunning {
449 throw("gopark: bad g status")
450 }
451 mp.waitlock = lock
452 mp.waitunlockf = unlockf
453 gp.waitreason = reason
454 mp.waitTraceBlockReason = traceReason
455 mp.waitTraceSkip = traceskip
456 releasem(mp)
457
458 mcall(park_m)
459 }
460
461
462 // Puts the current goroutine into a waiting state and unlocks the lock; the goroutine can be made runnable again by calling goready(gp).
463 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
464 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
465 }
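// Illustrative sketch (not part of proc.go): runtime blocking primitives pair
// gopark/goparkunlock with goready. Roughly, for a primitive guarded by a runtime
// mutex l with a waiter list (enqueue/dequeue here are hypothetical helpers):
//
//	lock(&l)
//	enqueue(getg())                                            // record this goroutine as a waiter
//	goparkunlock(&l, waitReasonSemacquire, traceBlockSync, 1)  // sleep, releasing l atomically
//
//	// ... later, on another goroutine:
//	lock(&l)
//	gp := dequeue()
//	unlock(&l)
//	goready(gp, 1)                                             // make the waiter runnable again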
466
467
468
469
470
471
472
473
474
475
476
477 func goready(gp *g, traceskip int) {
478 systemstack(func() {
479 ready(gp, traceskip, true)
480 })
481 }
482
483 // acquireSudog returns a sudog from the per-P cache, refilling from the central cache or allocating a new one if needed.
484 func acquireSudog() *sudog {
485
486
487
488
489 // Delicate dance: the semaphore implementation calls acquireSudog,
490 // acquireSudog calls new(sudog), new calls malloc, malloc can call the
491 // garbage collector, and the garbage collector calls the semaphore
492 // implementation in stopTheWorld. Break the cycle by doing acquirem/releasem around new(sudog).
493 mp := acquirem()
494 pp := mp.p.ptr()
495 if len(pp.sudogcache) == 0 {
496 lock(&sched.sudoglock)
497
498 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
499 s := sched.sudogcache
500 sched.sudogcache = s.next
501 s.next = nil
502 pp.sudogcache = append(pp.sudogcache, s)
503 }
504 unlock(&sched.sudoglock)
505
506 if len(pp.sudogcache) == 0 {
507 pp.sudogcache = append(pp.sudogcache, new(sudog))
508 }
509 }
510 n := len(pp.sudogcache)
511 s := pp.sudogcache[n-1]
512 pp.sudogcache[n-1] = nil
513 pp.sudogcache = pp.sudogcache[:n-1]
514 if s.elem != nil {
515 throw("acquireSudog: found s.elem != nil in cache")
516 }
517 releasem(mp)
518 return s
519 }
520
521
522 func releaseSudog(s *sudog) {
523 if s.elem != nil {
524 throw("runtime: sudog with non-nil elem")
525 }
526 if s.isSelect {
527 throw("runtime: sudog with non-false isSelect")
528 }
529 if s.next != nil {
530 throw("runtime: sudog with non-nil next")
531 }
532 if s.prev != nil {
533 throw("runtime: sudog with non-nil prev")
534 }
535 if s.waitlink != nil {
536 throw("runtime: sudog with non-nil waitlink")
537 }
538 if s.c != nil {
539 throw("runtime: sudog with non-nil c")
540 }
541 gp := getg()
542 if gp.param != nil {
543 throw("runtime: releaseSudog with non-nil gp.param")
544 }
545 mp := acquirem()
546 pp := mp.p.ptr()
547 if len(pp.sudogcache) == cap(pp.sudogcache) {
548
549 var first, last *sudog
550 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
551 n := len(pp.sudogcache)
552 p := pp.sudogcache[n-1]
553 pp.sudogcache[n-1] = nil
554 pp.sudogcache = pp.sudogcache[:n-1]
555 if first == nil {
556 first = p
557 } else {
558 last.next = p
559 }
560 last = p
561 }
562 lock(&sched.sudoglock)
563 last.next = sched.sudogcache
564 sched.sudogcache = first
565 unlock(&sched.sudoglock)
566 }
567 pp.sudogcache = append(pp.sudogcache, s)
568 releasem(mp)
569 }
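// Note on the cache design, as evident from the two functions above: sudogs are
// kept in a per-P slice (pp.sudogcache) backed by a central lock-protected list
// (sched.sudogcache). acquireSudog refills half of the local capacity at a time
// and releaseSudog spills half when the local cache is full, so the common
// acquire/release path never touches the global lock.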
570
571 // called from assembly
572 func badmcall(fn func(*g)) {
573 throw("runtime: mcall called on m->g0 stack")
574 }
575
576 func badmcall2(fn func(*g)) {
577 throw("runtime: mcall function returned")
578 }
579
580 func badreflectcall() {
581 panic(plainError("arg size to reflect.call more than 1GB"))
582 }
583
584
585
586 func badmorestackg0() {
587 if !crashStackImplemented {
588 writeErrStr("fatal: morestack on g0\n")
589 return
590 }
591
592 g := getg()
593 switchToCrashStack(func() {
594 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
595 g.m.traceback = 2
596 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
597 print("\n")
598
599 throw("morestack on g0")
600 })
601 }
602
603
604
605 func badmorestackgsignal() {
606 writeErrStr("fatal: morestack on gsignal\n")
607 }
608
609
610 func badctxt() {
611 throw("ctxt != 0")
612 }
613
614
615
616 var gcrash g
617
618 var crashingG atomic.Pointer[g]
619
620
621
622
623
624
625
626
627
628 func switchToCrashStack(fn func()) {
629 me := getg()
630 if crashingG.CompareAndSwapNoWB(nil, me) {
631 switchToCrashStack0(fn)
632 abort()
633 }
634 if crashingG.Load() == me {
635
636 writeErrStr("fatal: recursive switchToCrashStack\n")
637 abort()
638 }
639
640 usleep_no_g(100)
641 writeErrStr("fatal: concurrent switchToCrashStack\n")
642 abort()
643 }
644
645
646
647
648 const crashStackImplemented = GOOS != "windows"
649
650
651 func switchToCrashStack0(fn func())
652
653 func lockedOSThread() bool {
654 gp := getg()
655 return gp.lockedm != 0 && gp.m.lockedg != 0
656 }
657
658 var (
659
660
661
662
663
664
665 allglock mutex
666 allgs []*g
667
668
669
670
671
672
673
674
675
676
677
678
679
680 allglen uintptr
681 allgptr **g
682 )
683
684 func allgadd(gp *g) {
685 if readgstatus(gp) == _Gidle {
686 throw("allgadd: bad status Gidle")
687 }
688
689 lock(&allglock)
690 allgs = append(allgs, gp)
691 if &allgs[0] != allgptr {
692 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
693 }
694 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
695 unlock(&allglock)
696 }
697
698
699
700
701 func allGsSnapshot() []*g {
702 assertWorldStoppedOrLockHeld(&allglock)
703
704
705
706
707
708
709 return allgs[:len(allgs):len(allgs)]
710 }
711
712
713 func atomicAllG() (**g, uintptr) {
714 length := atomic.Loaduintptr(&allglen)
715 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
716 return ptr, length
717 }
718
719
720 func atomicAllGIndex(ptr **g, i uintptr) *g {
721 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
722 }
723
724
725
726
727 func forEachG(fn func(gp *g)) {
728 lock(&allglock)
729 for _, gp := range allgs {
730 fn(gp)
731 }
732 unlock(&allglock)
733 }
734
735
736
737
738
739 func forEachGRace(fn func(gp *g)) {
740 ptr, length := atomicAllG()
741 for i := uintptr(0); i < length; i++ {
742 gp := atomicAllGIndex(ptr, i)
743 fn(gp)
744 }
745 return
746 }
747
748 const (
749
750
751 _GoidCacheBatch = 16
752 )
753
754
755
756 func cpuinit(env string) {
757 switch GOOS {
758 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
759 cpu.DebugOptions = true
760 }
761 cpu.Initialize(env)
762
763
764
765 switch GOARCH {
766 case "386", "amd64":
767 x86HasPOPCNT = cpu.X86.HasPOPCNT
768 x86HasSSE41 = cpu.X86.HasSSE41
769 x86HasFMA = cpu.X86.HasFMA
770
771 case "arm":
772 armHasVFPv4 = cpu.ARM.HasVFPv4
773
774 case "arm64":
775 arm64HasATOMICS = cpu.ARM64.HasATOMICS
776
777 case "loong64":
778 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
779 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
780 loong64HasLSX = cpu.Loong64.HasLSX
781 }
782 }
783
784
785
786
787 func getGodebugEarly() string {
788 const prefix = "GODEBUG="
789 var env string
790 switch GOOS {
791 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
792
793
794
795 n := int32(0)
796 for argv_index(argv, argc+1+n) != nil {
797 n++
798 }
799
800 for i := int32(0); i < n; i++ {
801 p := argv_index(argv, argc+1+i)
802 s := unsafe.String(p, findnull(p))
803
804 if stringslite.HasPrefix(s, prefix) {
805 env = gostring(p)[len(prefix):]
806 break
807 }
808 }
809 }
810 return env
811 }
812
813
814 // The bootstrap sequence is:
815 //
816 //	call osinit
817 //	call schedinit
818 //	make & queue new G
819 //	call runtime·mstart
820 // The new G calls runtime·main.
821 func schedinit() {
822 lockInit(&sched.lock, lockRankSched)
823 lockInit(&sched.sysmonlock, lockRankSysmon)
824 lockInit(&sched.deferlock, lockRankDefer)
825 lockInit(&sched.sudoglock, lockRankSudog)
826 lockInit(&deadlock, lockRankDeadlock)
827 lockInit(&paniclk, lockRankPanic)
828 lockInit(&allglock, lockRankAllg)
829 lockInit(&allpLock, lockRankAllp)
830 lockInit(&reflectOffs.lock, lockRankReflectOffs)
831 lockInit(&finlock, lockRankFin)
832 lockInit(&cpuprof.lock, lockRankCpuprof)
833 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
834 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
835 traceLockInit()
836
837
838
839 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
840
841 lockVerifyMSize()
842
843
844
845 gp := getg()
846 if raceenabled {
847 gp.racectx, raceprocctx0 = raceinit()
848 }
849
850 sched.maxmcount = 10000
851 crashFD.Store(^uintptr(0))
852
853
854 worldStopped()
855
856 ticks.init()
857 moduledataverify()
858 stackinit()
859 mallocinit()
860 godebug := getGodebugEarly()
861 cpuinit(godebug)
862 randinit()
863 alginit()
864 mcommoninit(gp.m, -1)
865 modulesinit()
866 typelinksinit()
867 itabsinit()
868 stkobjinit()
869
870 sigsave(&gp.m.sigmask)
871 initSigmask = gp.m.sigmask
872
873 goargs()
874 goenvs()
875 secure()
876 checkfds()
877 parsedebugvars()
878 gcinit()
879
880
881
882 gcrash.stack = stackalloc(16384)
883 gcrash.stackguard0 = gcrash.stack.lo + 1000
884 gcrash.stackguard1 = gcrash.stack.lo + 1000
885
886
887
888
889
890 if disableMemoryProfiling {
891 MemProfileRate = 0
892 }
893
894
895 mProfStackInit(gp.m)
896
897 lock(&sched.lock)
898 sched.lastpoll.Store(nanotime())
899 procs := ncpu
900 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
901 procs = n
902 }
903 if procresize(procs) != nil {
904 throw("unknown runnable goroutine during bootstrap")
905 }
906 unlock(&sched.lock)
907
908
909 worldStarted()
910
911 if buildVersion == "" {
912
913
914 buildVersion = "unknown"
915 }
916 if len(modinfo) == 1 {
917
918
919 modinfo = ""
920 }
921 }
922
923 func dumpgstatus(gp *g) {
924 thisg := getg()
925 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
926 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
927 }
928
929
930 func checkmcount() {
931 assertLockHeld(&sched.lock)
932
933
934
935
936
937
938
939
940
941 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
942 if count > sched.maxmcount {
943 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
944 throw("thread exhaustion")
945 }
946 }
947
948
949
950
951
952 func mReserveID() int64 {
953 assertLockHeld(&sched.lock)
954
955 if sched.mnext+1 < sched.mnext {
956 throw("runtime: thread ID overflow")
957 }
958 id := sched.mnext
959 sched.mnext++
960 checkmcount()
961 return id
962 }
963
964
965 func mcommoninit(mp *m, id int64) {
966 gp := getg()
967
968
969 if gp != gp.m.g0 {
970 callers(1, mp.createstack[:])
971 }
972
973 lock(&sched.lock)
974
975 if id >= 0 {
976 mp.id = id
977 } else {
978 mp.id = mReserveID()
979 }
980
981 mrandinit(mp)
982
983 mpreinit(mp)
984 if mp.gsignal != nil {
985 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
986 }
987
988
989
990 mp.alllink = allm
991
992
993
994 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
995 unlock(&sched.lock)
996
997
998 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
999 mp.cgoCallers = new(cgoCallers)
1000 }
1001 mProfStackInit(mp)
1002 }
1003
1004
1005
1006
1007
1008 func mProfStackInit(mp *m) {
1009 if debug.profstackdepth == 0 {
1010
1011
1012 return
1013 }
1014 mp.profStack = makeProfStackFP()
1015 mp.mLockProfile.stack = makeProfStackFP()
1016 }
1017
1018
1019
1020
1021 func makeProfStackFP() []uintptr {
1022
1023
1024
1025
1026
1027
1028 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1029 }
1030
1031
1032
1033 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1034
1035
1036 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1037
1038 func (mp *m) becomeSpinning() {
1039 mp.spinning = true
1040 sched.nmspinning.Add(1)
1041 sched.needspinning.Store(0)
1042 }
1043
1044 func (mp *m) hasCgoOnStack() bool {
1045 return mp.ncgo > 0 || mp.isextra
1046 }
1047
1048 const (
1049
1050
1051 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1052
1053
1054
1055 osHasLowResClockInt = goos.IsWindows
1056
1057
1058
1059 osHasLowResClock = osHasLowResClockInt > 0
1060 )
1061
1062
1063 func ready(gp *g, traceskip int, next bool) {
1064 status := readgstatus(gp)
1065
1066
1067 mp := acquirem()
1068 if status&^_Gscan != _Gwaiting {
1069 dumpgstatus(gp)
1070 throw("bad g->status in ready")
1071 }
1072
1073
1074 trace := traceAcquire()
1075 casgstatus(gp, _Gwaiting, _Grunnable)
1076 if trace.ok() {
1077 trace.GoUnpark(gp, traceskip)
1078 traceRelease(trace)
1079 }
1080 runqput(mp.p.ptr(), gp, next)
1081 wakep()
1082 releasem(mp)
1083 }
1084
1085
1086
1087 const freezeStopWait = 0x7fffffff
1088
1089
1090
1091 var freezing atomic.Bool
1092
1093
1094
1095
1096 func freezetheworld() {
1097 freezing.Store(true)
1098 if debug.dontfreezetheworld > 0 {
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123 usleep(1000)
1124 return
1125 }
1126
1127
1128
1129
1130 for i := 0; i < 5; i++ {
1131
1132 sched.stopwait = freezeStopWait
1133 sched.gcwaiting.Store(true)
1134
1135 if !preemptall() {
1136 break
1137 }
1138 usleep(1000)
1139 }
1140
1141 usleep(1000)
1142 preemptall()
1143 usleep(1000)
1144 }
1145
1146
1147
1148
1149
1150 func readgstatus(gp *g) uint32 {
1151 return gp.atomicstatus.Load()
1152 }
1153
1154
1155
1156
1157
1158 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1159 success := false
1160
1161
1162 switch oldval {
1163 default:
1164 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1165 dumpgstatus(gp)
1166 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1167 case _Gscanrunnable,
1168 _Gscanwaiting,
1169 _Gscanrunning,
1170 _Gscansyscall,
1171 _Gscanpreempted:
1172 if newval == oldval&^_Gscan {
1173 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1174 }
1175 }
1176 if !success {
1177 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1178 dumpgstatus(gp)
1179 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1180 }
1181 releaseLockRankAndM(lockRankGscan)
1182 }
1183
1184
1185
1186 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1187 switch oldval {
1188 case _Grunnable,
1189 _Grunning,
1190 _Gwaiting,
1191 _Gsyscall:
1192 if newval == oldval|_Gscan {
1193 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1194 if r {
1195 acquireLockRankAndM(lockRankGscan)
1196 }
1197 return r
1198
1199 }
1200 }
1201 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1202 throw("castogscanstatus")
1203 panic("not reached")
1204 }
1205
1206
1207
1208 var casgstatusAlwaysTrack = false
1209
1210
1211
1212
1213
1214
1215
1216 func casgstatus(gp *g, oldval, newval uint32) {
1217 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1218 systemstack(func() {
1219
1220
1221 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1222 throw("casgstatus: bad incoming values")
1223 })
1224 }
1225
1226 lockWithRankMayAcquire(nil, lockRankGscan)
1227
1228
1229 const yieldDelay = 5 * 1000
1230 var nextYield int64
1231
1232
1233
1234 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1235 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1236 systemstack(func() {
1237
1238
1239 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1240 })
1241 }
1242 if i == 0 {
1243 nextYield = nanotime() + yieldDelay
1244 }
1245 if nanotime() < nextYield {
1246 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1247 procyield(1)
1248 }
1249 } else {
1250 osyield()
1251 nextYield = nanotime() + yieldDelay/2
1252 }
1253 }
1254
1255 if gp.syncGroup != nil {
1256 systemstack(func() {
1257 gp.syncGroup.changegstatus(gp, oldval, newval)
1258 })
1259 }
1260
1261 if oldval == _Grunning {
1262
1263 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1264 gp.tracking = true
1265 }
1266 gp.trackingSeq++
1267 }
1268 if !gp.tracking {
1269 return
1270 }
1271
1272
1273
1274
1275
1276
1277 switch oldval {
1278 case _Grunnable:
1279
1280
1281
1282 now := nanotime()
1283 gp.runnableTime += now - gp.trackingStamp
1284 gp.trackingStamp = 0
1285 case _Gwaiting:
1286 if !gp.waitreason.isMutexWait() {
1287
1288 break
1289 }
1290
1291
1292
1293
1294
1295 now := nanotime()
1296 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1297 gp.trackingStamp = 0
1298 }
1299 switch newval {
1300 case _Gwaiting:
1301 if !gp.waitreason.isMutexWait() {
1302
1303 break
1304 }
1305
1306 now := nanotime()
1307 gp.trackingStamp = now
1308 case _Grunnable:
1309
1310
1311 now := nanotime()
1312 gp.trackingStamp = now
1313 case _Grunning:
1314
1315
1316
1317 gp.tracking = false
1318 sched.timeToRun.record(gp.runnableTime)
1319 gp.runnableTime = 0
1320 }
1321 }
1322
1323
1324
1325
1326 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1327
1328 gp.waitreason = reason
1329 casgstatus(gp, old, _Gwaiting)
1330 }
1331
1332
1333
1334
1335
1336 func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
1337 if !reason.isWaitingForGC() {
1338 throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
1339 }
1340 casGToWaiting(gp, old, reason)
1341 }
1342
1343
1344
1345
1346
1347 func casGToPreemptScan(gp *g, old, new uint32) {
1348 if old != _Grunning || new != _Gscan|_Gpreempted {
1349 throw("bad g transition")
1350 }
1351 acquireLockRankAndM(lockRankGscan)
1352 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1353 }
1354
1355
1356
1357
1358
1359
1360 }
1361
1362
1363
1364
1365 func casGFromPreempted(gp *g, old, new uint32) bool {
1366 if old != _Gpreempted || new != _Gwaiting {
1367 throw("bad g transition")
1368 }
1369 gp.waitreason = waitReasonPreempted
1370 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1371 return false
1372 }
1373 if sg := gp.syncGroup; sg != nil {
1374 sg.changegstatus(gp, _Gpreempted, _Gwaiting)
1375 }
1376 return true
1377 }
1378
1379
1380 type stwReason uint8
1381
1382
1383
1384
1385 const (
1386 stwUnknown stwReason = iota
1387 stwGCMarkTerm
1388 stwGCSweepTerm
1389 stwWriteHeapDump
1390 stwGoroutineProfile
1391 stwGoroutineProfileCleanup
1392 stwAllGoroutinesStack
1393 stwReadMemStats
1394 stwAllThreadsSyscall
1395 stwGOMAXPROCS
1396 stwStartTrace
1397 stwStopTrace
1398 stwForTestCountPagesInUse
1399 stwForTestReadMetricsSlow
1400 stwForTestReadMemStatsSlow
1401 stwForTestPageCachePagesLeaked
1402 stwForTestResetDebugLog
1403 )
1404
1405 func (r stwReason) String() string {
1406 return stwReasonStrings[r]
1407 }
1408
1409 func (r stwReason) isGC() bool {
1410 return r == stwGCMarkTerm || r == stwGCSweepTerm
1411 }
1412
1413
1414
1415
1416 var stwReasonStrings = [...]string{
1417 stwUnknown: "unknown",
1418 stwGCMarkTerm: "GC mark termination",
1419 stwGCSweepTerm: "GC sweep termination",
1420 stwWriteHeapDump: "write heap dump",
1421 stwGoroutineProfile: "goroutine profile",
1422 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1423 stwAllGoroutinesStack: "all goroutines stack trace",
1424 stwReadMemStats: "read mem stats",
1425 stwAllThreadsSyscall: "AllThreadsSyscall",
1426 stwGOMAXPROCS: "GOMAXPROCS",
1427 stwStartTrace: "start trace",
1428 stwStopTrace: "stop trace",
1429 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1430 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1431 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1432 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1433 stwForTestResetDebugLog: "ResetDebugLog (test)",
1434 }
1435
1436
1437
1438 type worldStop struct {
1439 reason stwReason
1440 startedStopping int64
1441 finishedStopping int64
1442 stoppingCPUTime int64
1443 }
1444
1445
1446
1447
1448 var stopTheWorldContext worldStop
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460 // stopTheWorld stops all P's from executing goroutines, interrupting all
1461 // goroutines at GC safe points, and records reason as the reason for the stop.
1462 // On return, only the current goroutine's P is running. stopTheWorld must not
1463 // be called from a system stack and the caller must not hold worldsema. The
1464 // caller must call startTheWorld when other P's should resume execution.
1465 //
1466 // Returns the STW context, which must be passed to startTheWorld.
1467 func stopTheWorld(reason stwReason) worldStop {
1468 semacquire(&worldsema)
1469 gp := getg()
1470 gp.m.preemptoff = reason.String()
1471 systemstack(func() {
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486 casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
1487 stopTheWorldContext = stopTheWorldWithSema(reason)
1488 casgstatus(gp, _Gwaiting, _Grunning)
1489 })
1490 return stopTheWorldContext
1491 }
1492
1493
1494 // startTheWorld undoes the effects of stopTheWorld. w must be the worldStop
1495 // returned by the stopTheWorld call that stopped the world.
1496 func startTheWorld(w worldStop) {
1497 systemstack(func() { startTheWorldWithSema(0, w) })
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514 mp := acquirem()
1515 mp.preemptoff = ""
1516 semrelease1(&worldsema, true, 0)
1517 releasem(mp)
1518 }
1519
1520
1521 // stopTheWorldGC has the same effect as stopTheWorld, but also blocks a GC
1522 // from starting until startTheWorldGC is called.
1523 func stopTheWorldGC(reason stwReason) worldStop {
1524 semacquire(&gcsema)
1525 return stopTheWorld(reason)
1526 }
1527
1528
1529
1530 // startTheWorldGC undoes the effects of stopTheWorldGC.
1531 func startTheWorldGC(w worldStop) {
1532 startTheWorld(w)
1533 semrelease(&gcsema)
1534 }
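// Illustrative sketch (not part of proc.go): runtime callers bracket a
// stop-the-world section with the pair above, for example:
//
//	stw := stopTheWorld(stwReadMemStats) // returns the worldStop context
//	// ... inspect or mutate state that requires the world to be stopped ...
//	startTheWorld(stw)                   // pass the context back to restart the world
//
// The GC variants (stopTheWorldGC/startTheWorldGC) additionally hold gcsema, so a
// concurrent GC cannot start while the world is stopped by a non-GC caller.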
1535
1536 // Holding worldsema grants an M the right to try to stop the world.
1537 var worldsema uint32 = 1
1538
1539
1540
1541
1542 // Holding gcsema grants the M the right to block a GC, and blocks
1543 // until the current GC is done. In particular, it prevents gomaxprocs
1544 // from changing concurrently.
1545 var gcsema uint32 = 1
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572 // stopTheWorldWithSema is the core implementation of stopTheWorld.
1573 // The caller is responsible for acquiring worldsema and disabling
1574 // preemption first, and then should call stopTheWorldWithSema on the
1575 // system stack. The caller must call startTheWorldWithSema when the
1576 // world is ready to resume.
1577 func stopTheWorldWithSema(reason stwReason) worldStop {
1578 trace := traceAcquire()
1579 if trace.ok() {
1580 trace.STWStart(reason)
1581 traceRelease(trace)
1582 }
1583 gp := getg()
1584
1585
1586
1587 if gp.m.locks > 0 {
1588 throw("stopTheWorld: holding locks")
1589 }
1590
1591 lock(&sched.lock)
1592 start := nanotime()
1593 sched.stopwait = gomaxprocs
1594 sched.gcwaiting.Store(true)
1595 preemptall()
1596
1597 gp.m.p.ptr().status = _Pgcstop
1598 gp.m.p.ptr().gcStopTime = start
1599 sched.stopwait--
1600
1601 trace = traceAcquire()
1602 for _, pp := range allp {
1603 s := pp.status
1604 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1605 if trace.ok() {
1606 trace.ProcSteal(pp, false)
1607 }
1608 pp.syscalltick++
1609 pp.gcStopTime = nanotime()
1610 sched.stopwait--
1611 }
1612 }
1613 if trace.ok() {
1614 traceRelease(trace)
1615 }
1616
1617
1618 now := nanotime()
1619 for {
1620 pp, _ := pidleget(now)
1621 if pp == nil {
1622 break
1623 }
1624 pp.status = _Pgcstop
1625 pp.gcStopTime = nanotime()
1626 sched.stopwait--
1627 }
1628 wait := sched.stopwait > 0
1629 unlock(&sched.lock)
1630
1631
1632 if wait {
1633 for {
1634
1635 if notetsleep(&sched.stopnote, 100*1000) {
1636 noteclear(&sched.stopnote)
1637 break
1638 }
1639 preemptall()
1640 }
1641 }
1642
1643 finish := nanotime()
1644 startTime := finish - start
1645 if reason.isGC() {
1646 sched.stwStoppingTimeGC.record(startTime)
1647 } else {
1648 sched.stwStoppingTimeOther.record(startTime)
1649 }
1650
1651
1652
1653
1654
1655 stoppingCPUTime := int64(0)
1656 bad := ""
1657 if sched.stopwait != 0 {
1658 bad = "stopTheWorld: not stopped (stopwait != 0)"
1659 } else {
1660 for _, pp := range allp {
1661 if pp.status != _Pgcstop {
1662 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1663 }
1664 if pp.gcStopTime == 0 && bad == "" {
1665 bad = "stopTheWorld: broken CPU time accounting"
1666 }
1667 stoppingCPUTime += finish - pp.gcStopTime
1668 pp.gcStopTime = 0
1669 }
1670 }
1671 if freezing.Load() {
1672
1673
1674
1675
1676 lock(&deadlock)
1677 lock(&deadlock)
1678 }
1679 if bad != "" {
1680 throw(bad)
1681 }
1682
1683 worldStopped()
1684
1685 return worldStop{
1686 reason: reason,
1687 startedStopping: start,
1688 finishedStopping: finish,
1689 stoppingCPUTime: stoppingCPUTime,
1690 }
1691 }
1692
1693
1694
1695
1696 // startTheWorldWithSema restarts the world after stopTheWorldWithSema.
1697 // now is the current time; pass 0 to capture a fresh timestamp.
1698 // Returns the nanotime() at which the world restarted.
1699 func startTheWorldWithSema(now int64, w worldStop) int64 {
1700 assertWorldStopped()
1701
1702 mp := acquirem()
1703 if netpollinited() {
1704 list, delta := netpoll(0)
1705 injectglist(&list)
1706 netpollAdjustWaiters(delta)
1707 }
1708 lock(&sched.lock)
1709
1710 procs := gomaxprocs
1711 if newprocs != 0 {
1712 procs = newprocs
1713 newprocs = 0
1714 }
1715 p1 := procresize(procs)
1716 sched.gcwaiting.Store(false)
1717 if sched.sysmonwait.Load() {
1718 sched.sysmonwait.Store(false)
1719 notewakeup(&sched.sysmonnote)
1720 }
1721 unlock(&sched.lock)
1722
1723 worldStarted()
1724
1725 for p1 != nil {
1726 p := p1
1727 p1 = p1.link.ptr()
1728 if p.m != 0 {
1729 mp := p.m.ptr()
1730 p.m = 0
1731 if mp.nextp != 0 {
1732 throw("startTheWorld: inconsistent mp->nextp")
1733 }
1734 mp.nextp.set(p)
1735 notewakeup(&mp.park)
1736 } else {
1737
1738 newm(nil, p, -1)
1739 }
1740 }
1741
1742
1743 if now == 0 {
1744 now = nanotime()
1745 }
1746 totalTime := now - w.startedStopping
1747 if w.reason.isGC() {
1748 sched.stwTotalTimeGC.record(totalTime)
1749 } else {
1750 sched.stwTotalTimeOther.record(totalTime)
1751 }
1752 trace := traceAcquire()
1753 if trace.ok() {
1754 trace.STWDone()
1755 traceRelease(trace)
1756 }
1757
1758
1759
1760
1761 wakep()
1762
1763 releasem(mp)
1764
1765 return now
1766 }
1767
1768
1769
1770 func usesLibcall() bool {
1771 switch GOOS {
1772 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1773 return true
1774 case "openbsd":
1775 return GOARCH != "mips64"
1776 }
1777 return false
1778 }
1779
1780
1781
1782 func mStackIsSystemAllocated() bool {
1783 switch GOOS {
1784 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1785 return true
1786 case "openbsd":
1787 return GOARCH != "mips64"
1788 }
1789 return false
1790 }
1791
1792
1793 // mstart is the entry-point for new Ms. It is written in assembly, marked TOPFRAME, and calls mstart0.
1794 func mstart()
1795
1796
1797
1798
1799
1800
1801
1802
1803 // mstart0 is the Go entry-point for new Ms. It may run during STW (because it
1804 // doesn't have a P yet), so write barriers are not allowed.
1805 func mstart0() {
1806 gp := getg()
1807
1808 osStack := gp.stack.lo == 0
1809 if osStack {
1810
1811
1812
1813
1814
1815
1816
1817
1818 size := gp.stack.hi
1819 if size == 0 {
1820 size = 16384 * sys.StackGuardMultiplier
1821 }
1822 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1823 gp.stack.lo = gp.stack.hi - size + 1024
1824 }
1825
1826
1827 gp.stackguard0 = gp.stack.lo + stackGuard
1828
1829
1830 gp.stackguard1 = gp.stackguard0
1831 mstart1()
1832
1833
1834 if mStackIsSystemAllocated() {
1835
1836
1837
1838 osStack = true
1839 }
1840 mexit(osStack)
1841 }
1842
1843
1844
1845
1846
1847 func mstart1() {
1848 gp := getg()
1849
1850 if gp != gp.m.g0 {
1851 throw("bad runtime·mstart")
1852 }
1853
1854
1855
1856
1857
1858
1859
1860 gp.sched.g = guintptr(unsafe.Pointer(gp))
1861 gp.sched.pc = sys.GetCallerPC()
1862 gp.sched.sp = sys.GetCallerSP()
1863
1864 asminit()
1865 minit()
1866
1867
1868
1869 if gp.m == &m0 {
1870 mstartm0()
1871 }
1872
1873 if debug.dataindependenttiming == 1 {
1874 sys.EnableDIT()
1875 }
1876
1877 if fn := gp.m.mstartfn; fn != nil {
1878 fn()
1879 }
1880
1881 if gp.m != &m0 {
1882 acquirep(gp.m.nextp.ptr())
1883 gp.m.nextp = 0
1884 }
1885 schedule()
1886 }
1887
1888
1889
1890
1891
1892
1893
1894 func mstartm0() {
1895
1896
1897
1898 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1899 cgoHasExtraM = true
1900 newextram()
1901 }
1902 initsig(false)
1903 }
1904
1905
1906
1907
1908 func mPark() {
1909 gp := getg()
1910 notesleep(&gp.m.park)
1911 noteclear(&gp.m.park)
1912 }
1913
1914
1915
1916
1917
1918 // mexit tears down and exits the current thread.
1919 //
1920 // Don't call this directly to exit the thread, since it must run at
1921 // the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
1922 // unwind the stack to the point that exits the thread.
1923 // It is entered with m.p != nil, and it releases the P before exiting.
1924 func mexit(osStack bool) {
1925 mp := getg().m
1926
1927 if mp == &m0 {
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939 handoffp(releasep())
1940 lock(&sched.lock)
1941 sched.nmfreed++
1942 checkdead()
1943 unlock(&sched.lock)
1944 mPark()
1945 throw("locked m0 woke up")
1946 }
1947
1948 sigblock(true)
1949 unminit()
1950
1951
1952 if mp.gsignal != nil {
1953 stackfree(mp.gsignal.stack)
1954
1955
1956
1957
1958 mp.gsignal = nil
1959 }
1960
1961
1962 vgetrandomDestroy(mp)
1963
1964
1965 lock(&sched.lock)
1966 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1967 if *pprev == mp {
1968 *pprev = mp.alllink
1969 goto found
1970 }
1971 }
1972 throw("m not found in allm")
1973 found:
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988 mp.freeWait.Store(freeMWait)
1989 mp.freelink = sched.freem
1990 sched.freem = mp
1991 unlock(&sched.lock)
1992
1993 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1994 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1995
1996
1997 handoffp(releasep())
1998
1999
2000
2001
2002
2003 lock(&sched.lock)
2004 sched.nmfreed++
2005 checkdead()
2006 unlock(&sched.lock)
2007
2008 if GOOS == "darwin" || GOOS == "ios" {
2009
2010
2011 if mp.signalPending.Load() != 0 {
2012 pendingPreemptSignals.Add(-1)
2013 }
2014 }
2015
2016
2017
2018 mdestroy(mp)
2019
2020 if osStack {
2021
2022 mp.freeWait.Store(freeMRef)
2023
2024
2025
2026 return
2027 }
2028
2029
2030
2031
2032
2033 exitThread(&mp.freeWait)
2034 }
2035
2036
2037
2038
2039
2040 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
2041 // If a P is currently executing code, this will bring the P to a GC
2042 // safe point and execute fn on that P. If the P is not executing code
2043 // (it is idle or in a syscall), this will call fn(p) directly while
2044 // preventing the P from exiting its state. This does not ensure that
2045 // fn will run on every CPU executing Go code, just on every P.
2046 func forEachP(reason waitReason, fn func(*p)) {
2047 systemstack(func() {
2048 gp := getg().m.curg
2049
2050
2051
2052
2053
2054
2055
2056
2057 casGToWaitingForGC(gp, _Grunning, reason)
2058 forEachPInternal(fn)
2059 casgstatus(gp, _Gwaiting, _Grunning)
2060 })
2061 }
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072 func forEachPInternal(fn func(*p)) {
2073 mp := acquirem()
2074 pp := getg().m.p.ptr()
2075
2076 lock(&sched.lock)
2077 if sched.safePointWait != 0 {
2078 throw("forEachP: sched.safePointWait != 0")
2079 }
2080 sched.safePointWait = gomaxprocs - 1
2081 sched.safePointFn = fn
2082
2083
2084 for _, p2 := range allp {
2085 if p2 != pp {
2086 atomic.Store(&p2.runSafePointFn, 1)
2087 }
2088 }
2089 preemptall()
2090
2091
2092
2093
2094
2095
2096
2097 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2098 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2099 fn(p)
2100 sched.safePointWait--
2101 }
2102 }
2103
2104 wait := sched.safePointWait > 0
2105 unlock(&sched.lock)
2106
2107
2108 fn(pp)
2109
2110
2111
2112 for _, p2 := range allp {
2113 s := p2.status
2114
2115
2116
2117 trace := traceAcquire()
2118 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2119 if trace.ok() {
2120
2121 trace.ProcSteal(p2, false)
2122 traceRelease(trace)
2123 }
2124 p2.syscalltick++
2125 handoffp(p2)
2126 } else if trace.ok() {
2127 traceRelease(trace)
2128 }
2129 }
2130
2131
2132 if wait {
2133 for {
2134
2135
2136
2137
2138 if notetsleep(&sched.safePointNote, 100*1000) {
2139 noteclear(&sched.safePointNote)
2140 break
2141 }
2142 preemptall()
2143 }
2144 }
2145 if sched.safePointWait != 0 {
2146 throw("forEachP: not done")
2147 }
2148 for _, p2 := range allp {
2149 if p2.runSafePointFn != 0 {
2150 throw("forEachP: P did not run fn")
2151 }
2152 }
2153
2154 lock(&sched.lock)
2155 sched.safePointFn = nil
2156 unlock(&sched.lock)
2157 releasem(mp)
2158 }
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171 func runSafePointFn() {
2172 p := getg().m.p.ptr()
2173
2174
2175
2176 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2177 return
2178 }
2179 sched.safePointFn(p)
2180 lock(&sched.lock)
2181 sched.safePointWait--
2182 if sched.safePointWait == 0 {
2183 notewakeup(&sched.safePointNote)
2184 }
2185 unlock(&sched.lock)
2186 }
2187
2188
2189
2190
2191 var cgoThreadStart unsafe.Pointer
2192
2193 type cgothreadstart struct {
2194 g guintptr
2195 tls *uint64
2196 fn unsafe.Pointer
2197 }
2198
2199
2200
2201
2202 // Allocate a new m unassociated with any thread.
2203 // Can use p for allocation context if needed.
2204 // fn is recorded as the new m's m.mstartfn.
2205 // id is optional pre-allocated m ID. Omit by passing -1.
2206 //
2207 // This function is allowed to have write barriers even if the caller isn't because it borrows pp.
2208 func allocm(pp *p, fn func(), id int64) *m {
2209 allocmLock.rlock()
2210
2211
2212
2213
2214 acquirem()
2215
2216 gp := getg()
2217 if gp.m.p == 0 {
2218 acquirep(pp)
2219 }
2220
2221
2222
2223 if sched.freem != nil {
2224 lock(&sched.lock)
2225 var newList *m
2226 for freem := sched.freem; freem != nil; {
2227
2228 wait := freem.freeWait.Load()
2229 if wait == freeMWait {
2230 next := freem.freelink
2231 freem.freelink = newList
2232 newList = freem
2233 freem = next
2234 continue
2235 }
2236
2237
2238
2239 if traceEnabled() || traceShuttingDown() {
2240 traceThreadDestroy(freem)
2241 }
2242
2243
2244
2245 if wait == freeMStack {
2246
2247
2248
2249 systemstack(func() {
2250 stackfree(freem.g0.stack)
2251 })
2252 }
2253 freem = freem.freelink
2254 }
2255 sched.freem = newList
2256 unlock(&sched.lock)
2257 }
2258
2259 mp := new(m)
2260 mp.mstartfn = fn
2261 mcommoninit(mp, id)
2262
2263
2264
2265 if iscgo || mStackIsSystemAllocated() {
2266 mp.g0 = malg(-1)
2267 } else {
2268 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2269 }
2270 mp.g0.m = mp
2271
2272 if pp == gp.m.p.ptr() {
2273 releasep()
2274 }
2275
2276 releasem(gp.m)
2277 allocmLock.runlock()
2278 return mp
2279 }
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314 // needm is called when a cgo callback happens on a
2315 // thread without an m (a thread not created by Go).
2316 // In this case, needm grabs an extra m from the extra list and
2317 // returns with m and g initialized correctly for executing Go code.
2318 // The signal argument indicates whether we are being called from a
2319 // signal handler, in which case tracing must not be touched.
2320 func needm(signal bool) {
2321 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2322
2323
2324
2325
2326
2327
2328 writeErrStr("fatal error: cgo callback before cgo call\n")
2329 exit(1)
2330 }
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340 var sigmask sigset
2341 sigsave(&sigmask)
2342 sigblock(false)
2343
2344
2345
2346
2347 mp, last := getExtraM()
2348
2349
2350
2351
2352
2353
2354
2355
2356 mp.needextram = last
2357
2358
2359 mp.sigmask = sigmask
2360
2361
2362
2363 osSetupTLS(mp)
2364
2365
2366
2367 setg(mp.g0)
2368 sp := sys.GetCallerSP()
2369 callbackUpdateSystemStack(mp, sp, signal)
2370
2371
2372
2373
2374 mp.isExtraInC = false
2375
2376
2377 asminit()
2378 minit()
2379
2380
2381
2382
2383
2384
2385 var trace traceLocker
2386 if !signal {
2387 trace = traceAcquire()
2388 }
2389
2390
2391 casgstatus(mp.curg, _Gdead, _Gsyscall)
2392 sched.ngsys.Add(-1)
2393
2394 if !signal {
2395 if trace.ok() {
2396 trace.GoCreateSyscall(mp.curg)
2397 traceRelease(trace)
2398 }
2399 }
2400 mp.isExtraInSig = signal
2401 }
2402
2403
2404
2405
2406 func needAndBindM() {
2407 needm(false)
2408
2409 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2410 cgoBindM()
2411 }
2412 }
2413
2414
2415
2416
2417 func newextram() {
2418 c := extraMWaiters.Swap(0)
2419 if c > 0 {
2420 for i := uint32(0); i < c; i++ {
2421 oneNewExtraM()
2422 }
2423 } else if extraMLength.Load() == 0 {
2424
2425 oneNewExtraM()
2426 }
2427 }
2428
2429
2430 func oneNewExtraM() {
2431
2432
2433
2434
2435
2436 mp := allocm(nil, nil, -1)
2437 gp := malg(4096)
2438 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2439 gp.sched.sp = gp.stack.hi
2440 gp.sched.sp -= 4 * goarch.PtrSize
2441 gp.sched.lr = 0
2442 gp.sched.g = guintptr(unsafe.Pointer(gp))
2443 gp.syscallpc = gp.sched.pc
2444 gp.syscallsp = gp.sched.sp
2445 gp.stktopsp = gp.sched.sp
2446
2447
2448
2449
2450 casgstatus(gp, _Gidle, _Gdead)
2451 gp.m = mp
2452 mp.curg = gp
2453 mp.isextra = true
2454
2455 mp.isExtraInC = true
2456 mp.lockedInt++
2457 mp.lockedg.set(gp)
2458 gp.lockedm.set(mp)
2459 gp.goid = sched.goidgen.Add(1)
2460 if raceenabled {
2461 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2462 }
2463
2464 allgadd(gp)
2465
2466
2467
2468
2469
2470 sched.ngsys.Add(1)
2471
2472
2473 addExtraM(mp)
2474 }
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506 // dropm puts the current m back onto the extra list after a cgo callback
2507 // is done executing. It is the inverse of needm: the m is unbound from the
2508 // thread and its signal state is restored before returning to non-Go code.
2509 func dropm() {
2510
2511
2512
2513 mp := getg().m
2514
2515
2516
2517
2518
2519 var trace traceLocker
2520 if !mp.isExtraInSig {
2521 trace = traceAcquire()
2522 }
2523
2524
2525 casgstatus(mp.curg, _Gsyscall, _Gdead)
2526 mp.curg.preemptStop = false
2527 sched.ngsys.Add(1)
2528
2529 if !mp.isExtraInSig {
2530 if trace.ok() {
2531 trace.GoDestroySyscall()
2532 traceRelease(trace)
2533 }
2534 }
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549 mp.syscalltick--
2550
2551
2552
2553 mp.curg.trace.reset()
2554
2555
2556
2557
2558 if traceEnabled() || traceShuttingDown() {
2559
2560
2561
2562
2563
2564
2565
2566 lock(&sched.lock)
2567 traceThreadDestroy(mp)
2568 unlock(&sched.lock)
2569 }
2570 mp.isExtraInSig = false
2571
2572
2573
2574
2575
2576 sigmask := mp.sigmask
2577 sigblock(false)
2578 unminit()
2579
2580 setg(nil)
2581
2582
2583
2584 g0 := mp.g0
2585 g0.stack.hi = 0
2586 g0.stack.lo = 0
2587 g0.stackguard0 = 0
2588 g0.stackguard1 = 0
2589 mp.g0StackAccurate = false
2590
2591 putExtraM(mp)
2592
2593 msigrestore(sigmask)
2594 }
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616 func cgoBindM() {
2617 if GOOS == "windows" || GOOS == "plan9" {
2618 fatal("bindm in unexpected GOOS")
2619 }
2620 g := getg()
2621 if g.m.g0 != g {
2622 fatal("the current g is not g0")
2623 }
2624 if _cgo_bindm != nil {
2625 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2626 }
2627 }
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640 func getm() uintptr {
2641 return uintptr(unsafe.Pointer(getg().m))
2642 }
2643
2644 var (
2645
2646
2647
2648
2649
2650
2651 extraM atomic.Uintptr
2652
2653 extraMLength atomic.Uint32
2654
2655 extraMWaiters atomic.Uint32
2656
2657
2658 extraMInUse atomic.Uint32
2659 )
2660
2661
2662
2663
2664
2665
2666
2667
2668 func lockextra(nilokay bool) *m {
2669 const locked = 1
2670
2671 incr := false
2672 for {
2673 old := extraM.Load()
2674 if old == locked {
2675 osyield_no_g()
2676 continue
2677 }
2678 if old == 0 && !nilokay {
2679 if !incr {
2680
2681
2682
2683 extraMWaiters.Add(1)
2684 incr = true
2685 }
2686 usleep_no_g(1)
2687 continue
2688 }
2689 if extraM.CompareAndSwap(old, locked) {
2690 return (*m)(unsafe.Pointer(old))
2691 }
2692 osyield_no_g()
2693 continue
2694 }
2695 }
2696
2697
2698 func unlockextra(mp *m, delta int32) {
2699 extraMLength.Add(delta)
2700 extraM.Store(uintptr(unsafe.Pointer(mp)))
2701 }
2702
2703
2704
2705
2706
2707
2708
2709
2710 func getExtraM() (mp *m, last bool) {
2711 mp = lockextra(false)
2712 extraMInUse.Add(1)
2713 unlockextra(mp.schedlink.ptr(), -1)
2714 return mp, mp.schedlink.ptr() == nil
2715 }
2716
2717
2718
2719
2720
2721 func putExtraM(mp *m) {
2722 extraMInUse.Add(-1)
2723 addExtraM(mp)
2724 }
2725
2726
2727
2728
2729 func addExtraM(mp *m) {
2730 mnext := lockextra(true)
2731 mp.schedlink.set(mnext)
2732 unlockextra(mp, 1)
2733 }
2734
2735 var (
2736
2737
2738
2739 allocmLock rwmutex
2740
2741
2742
2743
2744 execLock rwmutex
2745 )
2746
2747
2748
2749 const (
2750 failthreadcreate = "runtime: failed to create new OS thread\n"
2751 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2752 )
2753
2754
2755
2756
2757 var newmHandoff struct {
2758 lock mutex
2759
2760
2761
2762 newm muintptr
2763
2764
2765
2766 waiting bool
2767 wake note
2768
2769
2770
2771
2772 haveTemplateThread uint32
2773 }
2774
2775
2776 // Create a new m. It will start off with a call to fn, or else the scheduler.
2777 // fn needs to be static and not a heap allocated closure.
2778 // May run with m.p==nil, so write barriers are not allowed.
2779 //
2780 // id is optional pre-allocated m ID. Omit by passing -1.
2781 //go:nowritebarrierrec
2782 func newm(fn func(), pp *p, id int64) {
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793 acquirem()
2794
2795 mp := allocm(pp, fn, id)
2796 mp.nextp.set(pp)
2797 mp.sigmask = initSigmask
2798 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810 lock(&newmHandoff.lock)
2811 if newmHandoff.haveTemplateThread == 0 {
2812 throw("on a locked thread with no template thread")
2813 }
2814 mp.schedlink = newmHandoff.newm
2815 newmHandoff.newm.set(mp)
2816 if newmHandoff.waiting {
2817 newmHandoff.waiting = false
2818 notewakeup(&newmHandoff.wake)
2819 }
2820 unlock(&newmHandoff.lock)
2821
2822
2823
2824 releasem(getg().m)
2825 return
2826 }
2827 newm1(mp)
2828 releasem(getg().m)
2829 }
2830
2831 func newm1(mp *m) {
2832 if iscgo {
2833 var ts cgothreadstart
2834 if _cgo_thread_start == nil {
2835 throw("_cgo_thread_start missing")
2836 }
2837 ts.g.set(mp.g0)
2838 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2839 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2840 if msanenabled {
2841 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2842 }
2843 if asanenabled {
2844 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2845 }
2846 execLock.rlock()
2847 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2848 execLock.runlock()
2849 return
2850 }
2851 execLock.rlock()
2852 newosproc(mp)
2853 execLock.runlock()
2854 }
2855
2856
2857
2858
2859
2860 func startTemplateThread() {
2861 if GOARCH == "wasm" {
2862 return
2863 }
2864
2865
2866
2867 mp := acquirem()
2868 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2869 releasem(mp)
2870 return
2871 }
2872 newm(templateThread, nil, -1)
2873 releasem(mp)
2874 }
2875
2876
2877
2878
2879
2880
2881
2882 // templateThread is a thread in a known-good state that exists solely
2883 // to start new threads in known-good states when the calling thread
2884 // may not be in a good state.
2885 //
2886 // templateThread runs on an M without a P, so it must not have write
2887 // barriers.
2888 func templateThread() {
2889 lock(&sched.lock)
2890 sched.nmsys++
2891 checkdead()
2892 unlock(&sched.lock)
2893
2894 for {
2895 lock(&newmHandoff.lock)
2896 for newmHandoff.newm != 0 {
2897 newm := newmHandoff.newm.ptr()
2898 newmHandoff.newm = 0
2899 unlock(&newmHandoff.lock)
2900 for newm != nil {
2901 next := newm.schedlink.ptr()
2902 newm.schedlink = 0
2903 newm1(newm)
2904 newm = next
2905 }
2906 lock(&newmHandoff.lock)
2907 }
2908 newmHandoff.waiting = true
2909 noteclear(&newmHandoff.wake)
2910 unlock(&newmHandoff.lock)
2911 notesleep(&newmHandoff.wake)
2912 }
2913 }
2914
2915 // Stops execution of the current m until new work is available.
2916 // Returns with acquired P.
2917 func stopm() {
2918 gp := getg()
2919
2920 if gp.m.locks != 0 {
2921 throw("stopm holding locks")
2922 }
2923 if gp.m.p != 0 {
2924 throw("stopm holding p")
2925 }
2926 if gp.m.spinning {
2927 throw("stopm spinning")
2928 }
2929
2930 lock(&sched.lock)
2931 mput(gp.m)
2932 unlock(&sched.lock)
2933 mPark()
2934 acquirep(gp.m.nextp.ptr())
2935 gp.m.nextp = 0
2936 }
2937
2938 func mspinning() {
2939
2940 getg().m.spinning = true
2941 }
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952 // Schedules some M to run the p (creates an M if necessary).
2953 // If p==nil, tries to get an idle P; if there is no idle P, does nothing.
2954 // May run with m.p==nil, so write barriers are not allowed.
2955 // If spinning is set, the caller has incremented nmspinning and must provide a
2956 // P. startm will set m.spinning in the newly started M.
2957 //
2958 // Callers passing a non-nil P must call from a non-preemptible context.
2959 // If lockheld is set, the caller already holds sched.lock.
2960 func startm(pp *p, spinning, lockheld bool) {
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977 mp := acquirem()
2978 if !lockheld {
2979 lock(&sched.lock)
2980 }
2981 if pp == nil {
2982 if spinning {
2983
2984
2985
2986 throw("startm: P required for spinning=true")
2987 }
2988 pp, _ = pidleget(0)
2989 if pp == nil {
2990 if !lockheld {
2991 unlock(&sched.lock)
2992 }
2993 releasem(mp)
2994 return
2995 }
2996 }
2997 nmp := mget()
2998 if nmp == nil {
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013 id := mReserveID()
3014 unlock(&sched.lock)
3015
3016 var fn func()
3017 if spinning {
3018
3019 fn = mspinning
3020 }
3021 newm(fn, pp, id)
3022
3023 if lockheld {
3024 lock(&sched.lock)
3025 }
3026
3027
3028 releasem(mp)
3029 return
3030 }
3031 if !lockheld {
3032 unlock(&sched.lock)
3033 }
3034 if nmp.spinning {
3035 throw("startm: m is spinning")
3036 }
3037 if nmp.nextp != 0 {
3038 throw("startm: m has p")
3039 }
3040 if spinning && !runqempty(pp) {
3041 throw("startm: p has runnable gs")
3042 }
3043
3044 nmp.spinning = spinning
3045 nmp.nextp.set(pp)
3046 notewakeup(&nmp.park)
3047
3048
3049 releasem(mp)
3050 }
3051
3052
3053 // Hands off P from syscall or locked M.
3054 // Always runs without a P, so write barriers are not allowed.
3055 //go:nowritebarrierrec
3056 func handoffp(pp *p) {
3057
3058
3059
3060
3061 if !runqempty(pp) || sched.runqsize != 0 {
3062 startm(pp, false, false)
3063 return
3064 }
3065
3066 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3067 startm(pp, false, false)
3068 return
3069 }
3070
3071 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3072 startm(pp, false, false)
3073 return
3074 }
3075
3076
3077 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3078 sched.needspinning.Store(0)
3079 startm(pp, true, false)
3080 return
3081 }
3082 lock(&sched.lock)
3083 if sched.gcwaiting.Load() {
3084 pp.status = _Pgcstop
3085 pp.gcStopTime = nanotime()
3086 sched.stopwait--
3087 if sched.stopwait == 0 {
3088 notewakeup(&sched.stopnote)
3089 }
3090 unlock(&sched.lock)
3091 return
3092 }
3093 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3094 sched.safePointFn(pp)
3095 sched.safePointWait--
3096 if sched.safePointWait == 0 {
3097 notewakeup(&sched.safePointNote)
3098 }
3099 }
3100 if sched.runqsize != 0 {
3101 unlock(&sched.lock)
3102 startm(pp, false, false)
3103 return
3104 }
3105
3106
3107 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3108 unlock(&sched.lock)
3109 startm(pp, false, false)
3110 return
3111 }
3112
3113
3114
3115 when := pp.timers.wakeTime()
3116 pidleput(pp, 0)
3117 unlock(&sched.lock)
3118
3119 if when != 0 {
3120 wakeNetPoller(when)
3121 }
3122 }
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134 // Tries to add one more P to execute G's.
3135 // Called when a G is made runnable (newproc, ready).
3136 // Must be called with a P.
3137 func wakep() {
3138
3139
3140 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3141 return
3142 }
3143
3144
3145
3146
3147
3148
3149 mp := acquirem()
3150
3151 var pp *p
3152 lock(&sched.lock)
3153 pp, _ = pidlegetSpinning(0)
3154 if pp == nil {
3155 if sched.nmspinning.Add(-1) < 0 {
3156 throw("wakep: negative nmspinning")
3157 }
3158 unlock(&sched.lock)
3159 releasem(mp)
3160 return
3161 }
3162
3163
3164
3165
3166 unlock(&sched.lock)
3167
3168 startm(pp, true, false)
3169
3170 releasem(mp)
3171 }
3172
3173
3174
3175 func stoplockedm() {
3176 gp := getg()
3177
3178 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3179 throw("stoplockedm: inconsistent locking")
3180 }
3181 if gp.m.p != 0 {
3182
3183 pp := releasep()
3184 handoffp(pp)
3185 }
3186 incidlelocked(1)
3187
3188 mPark()
3189 status := readgstatus(gp.m.lockedg.ptr())
3190 if status&^_Gscan != _Grunnable {
3191 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3192 dumpgstatus(gp.m.lockedg.ptr())
3193 throw("stoplockedm: not runnable")
3194 }
3195 acquirep(gp.m.nextp.ptr())
3196 gp.m.nextp = 0
3197 }
3198
3199
3200
3201
3202
3203 func startlockedm(gp *g) {
3204 mp := gp.lockedm.ptr()
3205 if mp == getg().m {
3206 throw("startlockedm: locked to me")
3207 }
3208 if mp.nextp != 0 {
3209 throw("startlockedm: m has p")
3210 }
3211
3212 incidlelocked(-1)
3213 pp := releasep()
3214 mp.nextp.set(pp)
3215 notewakeup(&mp.park)
3216 stopm()
3217 }
3218
3219
3220
3221 func gcstopm() {
3222 gp := getg()
3223
3224 if !sched.gcwaiting.Load() {
3225 throw("gcstopm: not waiting for gc")
3226 }
3227 if gp.m.spinning {
3228 gp.m.spinning = false
3229
3230
3231 if sched.nmspinning.Add(-1) < 0 {
3232 throw("gcstopm: negative nmspinning")
3233 }
3234 }
3235 pp := releasep()
3236 lock(&sched.lock)
3237 pp.status = _Pgcstop
3238 pp.gcStopTime = nanotime()
3239 sched.stopwait--
3240 if sched.stopwait == 0 {
3241 notewakeup(&sched.stopnote)
3242 }
3243 unlock(&sched.lock)
3244 stopm()
3245 }
3246
3247
3248
3249
3250 // Schedules gp to run on the current M.
3251 // If inheritTime is true, gp inherits the remaining time in the
3252 // current time slice. Otherwise, it starts a new time slice.
3253 // Never returns.
3254 //
3255 // Write barriers are allowed because this is called immediately after acquiring a P in several places.
3256 func execute(gp *g, inheritTime bool) {
3257 mp := getg().m
3258
3259 if goroutineProfile.active {
3260
3261
3262
3263 tryRecordGoroutineProfile(gp, nil, osyield)
3264 }
3265
3266
3267
3268 mp.curg = gp
3269 gp.m = mp
3270 casgstatus(gp, _Grunnable, _Grunning)
3271 gp.waitsince = 0
3272 gp.preempt = false
3273 gp.stackguard0 = gp.stack.lo + stackGuard
3274 if !inheritTime {
3275 mp.p.ptr().schedtick++
3276 }
3277
3278
3279 hz := sched.profilehz
3280 if mp.profilehz != hz {
3281 setThreadCPUProfiler(hz)
3282 }
3283
3284 trace := traceAcquire()
3285 if trace.ok() {
3286 trace.GoStart()
3287 traceRelease(trace)
3288 }
3289
3290 gogo(&gp.sched)
3291 }
3292
3293 // Finds a runnable goroutine to execute.
3294 // Tries to steal from other P's, get g from local or global queue, poll network.
3295 // tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
3296 // reader) so the caller should try to wake a P.
3297 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3298 mp := getg().m
3299
3300
3301
3302
3303
3304 top:
3305 pp := mp.p.ptr()
3306 if sched.gcwaiting.Load() {
3307 gcstopm()
3308 goto top
3309 }
3310 if pp.runSafePointFn != 0 {
3311 runSafePointFn()
3312 }
3313
3314
3315
3316
3317
3318 now, pollUntil, _ := pp.timers.check(0)
3319
3320
3321 if traceEnabled() || traceShuttingDown() {
3322 gp := traceReader()
3323 if gp != nil {
3324 trace := traceAcquire()
3325 casgstatus(gp, _Gwaiting, _Grunnable)
3326 if trace.ok() {
3327 trace.GoUnpark(gp, 0)
3328 traceRelease(trace)
3329 }
3330 return gp, false, true
3331 }
3332 }
3333
3334
3335 if gcBlackenEnabled != 0 {
3336 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3337 if gp != nil {
3338 return gp, false, true
3339 }
3340 now = tnow
3341 }
3342
3343
3344
3345
3346 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3347 lock(&sched.lock)
3348 gp := globrunqget(pp, 1)
3349 unlock(&sched.lock)
3350 if gp != nil {
3351 return gp, false, false
3352 }
3353 }
3354
3355
3356 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3357 if gp := wakefing(); gp != nil {
3358 ready(gp, 0, true)
3359 }
3360 }
3361 if *cgo_yield != nil {
3362 asmcgocall(*cgo_yield, nil)
3363 }
3364
3365
3366 if gp, inheritTime := runqget(pp); gp != nil {
3367 return gp, inheritTime, false
3368 }
3369
3370
3371 if sched.runqsize != 0 {
3372 lock(&sched.lock)
3373 gp := globrunqget(pp, 0)
3374 unlock(&sched.lock)
3375 if gp != nil {
3376 return gp, false, false
3377 }
3378 }
3379
3380
3381
3382
3383
3384
3385
3386
3387 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3388 if list, delta := netpoll(0); !list.empty() {
3389 gp := list.pop()
3390 injectglist(&list)
3391 netpollAdjustWaiters(delta)
3392 trace := traceAcquire()
3393 casgstatus(gp, _Gwaiting, _Grunnable)
3394 if trace.ok() {
3395 trace.GoUnpark(gp, 0)
3396 traceRelease(trace)
3397 }
3398 return gp, false, false
3399 }
3400 }
3401
3402
3403
3404
3405
3406
3407 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3408 if !mp.spinning {
3409 mp.becomeSpinning()
3410 }
3411
3412 gp, inheritTime, tnow, w, newWork := stealWork(now)
3413 if gp != nil {
3414
3415 return gp, inheritTime, false
3416 }
3417 if newWork {
3418
3419
3420 goto top
3421 }
3422
3423 now = tnow
3424 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3425
3426 pollUntil = w
3427 }
3428 }
3429
3430
3431
3432
3433
3434 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3435 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3436 if node != nil {
3437 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3438 gp := node.gp.ptr()
3439
3440 trace := traceAcquire()
3441 casgstatus(gp, _Gwaiting, _Grunnable)
3442 if trace.ok() {
3443 trace.GoUnpark(gp, 0)
3444 traceRelease(trace)
3445 }
3446 return gp, false, false
3447 }
3448 gcController.removeIdleMarkWorker()
3449 }
3450
3451
3452
3453
3454
3455 gp, otherReady := beforeIdle(now, pollUntil)
3456 if gp != nil {
3457 trace := traceAcquire()
3458 casgstatus(gp, _Gwaiting, _Grunnable)
3459 if trace.ok() {
3460 trace.GoUnpark(gp, 0)
3461 traceRelease(trace)
3462 }
3463 return gp, false, false
3464 }
3465 if otherReady {
3466 goto top
3467 }
3468
3469
3470
3471
3472
3473 allpSnapshot := allp
3474
3475
3476 idlepMaskSnapshot := idlepMask
3477 timerpMaskSnapshot := timerpMask
3478
3479
3480 lock(&sched.lock)
3481 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3482 unlock(&sched.lock)
3483 goto top
3484 }
3485 if sched.runqsize != 0 {
3486 gp := globrunqget(pp, 0)
3487 unlock(&sched.lock)
3488 return gp, false, false
3489 }
3490 if !mp.spinning && sched.needspinning.Load() == 1 {
3491
3492 mp.becomeSpinning()
3493 unlock(&sched.lock)
3494 goto top
3495 }
3496 if releasep() != pp {
3497 throw("findrunnable: wrong p")
3498 }
3499 now = pidleput(pp, now)
3500 unlock(&sched.lock)
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538 wasSpinning := mp.spinning
3539 if mp.spinning {
3540 mp.spinning = false
3541 if sched.nmspinning.Add(-1) < 0 {
3542 throw("findrunnable: negative nmspinning")
3543 }
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556 lock(&sched.lock)
3557 if sched.runqsize != 0 {
3558 pp, _ := pidlegetSpinning(0)
3559 if pp != nil {
3560 gp := globrunqget(pp, 0)
3561 if gp == nil {
3562 throw("global runq empty with non-zero runqsize")
3563 }
3564 unlock(&sched.lock)
3565 acquirep(pp)
3566 mp.becomeSpinning()
3567 return gp, false, false
3568 }
3569 }
3570 unlock(&sched.lock)
3571
3572 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3573 if pp != nil {
3574 acquirep(pp)
3575 mp.becomeSpinning()
3576 goto top
3577 }
3578
3579
3580 pp, gp := checkIdleGCNoP()
3581 if pp != nil {
3582 acquirep(pp)
3583 mp.becomeSpinning()
3584
3585
3586 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3587 trace := traceAcquire()
3588 casgstatus(gp, _Gwaiting, _Grunnable)
3589 if trace.ok() {
3590 trace.GoUnpark(gp, 0)
3591 traceRelease(trace)
3592 }
3593 return gp, false, false
3594 }
3595
3596
3597
3598
3599
3600
3601
3602 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3603 }
3604
3605
3606 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3607 sched.pollUntil.Store(pollUntil)
3608 if mp.p != 0 {
3609 throw("findrunnable: netpoll with p")
3610 }
3611 if mp.spinning {
3612 throw("findrunnable: netpoll with spinning")
3613 }
3614 delay := int64(-1)
3615 if pollUntil != 0 {
3616 if now == 0 {
3617 now = nanotime()
3618 }
3619 delay = pollUntil - now
3620 if delay < 0 {
3621 delay = 0
3622 }
3623 }
3624 if faketime != 0 {
3625
3626 delay = 0
3627 }
3628 list, delta := netpoll(delay)
3629
3630 now = nanotime()
3631 sched.pollUntil.Store(0)
3632 sched.lastpoll.Store(now)
3633 if faketime != 0 && list.empty() {
3634
3635
3636 stopm()
3637 goto top
3638 }
3639 lock(&sched.lock)
3640 pp, _ := pidleget(now)
3641 unlock(&sched.lock)
3642 if pp == nil {
3643 injectglist(&list)
3644 netpollAdjustWaiters(delta)
3645 } else {
3646 acquirep(pp)
3647 if !list.empty() {
3648 gp := list.pop()
3649 injectglist(&list)
3650 netpollAdjustWaiters(delta)
3651 trace := traceAcquire()
3652 casgstatus(gp, _Gwaiting, _Grunnable)
3653 if trace.ok() {
3654 trace.GoUnpark(gp, 0)
3655 traceRelease(trace)
3656 }
3657 return gp, false, false
3658 }
3659 if wasSpinning {
3660 mp.becomeSpinning()
3661 }
3662 goto top
3663 }
3664 } else if pollUntil != 0 && netpollinited() {
3665 pollerPollUntil := sched.pollUntil.Load()
3666 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3667 netpollBreak()
3668 }
3669 }
3670 stopm()
3671 goto top
3672 }
3673
3674
3675
3676
3677
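// pollWork reports whether there is non-background work this P could be
// doing: something in the global or local run queue, or runnable goroutines
// waiting in the network poller. This is a lightweight check, not a
// guarantee that the work will still be there when the caller looks for it.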
3678 func pollWork() bool {
3679 if sched.runqsize != 0 {
3680 return true
3681 }
3682 p := getg().m.p.ptr()
3683 if !runqempty(p) {
3684 return true
3685 }
3686 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3687 if list, delta := netpoll(0); !list.empty() {
3688 injectglist(&list)
3689 netpollAdjustWaiters(delta)
3690 return true
3691 }
3692 }
3693 return false
3694 }
3695
3696
3697
3698
3699
3700
3701
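// stealWork attempts to steal a runnable goroutine or an expired timer from
// any other P. It returns the stolen goroutine (if any), whether it should
// inherit the current time slice, an updated current time, the earliest
// known timer deadline (or 0), and whether new work may have been readied,
// in which case the caller should retry from the top.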
3702 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3703 pp := getg().m.p.ptr()
3704
3705 ranTimer := false
3706
3707 const stealTries = 4
3708 for i := 0; i < stealTries; i++ {
3709 stealTimersOrRunNextG := i == stealTries-1
3710
3711 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3712 if sched.gcwaiting.Load() {
3713
3714 return nil, false, now, pollUntil, true
3715 }
3716 p2 := allp[enum.position()]
3717 if pp == p2 {
3718 continue
3719 }
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3735 tnow, w, ran := p2.timers.check(now)
3736 now = tnow
3737 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3738 pollUntil = w
3739 }
3740 if ran {
3741
3742
3743
3744
3745
3746
3747
3748
3749 if gp, inheritTime := runqget(pp); gp != nil {
3750 return gp, inheritTime, now, pollUntil, ranTimer
3751 }
3752 ranTimer = true
3753 }
3754 }
3755
3756
3757 if !idlepMask.read(enum.position()) {
3758 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3759 return gp, false, now, pollUntil, ranTimer
3760 }
3761 }
3762 }
3763 }
3764
3765
3766
3767
3768 return nil, false, now, pollUntil, ranTimer
3769 }
3770
3771
3772
3773
3774
3775
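// checkRunqsNoP checks a snapshot of all Ps for a non-empty local run queue.
// On entry the calling M holds no P. If stealable work is found, an idle P
// is taken from the idle list and returned so the caller can acquire it and
// retry the steal from the top of findRunnable.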
3776 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3777 for id, p2 := range allpSnapshot {
3778 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3779 lock(&sched.lock)
3780 pp, _ := pidlegetSpinning(0)
3781 if pp == nil {
3782
3783 unlock(&sched.lock)
3784 return nil
3785 }
3786 unlock(&sched.lock)
3787 return pp
3788 }
3789 }
3790
3791
3792 return nil
3793 }
3794
3795
3796
3797
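// checkTimersNoP scans a snapshot of all Ps that may have timers and folds
// the earliest timer wake time into pollUntil, which it returns. The calling
// M holds no P, so it only reads wake times; it does not run the timers.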
3798 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3799 for id, p2 := range allpSnapshot {
3800 if timerpMaskSnapshot.read(uint32(id)) {
3801 w := p2.timers.wakeTime()
3802 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3803 pollUntil = w
3804 }
3805 }
3806 }
3807
3808 return pollUntil
3809 }
3810
3811
3812
3813
3814
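// checkIdleGCNoP checks whether idle-priority GC mark work is available and,
// if so, tries to take both an idle P and a background mark worker G to run
// it. On entry the calling M holds no P. It returns a non-nil P and worker G
// only when the caller should acquire the P and run the worker; otherwise it
// undoes any partial reservation and returns nil, nil.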
3815 func checkIdleGCNoP() (*p, *g) {
3816
3817
3818
3819
3820
3821
3822 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3823 return nil, nil
3824 }
3825 if !gcMarkWorkAvailable(nil) {
3826 return nil, nil
3827 }
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846 lock(&sched.lock)
3847 pp, now := pidlegetSpinning(0)
3848 if pp == nil {
3849 unlock(&sched.lock)
3850 return nil, nil
3851 }
3852
3853
3854 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3855 pidleput(pp, now)
3856 unlock(&sched.lock)
3857 return nil, nil
3858 }
3859
3860 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3861 if node == nil {
3862 pidleput(pp, now)
3863 unlock(&sched.lock)
3864 gcController.removeIdleMarkWorker()
3865 return nil, nil
3866 }
3867
3868 unlock(&sched.lock)
3869
3870 return pp, node.gp.ptr()
3871 }
3872
3873
3874
3875
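// wakeNetPoller wakes the thread sleeping in the network poller if it isn't
// going to wake up before the when argument, or wakes an idle P to service
// timers and the network poller if there isn't one already.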
3876 func wakeNetPoller(when int64) {
3877 if sched.lastpoll.Load() == 0 {
3878
3879
3880
3881
3882 pollerPollUntil := sched.pollUntil.Load()
3883 if pollerPollUntil == 0 || pollerPollUntil > when {
3884 netpollBreak()
3885 }
3886 } else {
3887
3888
3889 if GOOS != "plan9" {
3890 wakep()
3891 }
3892 }
3893 }
3894
3895 func resetspinning() {
3896 gp := getg()
3897 if !gp.m.spinning {
3898 throw("resetspinning: not a spinning m")
3899 }
3900 gp.m.spinning = false
3901 nmspinning := sched.nmspinning.Add(-1)
3902 if nmspinning < 0 {
3903 throw("findrunnable: negative nmspinning")
3904 }
3905
3906
3907
3908 wakep()
3909 }
3910
3911
3912
3913
3914
3915
3916
3917
3918
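// injectglist adds each runnable G on the list to some run queue and clears
// glist. If the current M has no P, everything goes to the global queue and
// idle Ps are started to drain it. Otherwise, one G per idle P is pushed to
// the global queue (starting those Ps) and the remainder go onto the current
// P's local run queue. May temporarily acquire sched.lock and can run
// concurrently with GC.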
3919 func injectglist(glist *gList) {
3920 if glist.empty() {
3921 return
3922 }
3923
3924
3925
3926 head := glist.head.ptr()
3927 var tail *g
3928 qsize := 0
3929 trace := traceAcquire()
3930 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3931 tail = gp
3932 qsize++
3933 casgstatus(gp, _Gwaiting, _Grunnable)
3934 if trace.ok() {
3935 trace.GoUnpark(gp, 0)
3936 }
3937 }
3938 if trace.ok() {
3939 traceRelease(trace)
3940 }
3941
3942
3943 var q gQueue
3944 q.head.set(head)
3945 q.tail.set(tail)
3946 *glist = gList{}
3947
3948 startIdle := func(n int) {
3949 for i := 0; i < n; i++ {
3950 mp := acquirem()
3951 lock(&sched.lock)
3952
3953 pp, _ := pidlegetSpinning(0)
3954 if pp == nil {
3955 unlock(&sched.lock)
3956 releasem(mp)
3957 break
3958 }
3959
3960 startm(pp, false, true)
3961 unlock(&sched.lock)
3962 releasem(mp)
3963 }
3964 }
3965
3966 pp := getg().m.p.ptr()
3967 if pp == nil {
3968 lock(&sched.lock)
3969 globrunqputbatch(&q, int32(qsize))
3970 unlock(&sched.lock)
3971 startIdle(qsize)
3972 return
3973 }
3974
3975 npidle := int(sched.npidle.Load())
3976 var (
3977 globq gQueue
3978 n int
3979 )
3980 for n = 0; n < npidle && !q.empty(); n++ {
3981 g := q.pop()
3982 globq.pushBack(g)
3983 }
3984 if n > 0 {
3985 lock(&sched.lock)
3986 globrunqputbatch(&globq, int32(n))
3987 unlock(&sched.lock)
3988 startIdle(n)
3989 qsize -= n
3990 }
3991
3992 if !q.empty() {
3993 runqputbatch(pp, &q, qsize)
3994 }
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009 wakep()
4010 }
4011
4012
4013
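// schedule runs one round of the scheduler: find a runnable goroutine and
// execute it. It never returns.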
4014 func schedule() {
4015 mp := getg().m
4016
4017 if mp.locks != 0 {
4018 throw("schedule: holding locks")
4019 }
4020
4021 if mp.lockedg != 0 {
4022 stoplockedm()
4023 execute(mp.lockedg.ptr(), false)
4024 }
4025
4026
4027
4028 if mp.incgo {
4029 throw("schedule: in cgo")
4030 }
4031
4032 top:
4033 pp := mp.p.ptr()
4034 pp.preempt = false
4035
4036
4037
4038
4039 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4040 throw("schedule: spinning with local work")
4041 }
4042
4043 gp, inheritTime, tryWakeP := findRunnable()
4044
4045 if debug.dontfreezetheworld > 0 && freezing.Load() {
4046
4047
4048
4049
4050
4051
4052
4053 lock(&deadlock)
4054 lock(&deadlock)
4055 }
4056
4057
4058
4059
4060 if mp.spinning {
4061 resetspinning()
4062 }
4063
4064 if sched.disable.user && !schedEnabled(gp) {
4065
4066
4067
4068 lock(&sched.lock)
4069 if schedEnabled(gp) {
4070
4071
4072 unlock(&sched.lock)
4073 } else {
4074 sched.disable.runnable.pushBack(gp)
4075 sched.disable.n++
4076 unlock(&sched.lock)
4077 goto top
4078 }
4079 }
4080
4081
4082
4083 if tryWakeP {
4084 wakep()
4085 }
4086 if gp.lockedm != 0 {
4087
4088
4089 startlockedm(gp)
4090 goto top
4091 }
4092
4093 execute(gp, inheritTime)
4094 }
4095
4096
4097
4098
4099
4100
4101
4102
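// dropg removes the association between the current M and its current
// goroutine (m.curg), allowing that G to be picked up by another M. The
// caller is expected to have already moved the G out of _Grunning.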
4103 func dropg() {
4104 gp := getg()
4105
4106 setMNoWB(&gp.m.curg.m, nil)
4107 setGNoWB(&gp.m.curg, nil)
4108 }
4109
4110 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4111 unlock((*mutex)(lock))
4112 return true
4113 }
4114
4115
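// park_m continues the gopark sequence on the g0 stack: it moves gp to
// _Gwaiting, detaches it from this M, and runs the unlock callback
// (waitunlockf) if one was set. If that callback reports the park should not
// happen after all, gp is made runnable again and resumed immediately;
// otherwise the M enters the scheduler.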
4116 func park_m(gp *g) {
4117 mp := getg().m
4118
4119 trace := traceAcquire()
4120
4121
4122
4123
4124
4125 sg := gp.syncGroup
4126 if sg != nil {
4127 sg.incActive()
4128 }
4129
4130 if trace.ok() {
4131
4132
4133
4134 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4135 }
4136
4137
4138 casgstatus(gp, _Grunning, _Gwaiting)
4139 if trace.ok() {
4140 traceRelease(trace)
4141 }
4142
4143 dropg()
4144
4145 if fn := mp.waitunlockf; fn != nil {
4146 ok := fn(gp, mp.waitlock)
4147 mp.waitunlockf = nil
4148 mp.waitlock = nil
4149 if !ok {
4150 trace := traceAcquire()
4151 casgstatus(gp, _Gwaiting, _Grunnable)
4152 if sg != nil {
4153 sg.decActive()
4154 }
4155 if trace.ok() {
4156 trace.GoUnpark(gp, 2)
4157 traceRelease(trace)
4158 }
4159 execute(gp, true)
4160 }
4161 }
4162
4163 if sg != nil {
4164 sg.decActive()
4165 }
4166
4167 schedule()
4168 }
4169
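// goschedImpl yields the processor: gp moves from _Grunning to _Grunnable,
// is pushed onto the global run queue, and the M re-enters the scheduler.
// preempted only selects which trace event is emitted.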
4170 func goschedImpl(gp *g, preempted bool) {
4171 trace := traceAcquire()
4172 status := readgstatus(gp)
4173 if status&^_Gscan != _Grunning {
4174 dumpgstatus(gp)
4175 throw("bad g status")
4176 }
4177 if trace.ok() {
4178
4179
4180
4181 if preempted {
4182 trace.GoPreempt()
4183 } else {
4184 trace.GoSched()
4185 }
4186 }
4187 casgstatus(gp, _Grunning, _Grunnable)
4188 if trace.ok() {
4189 traceRelease(trace)
4190 }
4191
4192 dropg()
4193 lock(&sched.lock)
4194 globrunqput(gp)
4195 unlock(&sched.lock)
4196
4197 if mainStarted {
4198 wakep()
4199 }
4200
4201 schedule()
4202 }
4203
4204
4205 func gosched_m(gp *g) {
4206 goschedImpl(gp, false)
4207 }
4208
4209
4210 func goschedguarded_m(gp *g) {
4211 if !canPreemptM(gp.m) {
4212 gogo(&gp.sched)
4213 }
4214 goschedImpl(gp, false)
4215 }
4216
4217 func gopreempt_m(gp *g) {
4218 goschedImpl(gp, true)
4219 }
4220
4221
4222
4223
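// preemptPark parks gp at an asynchronous preemption point and puts it in
// _Gpreempted. The goroutine is resumed later by whoever requested the
// suspension (typically the GC via the suspendG/resumeG machinery).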
4224 func preemptPark(gp *g) {
4225 status := readgstatus(gp)
4226 if status&^_Gscan != _Grunning {
4227 dumpgstatus(gp)
4228 throw("bad g status")
4229 }
4230
4231 if gp.asyncSafePoint {
4232
4233
4234
4235 f := findfunc(gp.sched.pc)
4236 if !f.valid() {
4237 throw("preempt at unknown pc")
4238 }
4239 if f.flag&abi.FuncFlagSPWrite != 0 {
4240 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4241 throw("preempt SPWRITE")
4242 }
4243 }
4244
4245
4246
4247
4248
4249
4250
4251 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4252 dropg()
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269 trace := traceAcquire()
4270 if trace.ok() {
4271 trace.GoPark(traceBlockPreempted, 0)
4272 }
4273 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4274 if trace.ok() {
4275 traceRelease(trace)
4276 }
4277 schedule()
4278 }
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
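// goyield is like Gosched, but it emits a GoPreempt trace event instead of
// GoSched and puts the current G on the tail of the current P's local run
// queue instead of the global run queue.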
4294 func goyield() {
4295 checkTimeouts()
4296 mcall(goyield_m)
4297 }
4298
4299 func goyield_m(gp *g) {
4300 trace := traceAcquire()
4301 pp := gp.m.p.ptr()
4302 if trace.ok() {
4303
4304
4305
4306 trace.GoPreempt()
4307 }
4308 casgstatus(gp, _Grunning, _Grunnable)
4309 if trace.ok() {
4310 traceRelease(trace)
4311 }
4312 dropg()
4313 runqput(pp, gp, false)
4314 schedule()
4315 }
4316
4317
4318 func goexit1() {
4319 if raceenabled {
4320 if gp := getg(); gp.syncGroup != nil {
4321 racereleasemergeg(gp, gp.syncGroup.raceaddr())
4322 }
4323 racegoend()
4324 }
4325 trace := traceAcquire()
4326 if trace.ok() {
4327 trace.GoEnd()
4328 traceRelease(trace)
4329 }
4330 mcall(goexit0)
4331 }
4332
4333
4334 func goexit0(gp *g) {
4335 gdestroy(gp)
4336 schedule()
4337 }
4338
4339 func gdestroy(gp *g) {
4340 mp := getg().m
4341 pp := mp.p.ptr()
4342
4343 casgstatus(gp, _Grunning, _Gdead)
4344 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4345 if isSystemGoroutine(gp, false) {
4346 sched.ngsys.Add(-1)
4347 }
4348 gp.m = nil
4349 locked := gp.lockedm != 0
4350 gp.lockedm = 0
4351 mp.lockedg = 0
4352 gp.preemptStop = false
4353 gp.paniconfault = false
4354 gp._defer = nil
4355 gp._panic = nil
4356 gp.writebuf = nil
4357 gp.waitreason = waitReasonZero
4358 gp.param = nil
4359 gp.labels = nil
4360 gp.timer = nil
4361 gp.syncGroup = nil
4362
4363 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4364
4365
4366
4367 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4368 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4369 gcController.bgScanCredit.Add(scanCredit)
4370 gp.gcAssistBytes = 0
4371 }
4372
4373 dropg()
4374
4375 if GOARCH == "wasm" {
4376 gfput(pp, gp)
4377 return
4378 }
4379
4380 if locked && mp.lockedInt != 0 {
4381 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4382 if mp.isextra {
4383 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4384 }
4385 throw("exited a goroutine internally locked to the OS thread")
4386 }
4387 gfput(pp, gp)
4388 if locked {
4389
4390
4391
4392
4393
4394
4395 if GOOS != "plan9" {
4396 gogo(&mp.g0.sched)
4397 } else {
4398
4399
4400 mp.lockedExt = 0
4401 }
4402 }
4403 }
4404
4405
4406
4407
4408
4409
4410
4411
4412
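// save updates getg().sched to refer to pc, sp, and bp so that a following
// gogo will restore them. It must not be called on a system g (g0 or
// gsignal) and must not clobber g.sched.ctxt, which may hold a live pointer.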
4413 func save(pc, sp, bp uintptr) {
4414 gp := getg()
4415
4416 if gp == gp.m.g0 || gp == gp.m.gsignal {
4417
4418
4419
4420
4421
4422 throw("save on system g not allowed")
4423 }
4424
4425 gp.sched.pc = pc
4426 gp.sched.sp = sp
4427 gp.sched.lr = 0
4428 gp.sched.bp = bp
4429
4430
4431
4432 if gp.sched.ctxt != nil {
4433 badctxt()
4434 }
4435 }
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
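// reentersyscall is the common syscall entry path: the goroutine is about to
// enter a system call and stop using the CPU. In outline, it records the
// caller's PC/SP/BP for traceback and GC, moves the G to _Gsyscall, and
// leaves its P in _Psyscall so sysmon can retake the P if the syscall
// blocks. Stack splits are disallowed throughout (throwsplit), and save is
// re-run after every systemstack call that might clobber g.sched.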
4461 func reentersyscall(pc, sp, bp uintptr) {
4462 trace := traceAcquire()
4463 gp := getg()
4464
4465
4466
4467 gp.m.locks++
4468
4469
4470
4471
4472
4473 gp.stackguard0 = stackPreempt
4474 gp.throwsplit = true
4475
4476
4477 save(pc, sp, bp)
4478 gp.syscallsp = sp
4479 gp.syscallpc = pc
4480 gp.syscallbp = bp
4481 casgstatus(gp, _Grunning, _Gsyscall)
4482 if staticLockRanking {
4483
4484
4485 save(pc, sp, bp)
4486 }
4487 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4488 systemstack(func() {
4489 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4490 throw("entersyscall")
4491 })
4492 }
4493 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4494 systemstack(func() {
4495 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4496 throw("entersyscall")
4497 })
4498 }
4499
4500 if trace.ok() {
4501 systemstack(func() {
4502 trace.GoSysCall()
4503 traceRelease(trace)
4504 })
4505
4506
4507
4508 save(pc, sp, bp)
4509 }
4510
4511 if sched.sysmonwait.Load() {
4512 systemstack(entersyscall_sysmon)
4513 save(pc, sp, bp)
4514 }
4515
4516 if gp.m.p.ptr().runSafePointFn != 0 {
4517
4518 systemstack(runSafePointFn)
4519 save(pc, sp, bp)
4520 }
4521
4522 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4523 pp := gp.m.p.ptr()
4524 pp.m = 0
4525 gp.m.oldp.set(pp)
4526 gp.m.p = 0
4527 atomic.Store(&pp.status, _Psyscall)
4528 if sched.gcwaiting.Load() {
4529 systemstack(entersyscall_gcwait)
4530 save(pc, sp, bp)
4531 }
4532
4533 gp.m.locks--
4534 }
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
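// entersyscall is the standard syscall entry used by the go syscall library
// and normal cgo calls; it simply forwards the caller's PC, SP, and frame
// pointer to reentersyscall.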
4550 func entersyscall() {
4551
4552
4553
4554
4555 fp := getcallerfp()
4556 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4557 }
4558
4559 func entersyscall_sysmon() {
4560 lock(&sched.lock)
4561 if sched.sysmonwait.Load() {
4562 sched.sysmonwait.Store(false)
4563 notewakeup(&sched.sysmonnote)
4564 }
4565 unlock(&sched.lock)
4566 }
4567
4568 func entersyscall_gcwait() {
4569 gp := getg()
4570 pp := gp.m.oldp.ptr()
4571
4572 lock(&sched.lock)
4573 trace := traceAcquire()
4574 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4575 if trace.ok() {
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585 trace.ProcSteal(pp, true)
4586 traceRelease(trace)
4587 }
4588 pp.gcStopTime = nanotime()
4589 pp.syscalltick++
4590 if sched.stopwait--; sched.stopwait == 0 {
4591 notewakeup(&sched.stopnote)
4592 }
4593 } else if trace.ok() {
4594 traceRelease(trace)
4595 }
4596 unlock(&sched.lock)
4597 }
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
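// entersyscallblock is the same as entersyscall, but with a hint that the
// syscall is blocking: the P is handed off immediately rather than waiting
// for sysmon to retake it.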
4611 func entersyscallblock() {
4612 gp := getg()
4613
4614 gp.m.locks++
4615 gp.throwsplit = true
4616 gp.stackguard0 = stackPreempt
4617 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4618 gp.m.p.ptr().syscalltick++
4619
4620
4621 pc := sys.GetCallerPC()
4622 sp := sys.GetCallerSP()
4623 bp := getcallerfp()
4624 save(pc, sp, bp)
4625 gp.syscallsp = gp.sched.sp
4626 gp.syscallpc = gp.sched.pc
4627 gp.syscallbp = gp.sched.bp
4628 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4629 sp1 := sp
4630 sp2 := gp.sched.sp
4631 sp3 := gp.syscallsp
4632 systemstack(func() {
4633 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4634 throw("entersyscallblock")
4635 })
4636 }
4637 casgstatus(gp, _Grunning, _Gsyscall)
4638 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4639 systemstack(func() {
4640 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4641 throw("entersyscallblock")
4642 })
4643 }
4644 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4645 systemstack(func() {
4646 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4647 throw("entersyscallblock")
4648 })
4649 }
4650
4651 systemstack(entersyscallblock_handoff)
4652
4653
4654 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4655
4656 gp.m.locks--
4657 }
4658
4659 func entersyscallblock_handoff() {
4660 trace := traceAcquire()
4661 if trace.ok() {
4662 trace.GoSysCall()
4663 traceRelease(trace)
4664 }
4665 handoffp(releasep())
4666 }
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
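// exitsyscall is called when the goroutine has finished its system call and
// needs to run on a CPU again. The fast path (exitsyscallfast) reattaches
// the P it left in _Psyscall or grabs an idle one; if that fails, the slow
// path switches to g0 and parks the goroutine via exitsyscall0.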
4688 func exitsyscall() {
4689 gp := getg()
4690
4691 gp.m.locks++
4692 if sys.GetCallerSP() > gp.syscallsp {
4693 throw("exitsyscall: syscall frame is no longer valid")
4694 }
4695
4696 gp.waitsince = 0
4697 oldp := gp.m.oldp.ptr()
4698 gp.m.oldp = 0
4699 if exitsyscallfast(oldp) {
4700
4701
4702 if goroutineProfile.active {
4703
4704
4705
4706 systemstack(func() {
4707 tryRecordGoroutineProfileWB(gp)
4708 })
4709 }
4710 trace := traceAcquire()
4711 if trace.ok() {
4712 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4713 systemstack(func() {
4714
4715
4716
4717
4718 trace.GoSysExit(lostP)
4719 if lostP {
4720
4721
4722
4723
4724 trace.GoStart()
4725 }
4726 })
4727 }
4728
4729 gp.m.p.ptr().syscalltick++
4730
4731 casgstatus(gp, _Gsyscall, _Grunning)
4732 if trace.ok() {
4733 traceRelease(trace)
4734 }
4735
4736
4737
4738 gp.syscallsp = 0
4739 gp.m.locks--
4740 if gp.preempt {
4741
4742 gp.stackguard0 = stackPreempt
4743 } else {
4744
4745 gp.stackguard0 = gp.stack.lo + stackGuard
4746 }
4747 gp.throwsplit = false
4748
4749 if sched.disable.user && !schedEnabled(gp) {
4750
4751 Gosched()
4752 }
4753
4754 return
4755 }
4756
4757 gp.m.locks--
4758
4759
4760 mcall(exitsyscall0)
4761
4762
4763
4764
4765
4766
4767
4768 gp.syscallsp = 0
4769 gp.m.p.ptr().syscalltick++
4770 gp.throwsplit = false
4771 }
4772
4773
4774 func exitsyscallfast(oldp *p) bool {
4775
4776 if sched.stopwait == freezeStopWait {
4777 return false
4778 }
4779
4780
4781 trace := traceAcquire()
4782 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4783
4784 wirep(oldp)
4785 exitsyscallfast_reacquired(trace)
4786 if trace.ok() {
4787 traceRelease(trace)
4788 }
4789 return true
4790 }
4791 if trace.ok() {
4792 traceRelease(trace)
4793 }
4794
4795
4796 if sched.pidle != 0 {
4797 var ok bool
4798 systemstack(func() {
4799 ok = exitsyscallfast_pidle()
4800 })
4801 if ok {
4802 return true
4803 }
4804 }
4805 return false
4806 }
4807
4808
4809
4810
4811
4812
4813 func exitsyscallfast_reacquired(trace traceLocker) {
4814 gp := getg()
4815 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4816 if trace.ok() {
4817
4818
4819
4820 systemstack(func() {
4821
4822
4823 trace.ProcSteal(gp.m.p.ptr(), true)
4824 trace.ProcStart()
4825 })
4826 }
4827 gp.m.p.ptr().syscalltick++
4828 }
4829 }
4830
4831 func exitsyscallfast_pidle() bool {
4832 lock(&sched.lock)
4833 pp, _ := pidleget(0)
4834 if pp != nil && sched.sysmonwait.Load() {
4835 sched.sysmonwait.Store(false)
4836 notewakeup(&sched.sysmonnote)
4837 }
4838 unlock(&sched.lock)
4839 if pp != nil {
4840 acquirep(pp)
4841 return true
4842 }
4843 return false
4844 }
4845
4846
4847
4848
4849
4850
4851
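// exitsyscall0 is the exitsyscall slow path, running on g0: no P could be
// acquired on the fast path, so gp is made runnable. If an idle P is
// available it runs gp immediately; otherwise gp goes on the global run
// queue. A goroutine locked to this M is waited for via stoplockedm;
// otherwise the M parks (stopm) and re-enters the scheduler.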
4852 func exitsyscall0(gp *g) {
4853 var trace traceLocker
4854 traceExitingSyscall()
4855 trace = traceAcquire()
4856 casgstatus(gp, _Gsyscall, _Grunnable)
4857 traceExitedSyscall()
4858 if trace.ok() {
4859
4860
4861
4862
4863 trace.GoSysExit(true)
4864 traceRelease(trace)
4865 }
4866 dropg()
4867 lock(&sched.lock)
4868 var pp *p
4869 if schedEnabled(gp) {
4870 pp, _ = pidleget(0)
4871 }
4872 var locked bool
4873 if pp == nil {
4874 globrunqput(gp)
4875
4876
4877
4878
4879
4880
4881 locked = gp.lockedm != 0
4882 } else if sched.sysmonwait.Load() {
4883 sched.sysmonwait.Store(false)
4884 notewakeup(&sched.sysmonnote)
4885 }
4886 unlock(&sched.lock)
4887 if pp != nil {
4888 acquirep(pp)
4889 execute(gp, false)
4890 }
4891 if locked {
4892
4893
4894
4895
4896 stoplockedm()
4897 execute(gp, false)
4898 }
4899 stopm()
4900 schedule()
4901 }
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915 func syscall_runtime_BeforeFork() {
4916 gp := getg().m.curg
4917
4918
4919
4920
4921 gp.m.locks++
4922 sigsave(&gp.m.sigmask)
4923 sigblock(false)
4924
4925
4926
4927
4928
4929 gp.stackguard0 = stackFork
4930 }
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944 func syscall_runtime_AfterFork() {
4945 gp := getg().m.curg
4946
4947
4948 gp.stackguard0 = gp.stack.lo + stackGuard
4949
4950 msigrestore(gp.m.sigmask)
4951
4952 gp.m.locks--
4953 }
4954
4955
4956
4957 var inForkedChild bool
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978 func syscall_runtime_AfterForkInChild() {
4979
4980
4981
4982
4983 inForkedChild = true
4984
4985 clearSignalHandlers()
4986
4987
4988
4989 msigrestore(getg().m.sigmask)
4990
4991 inForkedChild = false
4992 }
4993
4994
4995
4996
4997 var pendingPreemptSignals atomic.Int32
4998
4999
5000
5001
5002 func syscall_runtime_BeforeExec() {
5003
5004 execLock.lock()
5005
5006
5007
5008 if GOOS == "darwin" || GOOS == "ios" {
5009 for pendingPreemptSignals.Load() > 0 {
5010 osyield()
5011 }
5012 }
5013 }
5014
5015
5016
5017
5018 func syscall_runtime_AfterExec() {
5019 execLock.unlock()
5020 }
5021
5022
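// malg allocates a new g with a stack big enough for stacksize bytes.
// A negative stacksize means no stack is allocated.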
5023 func malg(stacksize int32) *g {
5024 newg := new(g)
5025 if stacksize >= 0 {
5026 stacksize = round2(stackSystem + stacksize)
5027 systemstack(func() {
5028 newg.stack = stackalloc(uint32(stacksize))
5029 })
5030 newg.stackguard0 = newg.stack.lo + stackGuard
5031 newg.stackguard1 = ^uintptr(0)
5032
5033
5034 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5035 }
5036 return newg
5037 }
5038
5039
5040
5041
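// newproc creates a new g running fn and puts it on the current P's run
// queue. The compiler turns a go statement into a call to newproc.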
5042 func newproc(fn *funcval) {
5043 gp := getg()
5044 pc := sys.GetCallerPC()
5045 systemstack(func() {
5046 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5047
5048 pp := getg().m.p.ptr()
5049 runqput(pp, newg, true)
5050
5051 if mainStarted {
5052 wakep()
5053 }
5054 })
5055 }
5056
5057
5058
5059
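// newproc1 creates a new g in state _Grunnable (or _Gwaiting if parked is
// true), starting at fn. callerpc is the address of the go statement that
// created this; callergp is the goroutine doing the creating. The caller is
// responsible for putting the new g somewhere it will be scheduled.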
5060 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5061 if fn == nil {
5062 fatal("go of nil func value")
5063 }
5064
5065 mp := acquirem()
5066 pp := mp.p.ptr()
5067 newg := gfget(pp)
5068 if newg == nil {
5069 newg = malg(stackMin)
5070 casgstatus(newg, _Gidle, _Gdead)
5071 allgadd(newg)
5072 }
5073 if newg.stack.hi == 0 {
5074 throw("newproc1: newg missing stack")
5075 }
5076
5077 if readgstatus(newg) != _Gdead {
5078 throw("newproc1: new g is not Gdead")
5079 }
5080
5081 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5082 totalSize = alignUp(totalSize, sys.StackAlign)
5083 sp := newg.stack.hi - totalSize
5084 if usesLR {
5085
5086 *(*uintptr)(unsafe.Pointer(sp)) = 0
5087 prepGoExitFrame(sp)
5088 }
5089 if GOARCH == "arm64" {
5090
5091 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5092 }
5093
5094 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5095 newg.sched.sp = sp
5096 newg.stktopsp = sp
5097 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5098 newg.sched.g = guintptr(unsafe.Pointer(newg))
5099 gostartcallfn(&newg.sched, fn)
5100 newg.parentGoid = callergp.goid
5101 newg.gopc = callerpc
5102 newg.ancestors = saveAncestors(callergp)
5103 newg.startpc = fn.fn
5104 if isSystemGoroutine(newg, false) {
5105 sched.ngsys.Add(1)
5106 } else {
5107
5108 newg.syncGroup = callergp.syncGroup
5109 if mp.curg != nil {
5110 newg.labels = mp.curg.labels
5111 }
5112 if goroutineProfile.active {
5113
5114
5115
5116
5117
5118 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5119 }
5120 }
5121
5122 newg.trackingSeq = uint8(cheaprand())
5123 if newg.trackingSeq%gTrackingPeriod == 0 {
5124 newg.tracking = true
5125 }
5126 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5127
5128
5129 trace := traceAcquire()
5130 var status uint32 = _Grunnable
5131 if parked {
5132 status = _Gwaiting
5133 newg.waitreason = waitreason
5134 }
5135 if pp.goidcache == pp.goidcacheend {
5136
5137
5138
5139 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5140 pp.goidcache -= _GoidCacheBatch - 1
5141 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5142 }
5143 newg.goid = pp.goidcache
5144 casgstatus(newg, _Gdead, status)
5145 pp.goidcache++
5146 newg.trace.reset()
5147 if trace.ok() {
5148 trace.GoCreate(newg, newg.startpc, parked)
5149 traceRelease(trace)
5150 }
5151
5152
5153 if raceenabled {
5154 newg.racectx = racegostart(callerpc)
5155 newg.raceignore = 0
5156 if newg.labels != nil {
5157
5158
5159 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5160 }
5161 }
5162 releasem(mp)
5163
5164 return newg
5165 }
5166
5167
5168
5169
5170 func saveAncestors(callergp *g) *[]ancestorInfo {
5171
5172 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5173 return nil
5174 }
5175 var callerAncestors []ancestorInfo
5176 if callergp.ancestors != nil {
5177 callerAncestors = *callergp.ancestors
5178 }
5179 n := int32(len(callerAncestors)) + 1
5180 if n > debug.tracebackancestors {
5181 n = debug.tracebackancestors
5182 }
5183 ancestors := make([]ancestorInfo, n)
5184 copy(ancestors[1:], callerAncestors)
5185
5186 var pcs [tracebackInnerFrames]uintptr
5187 npcs := gcallers(callergp, 0, pcs[:])
5188 ipcs := make([]uintptr, npcs)
5189 copy(ipcs, pcs[:])
5190 ancestors[0] = ancestorInfo{
5191 pcs: ipcs,
5192 goid: callergp.goid,
5193 gopc: callergp.gopc,
5194 }
5195
5196 ancestorsp := new([]ancestorInfo)
5197 *ancestorsp = ancestors
5198 return ancestorsp
5199 }
5200
5201
5202
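// gfput puts a dead g on the per-P free list for reuse by newproc1, freeing
// its stack first if it is not the standard starting size. If the local list
// grows too long, a batch is transferred to the global free list.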
5203 func gfput(pp *p, gp *g) {
5204 if readgstatus(gp) != _Gdead {
5205 throw("gfput: bad status (not Gdead)")
5206 }
5207
5208 stksize := gp.stack.hi - gp.stack.lo
5209
5210 if stksize != uintptr(startingStackSize) {
5211
5212 stackfree(gp.stack)
5213 gp.stack.lo = 0
5214 gp.stack.hi = 0
5215 gp.stackguard0 = 0
5216 }
5217
5218 pp.gFree.push(gp)
5219 pp.gFree.n++
5220 if pp.gFree.n >= 64 {
5221 var (
5222 inc int32
5223 stackQ gQueue
5224 noStackQ gQueue
5225 )
5226 for pp.gFree.n >= 32 {
5227 gp := pp.gFree.pop()
5228 pp.gFree.n--
5229 if gp.stack.lo == 0 {
5230 noStackQ.push(gp)
5231 } else {
5232 stackQ.push(gp)
5233 }
5234 inc++
5235 }
5236 lock(&sched.gFree.lock)
5237 sched.gFree.noStack.pushAll(noStackQ)
5238 sched.gFree.stack.pushAll(stackQ)
5239 sched.gFree.n += inc
5240 unlock(&sched.gFree.lock)
5241 }
5242 }
5243
5244
5245
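// gfget takes a dead g off the per-P free list, refilling the local list
// from the global one if needed, and gives it a fresh stack if it no longer
// has one or had a non-standard one. Returns nil if no free g is cached.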
5246 func gfget(pp *p) *g {
5247 retry:
5248 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5249 lock(&sched.gFree.lock)
5250
5251 for pp.gFree.n < 32 {
5252
5253 gp := sched.gFree.stack.pop()
5254 if gp == nil {
5255 gp = sched.gFree.noStack.pop()
5256 if gp == nil {
5257 break
5258 }
5259 }
5260 sched.gFree.n--
5261 pp.gFree.push(gp)
5262 pp.gFree.n++
5263 }
5264 unlock(&sched.gFree.lock)
5265 goto retry
5266 }
5267 gp := pp.gFree.pop()
5268 if gp == nil {
5269 return nil
5270 }
5271 pp.gFree.n--
5272 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5273
5274
5275
5276 systemstack(func() {
5277 stackfree(gp.stack)
5278 gp.stack.lo = 0
5279 gp.stack.hi = 0
5280 gp.stackguard0 = 0
5281 })
5282 }
5283 if gp.stack.lo == 0 {
5284
5285 systemstack(func() {
5286 gp.stack = stackalloc(startingStackSize)
5287 })
5288 gp.stackguard0 = gp.stack.lo + stackGuard
5289 } else {
5290 if raceenabled {
5291 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5292 }
5293 if msanenabled {
5294 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5295 }
5296 if asanenabled {
5297 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5298 }
5299 }
5300 return gp
5301 }
5302
5303
5304 func gfpurge(pp *p) {
5305 var (
5306 inc int32
5307 stackQ gQueue
5308 noStackQ gQueue
5309 )
5310 for !pp.gFree.empty() {
5311 gp := pp.gFree.pop()
5312 pp.gFree.n--
5313 if gp.stack.lo == 0 {
5314 noStackQ.push(gp)
5315 } else {
5316 stackQ.push(gp)
5317 }
5318 inc++
5319 }
5320 lock(&sched.gFree.lock)
5321 sched.gFree.noStack.pushAll(noStackQ)
5322 sched.gFree.stack.pushAll(stackQ)
5323 sched.gFree.n += inc
5324 unlock(&sched.gFree.lock)
5325 }
5326
5327
5328 func Breakpoint() {
5329 breakpoint()
5330 }
5331
5332
5333
5334
5335
5336
5337 func dolockOSThread() {
5338 if GOARCH == "wasm" {
5339 return
5340 }
5341 gp := getg()
5342 gp.m.lockedg.set(gp)
5343 gp.lockedm.set(gp.m)
5344 }
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
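// LockOSThread wires the calling goroutine to its current operating system
// thread: until the goroutine exits or calls UnlockOSThread as many times
// as it called LockOSThread, it always executes on that thread and no other
// goroutine does. This matters for OS services and non-Go libraries that
// keep per-thread state. A typical pairing, sketched here only as an
// illustrative example:
//
//	runtime.LockOSThread()
//	defer runtime.UnlockOSThread()
//	// ... call thread-affine OS or C APIs ...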
5362 func LockOSThread() {
5363 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5364
5365
5366
5367 startTemplateThread()
5368 }
5369 gp := getg()
5370 gp.m.lockedExt++
5371 if gp.m.lockedExt == 0 {
5372 gp.m.lockedExt--
5373 panic("LockOSThread nesting overflow")
5374 }
5375 dolockOSThread()
5376 }
5377
5378
5379 func lockOSThread() {
5380 getg().m.lockedInt++
5381 dolockOSThread()
5382 }
5383
5384
5385
5386
5387
5388
5389 func dounlockOSThread() {
5390 if GOARCH == "wasm" {
5391 return
5392 }
5393 gp := getg()
5394 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5395 return
5396 }
5397 gp.m.lockedg = 0
5398 gp.lockedm = 0
5399 }
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415 func UnlockOSThread() {
5416 gp := getg()
5417 if gp.m.lockedExt == 0 {
5418 return
5419 }
5420 gp.m.lockedExt--
5421 dounlockOSThread()
5422 }
5423
5424
5425 func unlockOSThread() {
5426 gp := getg()
5427 if gp.m.lockedInt == 0 {
5428 systemstack(badunlockosthread)
5429 }
5430 gp.m.lockedInt--
5431 dounlockOSThread()
5432 }
5433
5434 func badunlockosthread() {
5435 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5436 }
5437
5438 func gcount() int32 {
5439 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5440 for _, pp := range allp {
5441 n -= pp.gFree.n
5442 }
5443
5444
5445
5446 if n < 1 {
5447 n = 1
5448 }
5449 return n
5450 }
5451
5452 func mcount() int32 {
5453 return int32(sched.mnext - sched.nmfreed)
5454 }
5455
5456 var prof struct {
5457 signalLock atomic.Uint32
5458
5459
5460
5461 hz atomic.Int32
5462 }
5463
5464 func _System() { _System() }
5465 func _ExternalCode() { _ExternalCode() }
5466 func _LostExternalCode() { _LostExternalCode() }
5467 func _GC() { _GC() }
5468 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5469 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5470 func _VDSO() { _VDSO() }
5471
5472
5473
5474
5475
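// sigprof runs when a SIGPROF (CPU profiling) signal arrives. It collects a
// traceback of the interrupted goroutine, preferring recorded cgo, libcall,
// or VDSO frames when the signal landed outside Go code, and adds the sample
// to the CPU profile. It can fire at almost any point, so it must not
// allocate or take locks; mallocing is bumped as a trap to catch accidental
// allocation.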
5476 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5477 if prof.hz.Load() == 0 {
5478 return
5479 }
5480
5481
5482
5483
5484 if mp != nil && mp.profilehz == 0 {
5485 return
5486 }
5487
5488
5489
5490
5491
5492
5493
5494 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5495 if f := findfunc(pc); f.valid() {
5496 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5497 cpuprof.lostAtomic++
5498 return
5499 }
5500 }
5501 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5502
5503
5504
5505 cpuprof.lostAtomic++
5506 return
5507 }
5508 }
5509
5510
5511
5512
5513
5514
5515
5516 getg().m.mallocing++
5517
5518 var u unwinder
5519 var stk [maxCPUProfStack]uintptr
5520 n := 0
5521 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5522 cgoOff := 0
5523
5524
5525
5526
5527
5528 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5529 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5530 cgoOff++
5531 }
5532 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5533 mp.cgoCallers[0] = 0
5534 }
5535
5536
5537 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5538 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5539
5540
5541 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5542 } else if mp != nil && mp.vdsoSP != 0 {
5543
5544
5545 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5546 } else {
5547 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5548 }
5549 n += tracebackPCs(&u, 0, stk[n:])
5550
5551 if n <= 0 {
5552
5553
5554 n = 2
5555 if inVDSOPage(pc) {
5556 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5557 } else if pc > firstmoduledata.etext {
5558
5559 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5560 }
5561 stk[0] = pc
5562 if mp.preemptoff != "" {
5563 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5564 } else {
5565 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5566 }
5567 }
5568
5569 if prof.hz.Load() != 0 {
5570
5571
5572
5573 var tagPtr *unsafe.Pointer
5574 if gp != nil && gp.m != nil && gp.m.curg != nil {
5575 tagPtr = &gp.m.curg.labels
5576 }
5577 cpuprof.add(tagPtr, stk[:n])
5578
5579 gprof := gp
5580 var mp *m
5581 var pp *p
5582 if gp != nil && gp.m != nil {
5583 if gp.m.curg != nil {
5584 gprof = gp.m.curg
5585 }
5586 mp = gp.m
5587 pp = gp.m.p.ptr()
5588 }
5589 traceCPUSample(gprof, mp, pp, stk[:n])
5590 }
5591 getg().m.mallocing--
5592 }
5593
5594
5595
5596 func setcpuprofilerate(hz int32) {
5597
5598 if hz < 0 {
5599 hz = 0
5600 }
5601
5602
5603
5604 gp := getg()
5605 gp.m.locks++
5606
5607
5608
5609
5610 setThreadCPUProfiler(0)
5611
5612 for !prof.signalLock.CompareAndSwap(0, 1) {
5613 osyield()
5614 }
5615 if prof.hz.Load() != hz {
5616 setProcessCPUProfiler(hz)
5617 prof.hz.Store(hz)
5618 }
5619 prof.signalLock.Store(0)
5620
5621 lock(&sched.lock)
5622 sched.profilehz = hz
5623 unlock(&sched.lock)
5624
5625 if hz != 0 {
5626 setThreadCPUProfiler(hz)
5627 }
5628
5629 gp.m.locks--
5630 }
5631
5632
5633
5634 func (pp *p) init(id int32) {
5635 pp.id = id
5636 pp.status = _Pgcstop
5637 pp.sudogcache = pp.sudogbuf[:0]
5638 pp.deferpool = pp.deferpoolbuf[:0]
5639 pp.wbBuf.reset()
5640 if pp.mcache == nil {
5641 if id == 0 {
5642 if mcache0 == nil {
5643 throw("missing mcache?")
5644 }
5645
5646
5647 pp.mcache = mcache0
5648 } else {
5649 pp.mcache = allocmcache()
5650 }
5651 }
5652 if raceenabled && pp.raceprocctx == 0 {
5653 if id == 0 {
5654 pp.raceprocctx = raceprocctx0
5655 raceprocctx0 = 0
5656 } else {
5657 pp.raceprocctx = raceproccreate()
5658 }
5659 }
5660 lockInit(&pp.timers.mu, lockRankTimers)
5661
5662
5663
5664 timerpMask.set(id)
5665
5666
5667 idlepMask.clear(id)
5668 }
5669
5670
5671
5672
5673
5674 func (pp *p) destroy() {
5675 assertLockHeld(&sched.lock)
5676 assertWorldStopped()
5677
5678
5679 for pp.runqhead != pp.runqtail {
5680
5681 pp.runqtail--
5682 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5683
5684 globrunqputhead(gp)
5685 }
5686 if pp.runnext != 0 {
5687 globrunqputhead(pp.runnext.ptr())
5688 pp.runnext = 0
5689 }
5690
5691
5692 getg().m.p.ptr().timers.take(&pp.timers)
5693
5694
5695 if gcphase != _GCoff {
5696 wbBufFlush1(pp)
5697 pp.gcw.dispose()
5698 }
5699 for i := range pp.sudogbuf {
5700 pp.sudogbuf[i] = nil
5701 }
5702 pp.sudogcache = pp.sudogbuf[:0]
5703 pp.pinnerCache = nil
5704 for j := range pp.deferpoolbuf {
5705 pp.deferpoolbuf[j] = nil
5706 }
5707 pp.deferpool = pp.deferpoolbuf[:0]
5708 systemstack(func() {
5709 for i := 0; i < pp.mspancache.len; i++ {
5710
5711 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5712 }
5713 pp.mspancache.len = 0
5714 lock(&mheap_.lock)
5715 pp.pcache.flush(&mheap_.pages)
5716 unlock(&mheap_.lock)
5717 })
5718 freemcache(pp.mcache)
5719 pp.mcache = nil
5720 gfpurge(pp)
5721 if raceenabled {
5722 if pp.timers.raceCtx != 0 {
5723
5724
5725
5726
5727
5728 mp := getg().m
5729 phold := mp.p.ptr()
5730 mp.p.set(pp)
5731
5732 racectxend(pp.timers.raceCtx)
5733 pp.timers.raceCtx = 0
5734
5735 mp.p.set(phold)
5736 }
5737 raceprocdestroy(pp.raceprocctx)
5738 pp.raceprocctx = 0
5739 }
5740 pp.gcAssistTime = 0
5741 pp.status = _Pdead
5742 }
5743
5744
5745
5746
5747
5748
5749
5750
5751
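// procresize changes the number of processors to nprocs. sched.lock must be
// held and the world must be stopped. It returns the list of Ps that have
// local work and therefore need to be started by the caller; the caller's
// own P (allp[0] if its old P went away) is wired up here.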
5752 func procresize(nprocs int32) *p {
5753 assertLockHeld(&sched.lock)
5754 assertWorldStopped()
5755
5756 old := gomaxprocs
5757 if old < 0 || nprocs <= 0 {
5758 throw("procresize: invalid arg")
5759 }
5760 trace := traceAcquire()
5761 if trace.ok() {
5762 trace.Gomaxprocs(nprocs)
5763 traceRelease(trace)
5764 }
5765
5766
5767 now := nanotime()
5768 if sched.procresizetime != 0 {
5769 sched.totaltime += int64(old) * (now - sched.procresizetime)
5770 }
5771 sched.procresizetime = now
5772
5773 maskWords := (nprocs + 31) / 32
5774
5775
5776 if nprocs > int32(len(allp)) {
5777
5778
5779 lock(&allpLock)
5780 if nprocs <= int32(cap(allp)) {
5781 allp = allp[:nprocs]
5782 } else {
5783 nallp := make([]*p, nprocs)
5784
5785
5786 copy(nallp, allp[:cap(allp)])
5787 allp = nallp
5788 }
5789
5790 if maskWords <= int32(cap(idlepMask)) {
5791 idlepMask = idlepMask[:maskWords]
5792 timerpMask = timerpMask[:maskWords]
5793 } else {
5794 nidlepMask := make([]uint32, maskWords)
5795
5796 copy(nidlepMask, idlepMask)
5797 idlepMask = nidlepMask
5798
5799 ntimerpMask := make([]uint32, maskWords)
5800 copy(ntimerpMask, timerpMask)
5801 timerpMask = ntimerpMask
5802 }
5803 unlock(&allpLock)
5804 }
5805
5806
5807 for i := old; i < nprocs; i++ {
5808 pp := allp[i]
5809 if pp == nil {
5810 pp = new(p)
5811 }
5812 pp.init(i)
5813 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5814 }
5815
5816 gp := getg()
5817 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5818
5819 gp.m.p.ptr().status = _Prunning
5820 gp.m.p.ptr().mcache.prepareForSweep()
5821 } else {
5822
5823
5824
5825
5826
5827 if gp.m.p != 0 {
5828 trace := traceAcquire()
5829 if trace.ok() {
5830
5831
5832
5833 trace.GoSched()
5834 trace.ProcStop(gp.m.p.ptr())
5835 traceRelease(trace)
5836 }
5837 gp.m.p.ptr().m = 0
5838 }
5839 gp.m.p = 0
5840 pp := allp[0]
5841 pp.m = 0
5842 pp.status = _Pidle
5843 acquirep(pp)
5844 trace := traceAcquire()
5845 if trace.ok() {
5846 trace.GoStart()
5847 traceRelease(trace)
5848 }
5849 }
5850
5851
5852 mcache0 = nil
5853
5854
5855 for i := nprocs; i < old; i++ {
5856 pp := allp[i]
5857 pp.destroy()
5858
5859 }
5860
5861
5862 if int32(len(allp)) != nprocs {
5863 lock(&allpLock)
5864 allp = allp[:nprocs]
5865 idlepMask = idlepMask[:maskWords]
5866 timerpMask = timerpMask[:maskWords]
5867 unlock(&allpLock)
5868 }
5869
5870 var runnablePs *p
5871 for i := nprocs - 1; i >= 0; i-- {
5872 pp := allp[i]
5873 if gp.m.p.ptr() == pp {
5874 continue
5875 }
5876 pp.status = _Pidle
5877 if runqempty(pp) {
5878 pidleput(pp, now)
5879 } else {
5880 pp.m.set(mget())
5881 pp.link.set(runnablePs)
5882 runnablePs = pp
5883 }
5884 }
5885 stealOrder.reset(uint32(nprocs))
5886 var int32p *int32 = &gomaxprocs
5887 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5888 if old != nprocs {
5889
5890 gcCPULimiter.resetCapacity(now, nprocs)
5891 }
5892 return runnablePs
5893 }
5894
5895
5896
5897
5898
5899
5900
5901 func acquirep(pp *p) {
5902
5903 wirep(pp)
5904
5905
5906
5907
5908
5909 pp.mcache.prepareForSweep()
5910
5911 trace := traceAcquire()
5912 if trace.ok() {
5913 trace.ProcStart()
5914 traceRelease(trace)
5915 }
5916 }
5917
5918
5919
5920
5921
5922
5923
5924 func wirep(pp *p) {
5925 gp := getg()
5926
5927 if gp.m.p != 0 {
5928
5929
5930 systemstack(func() {
5931 throw("wirep: already in go")
5932 })
5933 }
5934 if pp.m != 0 || pp.status != _Pidle {
5935
5936
5937 systemstack(func() {
5938 id := int64(0)
5939 if pp.m != 0 {
5940 id = pp.m.ptr().id
5941 }
5942 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5943 throw("wirep: invalid p state")
5944 })
5945 }
5946 gp.m.p.set(pp)
5947 pp.m.set(gp.m)
5948 pp.status = _Prunning
5949 }
5950
5951
5952 func releasep() *p {
5953 trace := traceAcquire()
5954 if trace.ok() {
5955 trace.ProcStop(getg().m.p.ptr())
5956 traceRelease(trace)
5957 }
5958 return releasepNoTrace()
5959 }
5960
5961
5962 func releasepNoTrace() *p {
5963 gp := getg()
5964
5965 if gp.m.p == 0 {
5966 throw("releasep: invalid arg")
5967 }
5968 pp := gp.m.p.ptr()
5969 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5970 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5971 throw("releasep: invalid p state")
5972 }
5973 gp.m.p = 0
5974 pp.m = 0
5975 pp.status = _Pidle
5976 return pp
5977 }
5978
5979 func incidlelocked(v int32) {
5980 lock(&sched.lock)
5981 sched.nmidlelocked += v
5982 if v > 0 {
5983 checkdead()
5984 }
5985 unlock(&sched.lock)
5986 }
5987
5988
5989
5990
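// checkdead checks for a deadlock: every goroutine is blocked and no M can
// make progress. It throws or fatals when it finds one. It must be called
// with sched.lock held, and it accounts for fake-time timers and c-archive /
// c-shared builds, which are allowed to go quiet.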
5991 func checkdead() {
5992 assertLockHeld(&sched.lock)
5993
5994
5995
5996
5997
5998
5999 if (islibrary || isarchive) && GOARCH != "wasm" {
6000 return
6001 }
6002
6003
6004
6005
6006
6007 if panicking.Load() > 0 {
6008 return
6009 }
6010
6011
6012
6013
6014
6015 var run0 int32
6016 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6017 run0 = 1
6018 }
6019
6020 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6021 if run > run0 {
6022 return
6023 }
6024 if run < 0 {
6025 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6026 unlock(&sched.lock)
6027 throw("checkdead: inconsistent counts")
6028 }
6029
6030 grunning := 0
6031 forEachG(func(gp *g) {
6032 if isSystemGoroutine(gp, false) {
6033 return
6034 }
6035 s := readgstatus(gp)
6036 switch s &^ _Gscan {
6037 case _Gwaiting,
6038 _Gpreempted:
6039 grunning++
6040 case _Grunnable,
6041 _Grunning,
6042 _Gsyscall:
6043 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6044 unlock(&sched.lock)
6045 throw("checkdead: runnable g")
6046 }
6047 })
6048 if grunning == 0 {
6049 unlock(&sched.lock)
6050 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6051 }
6052
6053
6054 if faketime != 0 {
6055 if when := timeSleepUntil(); when < maxWhen {
6056 faketime = when
6057
6058
6059 pp, _ := pidleget(faketime)
6060 if pp == nil {
6061
6062
6063 unlock(&sched.lock)
6064 throw("checkdead: no p for timer")
6065 }
6066 mp := mget()
6067 if mp == nil {
6068
6069
6070 unlock(&sched.lock)
6071 throw("checkdead: no m for timer")
6072 }
6073
6074
6075
6076 sched.nmspinning.Add(1)
6077 mp.spinning = true
6078 mp.nextp.set(pp)
6079 notewakeup(&mp.park)
6080 return
6081 }
6082 }
6083
6084
6085 for _, pp := range allp {
6086 if len(pp.timers.heap) > 0 {
6087 return
6088 }
6089 }
6090
6091 unlock(&sched.lock)
6092 fatal("all goroutines are asleep - deadlock!")
6093 }
6094
6095
6096
6097
6098
6099
6100 var forcegcperiod int64 = 2 * 60 * 1e9
6101
6102
6103
6104 var needSysmonWorkaround bool = false
6105
6106
6107
6108
6109 const haveSysmon = GOARCH != "wasm"
6110
6111
6112
6113
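// sysmon is the system monitor. It runs on a dedicated M without a P, so it
// may not use write barriers. In a loop with adaptive sleeping it polls the
// network if nobody else has recently, retakes Ps stuck in syscalls and
// preempts long-running Gs (via retake), wakes the scavenger when asked, and
// kicks off forced GCs and scheduler traces.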
6114 func sysmon() {
6115 lock(&sched.lock)
6116 sched.nmsys++
6117 checkdead()
6118 unlock(&sched.lock)
6119
6120 lasttrace := int64(0)
6121 idle := 0
6122 delay := uint32(0)
6123
6124 for {
6125 if idle == 0 {
6126 delay = 20
6127 } else if idle > 50 {
6128 delay *= 2
6129 }
6130 if delay > 10*1000 {
6131 delay = 10 * 1000
6132 }
6133 usleep(delay)
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
6148
6149
6150 now := nanotime()
6151 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6152 lock(&sched.lock)
6153 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6154 syscallWake := false
6155 next := timeSleepUntil()
6156 if next > now {
6157 sched.sysmonwait.Store(true)
6158 unlock(&sched.lock)
6159
6160
6161 sleep := forcegcperiod / 2
6162 if next-now < sleep {
6163 sleep = next - now
6164 }
6165 shouldRelax := sleep >= osRelaxMinNS
6166 if shouldRelax {
6167 osRelax(true)
6168 }
6169 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6170 if shouldRelax {
6171 osRelax(false)
6172 }
6173 lock(&sched.lock)
6174 sched.sysmonwait.Store(false)
6175 noteclear(&sched.sysmonnote)
6176 }
6177 if syscallWake {
6178 idle = 0
6179 delay = 20
6180 }
6181 }
6182 unlock(&sched.lock)
6183 }
6184
6185 lock(&sched.sysmonlock)
6186
6187
6188 now = nanotime()
6189
6190
6191 if *cgo_yield != nil {
6192 asmcgocall(*cgo_yield, nil)
6193 }
6194
6195 lastpoll := sched.lastpoll.Load()
6196 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6197 sched.lastpoll.CompareAndSwap(lastpoll, now)
6198 list, delta := netpoll(0)
6199 if !list.empty() {
6200
6201
6202
6203
6204
6205
6206
6207 incidlelocked(-1)
6208 injectglist(&list)
6209 incidlelocked(1)
6210 netpollAdjustWaiters(delta)
6211 }
6212 }
6213 if GOOS == "netbsd" && needSysmonWorkaround {
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229 if next := timeSleepUntil(); next < now {
6230 startm(nil, false, false)
6231 }
6232 }
6233 if scavenger.sysmonWake.Load() != 0 {
6234
6235 scavenger.wake()
6236 }
6237
6238
6239 if retake(now) != 0 {
6240 idle = 0
6241 } else {
6242 idle++
6243 }
6244
6245 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6246 lock(&forcegc.lock)
6247 forcegc.idle.Store(false)
6248 var list gList
6249 list.push(forcegc.g)
6250 injectglist(&list)
6251 unlock(&forcegc.lock)
6252 }
6253 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6254 lasttrace = now
6255 schedtrace(debug.scheddetail > 0)
6256 }
6257 unlock(&sched.sysmonlock)
6258 }
6259 }
6260
6261 type sysmontick struct {
6262 schedtick uint32
6263 syscalltick uint32
6264 schedwhen int64
6265 syscallwhen int64
6266 }
6267
6268
6269
6270 const forcePreemptNS = 10 * 1000 * 1000
6271
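// retake retakes Ps from Ms blocked in long system calls (handing the P off
// so other work can run) and requests preemption of Gs that have been
// running for more than forcePreemptNS. It returns the number of Ps taken.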
6272 func retake(now int64) uint32 {
6273 n := 0
6274
6275
6276 lock(&allpLock)
6277
6278
6279
6280 for i := 0; i < len(allp); i++ {
6281 pp := allp[i]
6282 if pp == nil {
6283
6284
6285 continue
6286 }
6287 pd := &pp.sysmontick
6288 s := pp.status
6289 sysretake := false
6290 if s == _Prunning || s == _Psyscall {
6291
6292
6293
6294
6295 t := int64(pp.schedtick)
6296 if int64(pd.schedtick) != t {
6297 pd.schedtick = uint32(t)
6298 pd.schedwhen = now
6299 } else if pd.schedwhen+forcePreemptNS <= now {
6300 preemptone(pp)
6301
6302
6303 sysretake = true
6304 }
6305 }
6306 if s == _Psyscall {
6307
6308 t := int64(pp.syscalltick)
6309 if !sysretake && int64(pd.syscalltick) != t {
6310 pd.syscalltick = uint32(t)
6311 pd.syscallwhen = now
6312 continue
6313 }
6314
6315
6316
6317 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6318 continue
6319 }
6320
6321 unlock(&allpLock)
6322
6323
6324
6325
6326 incidlelocked(-1)
6327 trace := traceAcquire()
6328 if atomic.Cas(&pp.status, s, _Pidle) {
6329 if trace.ok() {
6330 trace.ProcSteal(pp, false)
6331 traceRelease(trace)
6332 }
6333 n++
6334 pp.syscalltick++
6335 handoffp(pp)
6336 } else if trace.ok() {
6337 traceRelease(trace)
6338 }
6339 incidlelocked(1)
6340 lock(&allpLock)
6341 }
6342 }
6343 unlock(&allpLock)
6344 return uint32(n)
6345 }
6346
6347
6348
6349
6350
6351
6352 func preemptall() bool {
6353 res := false
6354 for _, pp := range allp {
6355 if pp.status != _Prunning {
6356 continue
6357 }
6358 if preemptone(pp) {
6359 res = true
6360 }
6361 }
6362 return res
6363 }
6364
6365
6366
6367
6368
6369
6370
6371
6372
6373
6374
6375 func preemptone(pp *p) bool {
6376 mp := pp.m.ptr()
6377 if mp == nil || mp == getg().m {
6378 return false
6379 }
6380 gp := mp.curg
6381 if gp == nil || gp == mp.g0 {
6382 return false
6383 }
6384
6385 gp.preempt = true
6386
6387
6388
6389
6390
6391 gp.stackguard0 = stackPreempt
6392
6393
6394 if preemptMSupported && debug.asyncpreemptoff == 0 {
6395 pp.preempt = true
6396 preemptM(mp)
6397 }
6398
6399 return true
6400 }
6401
6402 var starttime int64
6403
6404 func schedtrace(detailed bool) {
6405 now := nanotime()
6406 if starttime == 0 {
6407 starttime = now
6408 }
6409
6410 lock(&sched.lock)
6411 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6412 if detailed {
6413 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6414 }
6415
6416
6417
6418 for i, pp := range allp {
6419 h := atomic.Load(&pp.runqhead)
6420 t := atomic.Load(&pp.runqtail)
6421 if detailed {
6422 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6423 mp := pp.m.ptr()
6424 if mp != nil {
6425 print(mp.id)
6426 } else {
6427 print("nil")
6428 }
6429 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6430 } else {
6431
6432
6433 print(" ")
6434 if i == 0 {
6435 print("[ ")
6436 }
6437 print(t - h)
6438 if i == len(allp)-1 {
6439 print(" ]")
6440 }
6441 }
6442 }
6443
6444 if !detailed {
6445
6446 print(" schedticks=[ ")
6447 for _, pp := range allp {
6448 print(pp.schedtick)
6449 print(" ")
6450 }
6451 print("]\n")
6452 }
6453
6454 if !detailed {
6455 unlock(&sched.lock)
6456 return
6457 }
6458
6459 for mp := allm; mp != nil; mp = mp.alllink {
6460 pp := mp.p.ptr()
6461 print(" M", mp.id, ": p=")
6462 if pp != nil {
6463 print(pp.id)
6464 } else {
6465 print("nil")
6466 }
6467 print(" curg=")
6468 if mp.curg != nil {
6469 print(mp.curg.goid)
6470 } else {
6471 print("nil")
6472 }
6473 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6474 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6475 print(lockedg.goid)
6476 } else {
6477 print("nil")
6478 }
6479 print("\n")
6480 }
6481
6482 forEachG(func(gp *g) {
6483 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6484 if gp.m != nil {
6485 print(gp.m.id)
6486 } else {
6487 print("nil")
6488 }
6489 print(" lockedm=")
6490 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6491 print(lockedm.id)
6492 } else {
6493 print("nil")
6494 }
6495 print("\n")
6496 })
6497 unlock(&sched.lock)
6498 }
6499
6500
6501
6502
6503
6504
6505 func schedEnableUser(enable bool) {
6506 lock(&sched.lock)
6507 if sched.disable.user == !enable {
6508 unlock(&sched.lock)
6509 return
6510 }
6511 sched.disable.user = !enable
6512 if enable {
6513 n := sched.disable.n
6514 sched.disable.n = 0
6515 globrunqputbatch(&sched.disable.runnable, n)
6516 unlock(&sched.lock)
6517 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6518 startm(nil, false, false)
6519 }
6520 } else {
6521 unlock(&sched.lock)
6522 }
6523 }
6524
6525
6526
6527
6528
6529 func schedEnabled(gp *g) bool {
6530 assertLockHeld(&sched.lock)
6531
6532 if sched.disable.user {
6533 return isSystemGoroutine(gp, true)
6534 }
6535 return true
6536 }
6537
6538
6539
6540
6541
6542
6543 func mput(mp *m) {
6544 assertLockHeld(&sched.lock)
6545
6546 mp.schedlink = sched.midle
6547 sched.midle.set(mp)
6548 sched.nmidle++
6549 checkdead()
6550 }
6551
6552
6553
6554
6555
6556
6557 func mget() *m {
6558 assertLockHeld(&sched.lock)
6559
6560 mp := sched.midle.ptr()
6561 if mp != nil {
6562 sched.midle = mp.schedlink
6563 sched.nmidle--
6564 }
6565 return mp
6566 }
6567
6568
6569
6570
6571
6572
6573 func globrunqput(gp *g) {
6574 assertLockHeld(&sched.lock)
6575
6576 sched.runq.pushBack(gp)
6577 sched.runqsize++
6578 }
6579
6580
6581
6582
6583
6584
6585 func globrunqputhead(gp *g) {
6586 assertLockHeld(&sched.lock)
6587
6588 sched.runq.push(gp)
6589 sched.runqsize++
6590 }
6591
6592
6593
6594
6595
6596
6597
6598 func globrunqputbatch(batch *gQueue, n int32) {
6599 assertLockHeld(&sched.lock)
6600
6601 sched.runq.pushBackAll(*batch)
6602 sched.runqsize += n
6603 *batch = gQueue{}
6604 }
6605
6606
6607
6608 func globrunqget(pp *p, max int32) *g {
6609 assertLockHeld(&sched.lock)
6610
6611 if sched.runqsize == 0 {
6612 return nil
6613 }
6614
6615 n := sched.runqsize/gomaxprocs + 1
6616 if n > sched.runqsize {
6617 n = sched.runqsize
6618 }
6619 if max > 0 && n > max {
6620 n = max
6621 }
6622 if n > int32(len(pp.runq))/2 {
6623 n = int32(len(pp.runq)) / 2
6624 }
6625
6626 sched.runqsize -= n
6627
6628 gp := sched.runq.pop()
6629 n--
6630 for ; n > 0; n-- {
6631 gp1 := sched.runq.pop()
6632 runqput(pp, gp1, false)
6633 }
6634 return gp
6635 }
6636
6637
6638 type pMask []uint32
6639
6640
6641 func (p pMask) read(id uint32) bool {
6642 word := id / 32
6643 mask := uint32(1) << (id % 32)
6644 return (atomic.Load(&p[word]) & mask) != 0
6645 }
6646
6647
6648 func (p pMask) set(id int32) {
6649 word := id / 32
6650 mask := uint32(1) << (id % 32)
6651 atomic.Or(&p[word], mask)
6652 }
6653
6654
6655 func (p pMask) clear(id int32) {
6656 word := id / 32
6657 mask := uint32(1) << (id % 32)
6658 atomic.And(&p[word], ^mask)
6659 }
6660
6661
6662
6663
6664
6665
6666
6667
6668
6669
6670
6671
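// pidleput puts pp on the idle P list. pp's run queue must be empty and
// sched.lock must be held. now should be a recent nanotime() or 0; the
// (possibly refreshed) time is returned so callers can reuse it. The idle
// and timer masks are updated to match.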
6672 func pidleput(pp *p, now int64) int64 {
6673 assertLockHeld(&sched.lock)
6674
6675 if !runqempty(pp) {
6676 throw("pidleput: P has non-empty run queue")
6677 }
6678 if now == 0 {
6679 now = nanotime()
6680 }
6681 if pp.timers.len.Load() == 0 {
6682 timerpMask.clear(pp.id)
6683 }
6684 idlepMask.set(pp.id)
6685 pp.link = sched.pidle
6686 sched.pidle.set(pp)
6687 sched.npidle.Add(1)
6688 if !pp.limiterEvent.start(limiterEventIdle, now) {
6689 throw("must be able to track idle limiter event")
6690 }
6691 return now
6692 }
6693
6694
6695
6696
6697
6698
6699
6700
6701 func pidleget(now int64) (*p, int64) {
6702 assertLockHeld(&sched.lock)
6703
6704 pp := sched.pidle.ptr()
6705 if pp != nil {
6706
6707 if now == 0 {
6708 now = nanotime()
6709 }
6710 timerpMask.set(pp.id)
6711 idlepMask.clear(pp.id)
6712 sched.pidle = pp.link
6713 sched.npidle.Add(-1)
6714 pp.limiterEvent.stop(limiterEventIdle, now)
6715 }
6716 return pp, now
6717 }
6718
6719
6720
6721
6722
6723
6724
6725
6726
6727
6728
6729 func pidlegetSpinning(now int64) (*p, int64) {
6730 assertLockHeld(&sched.lock)
6731
6732 pp, now := pidleget(now)
6733 if pp == nil {
6734
6735
6736
6737 sched.needspinning.Store(1)
6738 return nil, now
6739 }
6740
6741 return pp, now
6742 }
6743
6744
6745
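// runqempty reports whether pp has no Gs on its local run queue. It never
// returns true spuriously: the retry loop rereads head, tail, and runnext so
// a G moving between runnext and the queue is not missed.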
6746 func runqempty(pp *p) bool {
6747
6748
6749
6750
6751 for {
6752 head := atomic.Load(&pp.runqhead)
6753 tail := atomic.Load(&pp.runqtail)
6754 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6755 if tail == atomic.Load(&pp.runqtail) {
6756 return head == tail && runnext == 0
6757 }
6758 }
6759 }
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770 const randomizeScheduler = raceenabled
6771
6772
6773
6774
6775
6776
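// runqput tries to put g on the local runnable queue. If next is true, g is
// placed in the pp.runnext slot and any previous runnext is kicked back to
// the queue. If the queue is full, runqputslow moves half of it (plus g) to
// the global run queue. Executed only by the owner P.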
6777 func runqput(pp *p, gp *g, next bool) {
6778 if !haveSysmon && next {
6779 // A runnext goroutine shares the same time slice as the
6780 // current goroutine (inheritTime from runqget). To prevent a
6781 // ping-pong pair of goroutines from starving all others, we
6782 // depend on sysmon to preempt "long-running goroutines", that
6783 // is, any set of goroutines sharing the same time slice.
6784 //
6785 // If there is no sysmon, we must avoid runnext entirely or
6786 // risk starvation.
6787 next = false
6788 }
6789 if randomizeScheduler && next && randn(2) == 0 {
6790 next = false
6791 }
6792
6793 if next {
6794 retryNext:
6795 oldnext := pp.runnext
6796 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6797 goto retryNext
6798 }
6799 if oldnext == 0 {
6800 return
6801 }
6802 // Kick the old runnext out to the regular run queue.
6803 gp = oldnext.ptr()
6804 }
6805
6806 retry:
6807 h := atomic.LoadAcq(&pp.runqhead)
6808 t := pp.runqtail
6809 if t-h < uint32(len(pp.runq)) {
6810 pp.runq[t%uint32(len(pp.runq))].set(gp)
6811 atomic.StoreRel(&pp.runqtail, t+1)
6812 return
6813 }
6814 if runqputslow(pp, gp, h, t) {
6815 return
6816 }
6817 // The queue is not full, so the put above must succeed on retry.
6818 goto retry
6819 }
6820
6821 // runqputslow puts g and a batch of work from the local runnable queue on the global queue.
6822 // Executed only by the owner P.
6823 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6824 var batch [len(pp.runq)/2 + 1]*g
6825
6826 // First, grab a batch from the local queue.
6827 n := t - h
6828 n = n / 2
6829 if n != uint32(len(pp.runq)/2) {
6830 throw("runqputslow: queue is not full")
6831 }
6832 for i := uint32(0); i < n; i++ {
6833 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6834 }
6835 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6836 return false
6837 }
6838 batch[n] = gp
6839
6840 if randomizeScheduler {
6841 for i := uint32(1); i <= n; i++ {
6842 j := cheaprandn(i + 1)
6843 batch[i], batch[j] = batch[j], batch[i]
6844 }
6845 }
6846
6847 // Link the goroutines.
6848 for i := uint32(0); i < n; i++ {
6849 batch[i].schedlink.set(batch[i+1])
6850 }
6851 var q gQueue
6852 q.head.set(batch[0])
6853 q.tail.set(batch[n])
6854
6855 // Now put the batch on the global queue.
6856 lock(&sched.lock)
6857 globrunqputbatch(&q, int32(n+1))
6858 unlock(&sched.lock)
6859 return true
6860 }
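// Illustrative sketch (a separate toy example, not part of proc.go): the
// spill-half policy of runqput/runqputslow reduced to a single-threaded model.
// When the fixed-size ring is full, half of the oldest items plus the new one
// move to an unbounded overflow slice, which stands in here for the global run
// queue, so the producer never blocks and older work becomes visible elsewhere.
// All names below (ring, put, overflow) are hypothetical.
package main

import "fmt"

type ring struct {
	buf      [4]int // tiny ring for demonstration; pp.runq has 256 slots
	head     uint32 // next item to pop
	tail     uint32 // next free slot
	overflow []int  // stands in for the global run queue
}

func (r *ring) put(x int) {
	if r.tail-r.head < uint32(len(r.buf)) {
		r.buf[r.tail%uint32(len(r.buf))] = x
		r.tail++
		return
	}
	// Ring is full: move the oldest half, plus x, to the overflow queue.
	n := (r.tail - r.head) / 2
	for i := uint32(0); i < n; i++ {
		r.overflow = append(r.overflow, r.buf[(r.head+i)%uint32(len(r.buf))])
	}
	r.head += n
	r.overflow = append(r.overflow, x)
}

func main() {
	var r ring
	for i := 1; i <= 6; i++ {
		r.put(i)
	}
	fmt.Println(r.overflow)      // [1 2 5]
	fmt.Println(r.tail - r.head) // 3 items left in the ring (3, 4, 6)
}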
6861
6862 // runqputbatch tries to put all the G's on q on the local runnable queue.
6863 // If the queue is full, they are put on the global queue; in that case
6864 // this will temporarily acquire the scheduler lock.
6865 // Executed only by the owner P.
6866 func runqputbatch(pp *p, q *gQueue, qsize int) {
6867 h := atomic.LoadAcq(&pp.runqhead)
6868 t := pp.runqtail
6869 n := uint32(0)
6870 for !q.empty() && t-h < uint32(len(pp.runq)) {
6871 gp := q.pop()
6872 pp.runq[t%uint32(len(pp.runq))].set(gp)
6873 t++
6874 n++
6875 }
6876 qsize -= int(n)
6877
6878 if randomizeScheduler {
6879 off := func(o uint32) uint32 {
6880 return (pp.runqtail + o) % uint32(len(pp.runq))
6881 }
6882 for i := uint32(1); i < n; i++ {
6883 j := cheaprandn(i + 1)
6884 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6885 }
6886 }
6887
6888 atomic.StoreRel(&pp.runqtail, t)
6889 if !q.empty() {
6890 lock(&sched.lock)
6891 globrunqputbatch(q, int32(qsize))
6892 unlock(&sched.lock)
6893 }
6894 }
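// Illustrative sketch (a separate example, not part of proc.go): the
// randomizeScheduler loops in runqputslow and runqputbatch amount to a
// Fisher-Yates shuffle performed while the batch is built: element i is
// swapped with a uniformly chosen j in [0, i], leaving every permutation
// equally likely. This standalone version uses math/rand/v2 (Go 1.22+) in
// place of the runtime's cheaprandn.
package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	batch := []string{"g1", "g2", "g3", "g4", "g5"}
	for i := 1; i < len(batch); i++ {
		j := rand.IntN(i + 1) // cheaprandn(i+1) in the runtime
		batch[i], batch[j] = batch[j], batch[i]
	}
	fmt.Println(batch) // some uniformly random permutation of g1..g5
}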
6895
6896 // Get g from the local runnable queue.
6897 // If inheritTime is true, gp should inherit the remaining time in the
6898 // current time slice. Otherwise, it should start a new time slice.
6899 // Executed only by the owner P.
6900 func runqget(pp *p) (gp *g, inheritTime bool) {
6901 // If there's a runnext, it's the next G to run.
6902 next := pp.runnext
6903 // If runnext is non-zero and the CAS fails, it could only have been stolen by another P,
6904 // because other Ps can race to set runnext to zero, but only the current P can set it to non-zero.
6905 // Hence, there's no need to retry this CAS if it fails.
6906 if next != 0 && pp.runnext.cas(next, 0) {
6907 return next.ptr(), true
6908 }
6909
6910 for {
6911 h := atomic.LoadAcq(&pp.runqhead)
6912 t := pp.runqtail
6913 if t == h {
6914 return nil, false
6915 }
6916 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6917 if atomic.CasRel(&pp.runqhead, h, h+1) {
6918 return gp, false
6919 }
6920 }
6921 }
6922
6923 // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
6924 // Executed only by the owner P.
6925 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6926 oldNext := pp.runnext
6927 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6928 drainQ.pushBack(oldNext.ptr())
6929 n++
6930 }
6931
6932 retry:
6933 h := atomic.LoadAcq(&pp.runqhead)
6934 t := pp.runqtail
6935 qn := t - h
6936 if qn == 0 {
6937 return
6938 }
6939 if qn > uint32(len(pp.runq)) {
6940 goto retry
6941 }
6942
6943 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6944 goto retry
6945 }
6946
6947 // Advance the head pointer before copying the G's out, rather than after:
6948 // once the head has moved past these slots, no other P can observe or steal
6949 // them, so it is safe to take full ownership of each G and update its
6950 // gp.schedlink while runqsteal() may be running concurrently on other Ps.
6951 // Copying first and advancing the head afterwards would let a concurrent
6952 // stealer grab a G whose schedlink we are in the middle of rewriting,
6953 // corrupting its state.
6954 for i := uint32(0); i < qn; i++ {
6955 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6956 drainQ.pushBack(gp)
6957 n++
6958 }
6959 return
6960 }
6961
6962 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
6963 // batch is a ring buffer starting at batchHead.
6964 // Returns the number of grabbed goroutines.
6965 // Can be executed by any P.
6966 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6967 for {
6968 h := atomic.LoadAcq(&pp.runqhead)
6969 t := atomic.LoadAcq(&pp.runqtail)
6970 n := t - h
6971 n = n - n/2
6972 if n == 0 {
6973 if stealRunNextG {
6974 // Try to steal from pp.runnext.
6975 if next := pp.runnext; next != 0 {
6976 if pp.status == _Prunning {
6977 // Sleep to ensure that pp isn't about to run the g
6978 // we are about to steal.
6979 // The important use case here is when the g running
6980 // on pp ready()s another g and then almost
6981 // immediately blocks. Instead of stealing runnext
6982 // in this window, back off to give pp a chance to
6983 // schedule runnext. This will avoid thrashing gs
6984 // between different Ps.
6985 // A sync chan send/recv takes ~50ns as of time of
6986 // writing, so 3us gives ~50x overshoot.
6987 if !osHasLowResTimer {
6988 usleep(3)
6989 } else {
6990 // On some platforms system timer granularity is
6991 // 1-15ms, which is way too much for this
6992 // optimization. So just yield.
6993 osyield()
6994 }
6995 }
6996 if !pp.runnext.cas(next, 0) {
6997 continue
6998 }
6999 batch[batchHead%uint32(len(batch))] = next
7000 return 1
7001 }
7002 }
7003 return 0
7004 }
7005 if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
7006 continue
7007 }
7008 for i := uint32(0); i < n; i++ {
7009 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7010 batch[(batchHead+i)%uint32(len(batch))] = g
7011 }
7012 if atomic.CasRel(&pp.runqhead, h, h+n) {
7013 return n
7014 }
7015 }
7016 }
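// Illustrative sketch (a separate example, not part of proc.go): runqgrab's
// "steal half" expression n - n/2 is the rounded-up half for unsigned n, so a
// victim with a single queued goroutine still loses that one goroutine to the
// thief rather than keeping it.
package main

import "fmt"

func main() {
	for _, n := range []uint32{0, 1, 2, 3, 7, 8} {
		fmt.Println(n, "->", n-n/2) // 0->0, 1->1, 2->1, 3->2, 7->4, 8->4
	}
}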
7017
7018 // runqsteal steals half of the elements from the local runnable queue of p2
7019 // and puts them onto the local runnable queue of pp.
7020 // Returns one of the stolen elements (or nil if it failed).
7021 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7022 t := pp.runqtail
7023 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7024 if n == 0 {
7025 return nil
7026 }
7027 n--
7028 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7029 if n == 0 {
7030 return gp
7031 }
7032 h := atomic.LoadAcq(&pp.runqhead)
7033 if t-h+n >= uint32(len(pp.runq)) {
7034 throw("runqsteal: runq overflow")
7035 }
7036 atomic.StoreRel(&pp.runqtail, t+n)
7037 return gp
7038 }
7039
7040 // A gQueue is a queue of Gs linked through g.schedlink. A G can only
7041 // be on one gQueue or gList at a time.
7042 type gQueue struct {
7043 head guintptr
7044 tail guintptr
7045 }
7046
7047 // empty reports whether q is empty.
7048 func (q *gQueue) empty() bool {
7049 return q.head == 0
7050 }
7051
7052 // push adds gp to the head of q.
7053 func (q *gQueue) push(gp *g) {
7054 gp.schedlink = q.head
7055 q.head.set(gp)
7056 if q.tail == 0 {
7057 q.tail.set(gp)
7058 }
7059 }
7060
7061 // pushBack adds gp to the tail of q.
7062 func (q *gQueue) pushBack(gp *g) {
7063 gp.schedlink = 0
7064 if q.tail != 0 {
7065 q.tail.ptr().schedlink.set(gp)
7066 } else {
7067 q.head.set(gp)
7068 }
7069 q.tail.set(gp)
7070 }
7071
7072 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
7073 // not be used.
7074 func (q *gQueue) pushBackAll(q2 gQueue) {
7075 if q2.tail == 0 {
7076 return
7077 }
7078 q2.tail.ptr().schedlink = 0
7079 if q.tail != 0 {
7080 q.tail.ptr().schedlink = q2.head
7081 } else {
7082 q.head = q2.head
7083 }
7084 q.tail = q2.tail
7085 }
7086
7087 // pop removes and returns the head of queue q. It returns nil if
7088 // q is empty.
7089 func (q *gQueue) pop() *g {
7090 gp := q.head.ptr()
7091 if gp != nil {
7092 q.head = gp.schedlink
7093 if q.head == 0 {
7094 q.tail = 0
7095 }
7096 }
7097 return gp
7098 }
7099
7100 // popList takes all Gs in q and returns them as a gList.
7101 func (q *gQueue) popList() gList {
7102 stack := gList{q.head}
7103 *q = gQueue{}
7104 return stack
7105 }
7106
7107 // A gList is a list of Gs linked through g.schedlink. A G can only be
7108 // on one gQueue or gList at a time.
7109 type gList struct {
7110 head guintptr
7111 }
7112
7113 // empty reports whether l is empty.
7114 func (l *gList) empty() bool {
7115 return l.head == 0
7116 }
7117
7118 // push adds gp to the head of l.
7119 func (l *gList) push(gp *g) {
7120 gp.schedlink = l.head
7121 l.head.set(gp)
7122 }
7123
7124 // pushAll prepends all Gs in q to l.
7125 func (l *gList) pushAll(q gQueue) {
7126 if !q.empty() {
7127 q.tail.ptr().schedlink = l.head
7128 l.head = q.head
7129 }
7130 }
7131
7132 // pop removes and returns the head of l. If l is empty, it returns nil.
7133 func (l *gList) pop() *g {
7134 gp := l.head.ptr()
7135 if gp != nil {
7136 l.head = gp.schedlink
7137 }
7138 return gp
7139 }
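// Illustrative sketch (a separate example, not part of proc.go): gQueue and
// gList are intrusive containers; the link field lives inside the element
// itself (g.schedlink), so queue operations allocate nothing. The same idea
// with a plain struct and a next pointer; the names task and queue below are
// hypothetical.
package main

import "fmt"

type task struct {
	id   int
	next *task // plays the role of g.schedlink
}

// queue is a FIFO of tasks linked through task.next, like gQueue.
type queue struct {
	head, tail *task
}

func (q *queue) pushBack(t *task) {
	t.next = nil
	if q.tail != nil {
		q.tail.next = t
	} else {
		q.head = t
	}
	q.tail = t
}

func (q *queue) pop() *task {
	t := q.head
	if t != nil {
		q.head = t.next
		if q.head == nil {
			q.tail = nil
		}
	}
	return t
}

func main() {
	var q queue
	for i := 1; i <= 3; i++ {
		q.pushBack(&task{id: i})
	}
	for t := q.pop(); t != nil; t = q.pop() {
		fmt.Println(t.id) // 1, 2, 3
	}
}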
7140
7141 //go:linkname setMaxThreads runtime/debug.setMaxThreads
7142 func setMaxThreads(in int) (out int) {
7143 lock(&sched.lock)
7144 out = int(sched.maxmcount)
7145 if in > 0x7fffffff {
7146 sched.maxmcount = 0x7fffffff
7147 } else {
7148 sched.maxmcount = int32(in)
7149 }
7150 checkmcount()
7151 unlock(&sched.lock)
7152 return
7153 }
7154
7155
7156
7157
7158
7159
7160
7161
7162
7163
7164
7165
7166
7167 func procPin() int {
7168 gp := getg()
7169 mp := gp.m
7170
7171 mp.locks++
7172 return int(mp.p.ptr().id)
7173 }
7174
7175
7176
7177
7178
7179
7180
7181
7182
7183
7184
7185
7186
7187 func procUnpin() {
7188 gp := getg()
7189 gp.m.locks--
7190 }
7191
7192 //go:linkname sync_runtime_procPin sync.runtime_procPin
7193 //go:nosplit
7194 func sync_runtime_procPin() int {
7195 return procPin()
7196 }
7197
7198 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
7199 //go:nosplit
7200 func sync_runtime_procUnpin() {
7201 procUnpin()
7202 }
7203
7204 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
7205 //go:nosplit
7206 func sync_atomic_runtime_procPin() int {
7207 return procPin()
7208 }
7209
7210 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
7211 //go:nosplit
7212 func sync_atomic_runtime_procUnpin() {
7213 procUnpin()
7214 }
7215
7216 // Active spinning for sync.Mutex.
7217 //
7218 //go:linkname internal_sync_runtime_canSpin internal/sync.runtime_canSpin
7219 //go:nosplit
7220 func internal_sync_runtime_canSpin(i int) bool {
7221 // sync.Mutex is cooperative, so we are conservative with spinning.
7222 // Spin only a few times, and only if we are running on a multicore machine,
7223 // GOMAXPROCS > 1, there is at least one other running P, and the local runq is empty.
7224 // As opposed to runtime mutexes we don't do passive spinning here,
7225 // since there can be work on the global runq or on other Ps.
7226 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7227 return false
7228 }
7229 if p := getg().m.p.ptr(); !runqempty(p) {
7230 return false
7231 }
7232 return true
7233 }
7234
7235 //go:linkname internal_sync_runtime_doSpin internal/sync.runtime_doSpin
7236 //go:nosplit
7237 func internal_sync_runtime_doSpin() {
7238 procyield(active_spin_cnt)
7239 }
7240
7241
7242
7243
7244
7245
7246
7247
7248
7249
7250
7251
7252
7253
7254
7255 func sync_runtime_canSpin(i int) bool {
7256 return internal_sync_runtime_canSpin(i)
7257 }
7258
7259
7260
7261
7262
7263
7264
7265
7266
7267
7268
7269
7270
7271 func sync_runtime_doSpin() {
7272 internal_sync_runtime_doSpin()
7273 }
7274
7275 var stealOrder randomOrder
7276
7277 // randomOrder/randomEnum are helper types for randomized work stealing.
7278 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
7279 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
7280 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
7281 type randomOrder struct {
7282 count uint32
7283 coprimes []uint32
7284 }
7285
7286 type randomEnum struct {
7287 i uint32
7288 count uint32
7289 pos uint32
7290 inc uint32
7291 }
7292
7293 func (ord *randomOrder) reset(count uint32) {
7294 ord.count = count
7295 ord.coprimes = ord.coprimes[:0]
7296 for i := uint32(1); i <= count; i++ {
7297 if gcd(i, count) == 1 {
7298 ord.coprimes = append(ord.coprimes, i)
7299 }
7300 }
7301 }
7302
7303 func (ord *randomOrder) start(i uint32) randomEnum {
7304 return randomEnum{
7305 count: ord.count,
7306 pos: i % ord.count,
7307 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7308 }
7309 }
7310
7311 func (enum *randomEnum) done() bool {
7312 return enum.i == enum.count
7313 }
7314
7315 func (enum *randomEnum) next() {
7316 enum.i++
7317 enum.pos = (enum.pos + enum.inc) % enum.count
7318 }
7319
7320 func (enum *randomEnum) position() uint32 {
7321 return enum.pos
7322 }
7323
7324 func gcd(a, b uint32) uint32 {
7325 for b != 0 {
7326 a, b = b, a%b
7327 }
7328 return a
7329 }
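// Illustrative sketch (a separate example, not part of proc.go): why stepping
// by an increment that is coprime with the count, as randomOrder/randomEnum do,
// visits every position exactly once before repeating. Plain ints stand in for
// P indices.
package main

import "fmt"

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const count = 6
	// The increments coprime with 6 are 1 and 5; either one yields a
	// permutation of 0..5 when applied repeatedly modulo 6.
	for inc := uint32(1); inc <= count; inc++ {
		if gcd(inc, count) != 1 {
			continue
		}
		pos := uint32(2) // arbitrary starting position
		var order []uint32
		for i := 0; i < count; i++ {
			order = append(order, pos)
			pos = (pos + inc) % count
		}
		fmt.Println("inc", inc, order)
		// inc 1 [2 3 4 5 0 1]
		// inc 5 [2 1 0 5 4 3]
	}
}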
7330
7331 // An initTask represents the set of initializations that need to be done for a package.
7332 // Its layout is produced by the linker; keep it in sync with the linker's initTask encoding.
7333 type initTask struct {
7334 state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
7335 nfns uint32
7336 // followed by nfns pcs, uintptr sized, one per init function to run
7337 }
7338
7339 // inittrace stores statistics for init functions which are
7340 // updated by malloc and newproc when active is true.
7341 var inittrace tracestat
7342
7343 type tracestat struct {
7344 active bool // init tracing activation status
7345 id uint64 // init goroutine id
7346 allocs uint64 // heap allocations
7347 bytes uint64 // heap allocated bytes
7348 }
7349
7350 func doInit(ts []*initTask) {
7351 for _, t := range ts {
7352 doInit1(t)
7353 }
7354 }
7355
7356 func doInit1(t *initTask) {
7357 switch t.state {
7358 case 2: // fully initialized
7359 return
7360 case 1: // initialization in progress
7361 throw("recursive call during initialization - linker skew")
7362 default: // not initialized yet
7363 t.state = 1 // initialization in progress
7364
7365 var (
7366 start int64
7367 before tracestat
7368 )
7369
7370 if inittrace.active {
7371 start = nanotime()
7372 // Load stats non-atomically, since they are updated only by this init goroutine.
7373 before = inittrace
7374 }
7375
7376 if t.nfns == 0 {
7377 // We should have pruned all of these in the linker.
7378 throw("inittask with no functions")
7379 }
7380
7381 firstFunc := add(unsafe.Pointer(t), 8)
7382 for i := uint32(0); i < t.nfns; i++ {
7383 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7384 f := *(*func())(unsafe.Pointer(&p))
7385 f()
7386 }
7387
7388 if inittrace.active {
7389 end := nanotime()
7390 // Load stats non-atomically, since they are updated only by this init goroutine.
7391 after := inittrace
7392
7393 f := *(*func())(unsafe.Pointer(&firstFunc))
7394 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7395
7396 var sbuf [24]byte
7397 print("init ", pkg, " @")
7398 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7399 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7400 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7401 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7402 print("\n")
7403 }
7404
7405 t.state = 2
7406 }
7407 }
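// Illustrative sketch (a separate example, not part of proc.go): the
// three-state protocol of doInit1 (0 = uninitialized, 1 = in progress,
// 2 = done) as a plain dependency-ordered initializer. The real runtime does
// not recurse into dependencies (the linker emits tasks already ordered, and
// state 1 can only be observed again if the linker misbehaves); the explicit
// deps field here is hypothetical and exists only to show why the "in
// progress" state turns a cycle into a clear error instead of an endless loop.
package main

import "fmt"

type initNode struct {
	name  string
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
	deps  []*initNode
	fn    func()
}

func runInit(n *initNode) {
	switch n.state {
	case 2: // already initialized
		return
	case 1: // re-entered a node that has not finished: a cycle
		panic("recursive call during initialization: " + n.name)
	default:
		n.state = 1
		for _, d := range n.deps {
			runInit(d)
		}
		n.fn()
		n.state = 2
	}
}

func main() {
	a := &initNode{name: "a", fn: func() { fmt.Println("init a") }}
	b := &initNode{name: "b", deps: []*initNode{a}, fn: func() { fmt.Println("init b") }}
	runInit(b) // prints "init a" then "init b"
}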
7408