Source file
src/runtime/proc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/strconv"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
21 var modinfo string
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall without an associated P.
//
// Design doc at https://golang.org/s/go11sched.
//
// Worker thread parking/unparking is a central trade-off: the scheduler needs
// to keep enough worker threads running to exploit available hardware
// parallelism, while parking excess threads to conserve CPU and power.
// Threads are unparked conservatively (via the spinning state) to avoid both
// thundering herds and lost wakeups; see the spinning handling in
// findRunnable and wakep below.
117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
127 var runtime_inittasks []*initTask
// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
133 var main_init_done chan bool
134
//go:linkname main_main main.main
137
// mainStarted indicates that the main M has started.
139 var mainStarted bool
140
// runtimeInitTime is the nanotime() at which the runtime started.
142 var runtimeInitTime int64
143
// Value to use for signal mask for newly created M's.
145 var initSigmask sigset
146
// The main goroutine.
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
// An upper limit for max stack size. Used to avoid random crashes
// after calling SetMaxStack and trying to allocate a stack that is too big,
// since stackalloc works with 32-bit sizes.
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
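// sysmon runs on a dedicated M without a P, so start it directly via
// newm rather than through the scheduler.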
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
// Lock the main goroutine onto this, the main OS thread,
// during initialization. Most programs won't care, but a few
// do require certain calls to be made by the main thread.
// Those can arrange for main.main to run in the main thread
// by calling runtime.LockOSThread during initialization
// to preserve the lock.
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247
// Run the initializing tasks. Depending on build mode this
// list can arrive a few different ways, but it will always
// contain the init tasks computed by the linker for all the
// packages in the program (excluding those added at runtime
// by package plugin). Run through the modules in dependency
// order (the order they are initialized by the dynamic
// loader, i.e. they are added to the moduledata linked list).
255 last := lastmoduledatap
256 for m := &firstmoduledata; true; m = m.next {
257 doInit(m.inittasks)
258 if m == last {
259 break
260 }
261 }
262
263
264
265 inittrace.active = false
266
267 close(main_init_done)
268
269 needUnlock = false
270 unlockOSThread()
271
272 if isarchive || islibrary {
// A program compiled with -buildmode=c-archive or c-shared
// has a main, but it is not executed.
275 if GOARCH == "wasm" {
276
277
278
279
280
281
282
283 pause(sys.GetCallerSP() - 16)
284 panic("unreachable")
285 }
286 return
287 }
288 fn := main_main
289 fn()
290
291
292
293
294
295
296
297
298 exitHooksRun := false
299 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
300 runExitHooks(0)
301 exitHooksRun = true
302 lsandoleakcheck()
303 }
304
// Make racy client program work: if panicking on
// another goroutine at the same time as main returns,
// let the other goroutine finish printing the panic trace.
// Once it does, it will exit. See issues 3934 and 20018.
309 if runningPanicDefers.Load() != 0 {
310
311 for c := 0; c < 1000; c++ {
312 if runningPanicDefers.Load() == 0 {
313 break
314 }
315 Gosched()
316 }
317 }
318 if panicking.Load() != 0 {
319 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
320 }
321 if !exitHooksRun {
322 runExitHooks(0)
323 }
324 if raceenabled {
325 racefini()
326 }
327
328 exit(0)
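// exit should never return. If it somehow does, crash loudly by
// dereferencing a nil pointer rather than running off the end of main.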
329 for {
330 var x *int32
331 *x = 0
332 }
333 }
334
// os_beforeExit is called from os.Exit(0).
//
//go:linkname os_beforeExit os.runtime_beforeExit
338 func os_beforeExit(exitCode int) {
339 runExitHooks(exitCode)
340 if exitCode == 0 && raceenabled {
341 racefini()
342 }
343
344
345 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
346 lsandoleakcheck()
347 }
348 }
349
350 func init() {
351 exithook.Gosched = Gosched
352 exithook.Goid = func() uint64 { return getg().goid }
353 exithook.Throw = throw
354 }
355
356 func runExitHooks(code int) {
357 exithook.Run(code)
358 }
359
360
361 func init() {
362 go forcegchelper()
363 }
364
365 func forcegchelper() {
366 forcegc.g = getg()
367 lockInit(&forcegc.lock, lockRankForcegc)
368 for {
369 lock(&forcegc.lock)
370 if forcegc.idle.Load() {
371 throw("forcegc: phase error")
372 }
373 forcegc.idle.Store(true)
374 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
375
376 if debug.gctrace > 0 {
377 println("GC forced")
378 }
379
380 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
381 }
382 }
383
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
388 func Gosched() {
389 checkTimeouts()
390 mcall(gosched_m)
391 }
392
393
394
395
396
397 func goschedguarded() {
398 mcall(goschedguarded_m)
399 }
400
401
402
403
404
405
406 func goschedIfBusy() {
407 gp := getg()
408
409
410 if !gp.preempt && sched.npidle.Load() > 0 {
411 return
412 }
413 mcall(gosched_m)
414 }
415
// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack. If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.
444 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
445 if reason != waitReasonSleep {
446 checkTimeouts()
447 }
448 mp := acquirem()
449 gp := mp.curg
450 status := readgstatus(gp)
451 if status != _Grunning && status != _Gscanrunning {
452 throw("gopark: bad g status")
453 }
454 mp.waitlock = lock
455 mp.waitunlockf = unlockf
456 gp.waitreason = reason
457 mp.waitTraceBlockReason = traceReason
458 mp.waitTraceSkip = traceskip
459 releasem(mp)
460
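// mcall switches to the g0 stack and completes the park in park_m,
// which also calls unlockf once the goroutine is safely waiting.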
461 mcall(park_m)
462 }
463
464
465
466 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
467 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
468 }
469
// goready marks gp ready to run, undoing a previous gopark. The actual
// work is done by ready on the system stack. traceskip is the number of
// frames to skip when recording the unpark in the execution trace.
480 func goready(gp *g, traceskip int) {
481 systemstack(func() {
482 ready(gp, traceskip, true)
483 })
484 }
485
486
487 func acquireSudog() *sudog {
// Delicate dance: the semaphore implementation calls
// acquireSudog, acquireSudog calls new(sudog),
// new calls malloc, malloc can call the garbage collector,
// and the garbage collector calls the semaphore implementation
// in stopTheWorld.
// Break the cycle by doing acquirem/releasem around new(sudog).
// The acquirem/releasem increments m.locks during new(sudog),
// which keeps the garbage collector from being invoked.
496 mp := acquirem()
497 pp := mp.p.ptr()
498 if len(pp.sudogcache) == 0 {
499 lock(&sched.sudoglock)
500
501 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
502 s := sched.sudogcache
503 sched.sudogcache = s.next
504 s.next = nil
505 pp.sudogcache = append(pp.sudogcache, s)
506 }
507 unlock(&sched.sudoglock)
508
509 if len(pp.sudogcache) == 0 {
510 pp.sudogcache = append(pp.sudogcache, new(sudog))
511 }
512 }
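// Pop a sudog from the tail of the local cache.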
513 n := len(pp.sudogcache)
514 s := pp.sudogcache[n-1]
515 pp.sudogcache[n-1] = nil
516 pp.sudogcache = pp.sudogcache[:n-1]
517 if s.elem.get() != nil {
518 throw("acquireSudog: found s.elem != nil in cache")
519 }
520 releasem(mp)
521 return s
522 }
523
524
525 func releaseSudog(s *sudog) {
526 if s.elem.get() != nil {
527 throw("runtime: sudog with non-nil elem")
528 }
529 if s.isSelect {
530 throw("runtime: sudog with non-false isSelect")
531 }
532 if s.next != nil {
533 throw("runtime: sudog with non-nil next")
534 }
535 if s.prev != nil {
536 throw("runtime: sudog with non-nil prev")
537 }
538 if s.waitlink != nil {
539 throw("runtime: sudog with non-nil waitlink")
540 }
541 if s.c.get() != nil {
542 throw("runtime: sudog with non-nil c")
543 }
544 gp := getg()
545 if gp.param != nil {
546 throw("runtime: releaseSudog with non-nil gp.param")
547 }
548 mp := acquirem()
549 pp := mp.p.ptr()
550 if len(pp.sudogcache) == cap(pp.sudogcache) {
551
552 var first, last *sudog
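// Collect the surplus sudogs into a local linked list so they can be
// pushed onto the central cache with a single lock acquisition below.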
553 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
554 n := len(pp.sudogcache)
555 p := pp.sudogcache[n-1]
556 pp.sudogcache[n-1] = nil
557 pp.sudogcache = pp.sudogcache[:n-1]
558 if first == nil {
559 first = p
560 } else {
561 last.next = p
562 }
563 last = p
564 }
565 lock(&sched.sudoglock)
566 last.next = sched.sudogcache
567 sched.sudogcache = first
568 unlock(&sched.sudoglock)
569 }
570 pp.sudogcache = append(pp.sudogcache, s)
571 releasem(mp)
572 }
573
574
575 func badmcall(fn func(*g)) {
576 throw("runtime: mcall called on m->g0 stack")
577 }
578
579 func badmcall2(fn func(*g)) {
580 throw("runtime: mcall function returned")
581 }
582
583 func badreflectcall() {
584 panic(plainError("arg size to reflect.call more than 1GB"))
585 }
586
587
588
589 func badmorestackg0() {
590 if !crashStackImplemented {
591 writeErrStr("fatal: morestack on g0\n")
592 return
593 }
594
595 g := getg()
596 switchToCrashStack(func() {
597 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
598 g.m.traceback = 2
599 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
600 print("\n")
601
602 throw("morestack on g0")
603 })
604 }
605
606
607
608 func badmorestackgsignal() {
609 writeErrStr("fatal: morestack on gsignal\n")
610 }
611
612
613 func badctxt() {
614 throw("ctxt != 0")
615 }
616
617
618
619 var gcrash g
620
621 var crashingG atomic.Pointer[g]
622
// Switch to crashstack and call fn, with special handling of
// concurrent and recursive cases.
//
// Nosplit as it is called in a bad stack condition (we know
// morestack would fail).
//
//go:nosplit
//go:nowritebarrierrec
631 func switchToCrashStack(fn func()) {
632 me := getg()
633 if crashingG.CompareAndSwapNoWB(nil, me) {
634 switchToCrashStack0(fn)
635 abort()
636 }
637 if crashingG.Load() == me {
638
639 writeErrStr("fatal: recursive switchToCrashStack\n")
640 abort()
641 }
642
643 usleep_no_g(100)
644 writeErrStr("fatal: concurrent switchToCrashStack\n")
645 abort()
646 }
647
// Disable crash stack on Windows for now. Apparently, throwing an exception
// on a non-system-allocated crash stack causes EXCEPTION_STACK_OVERFLOW and
// hangs the process (see issue 63938).
651 const crashStackImplemented = GOOS != "windows"
652
653
654 func switchToCrashStack0(fn func())
655
656 func lockedOSThread() bool {
657 gp := getg()
658 return gp.lockedm != 0 && gp.m.lockedg != 0
659 }
660
661 var (
// allgs contains all Gs ever created (including dead Gs), and thus
// never shrinks.
//
// Access via the slice is protected by allglock or stop-the-world.
// Readers that cannot take the lock may (carefully!) use the atomic
// variables below.
668 allglock mutex
669 allgs []*g
// allglen and allgptr are atomic variables that contain len(allgs) and
// &allgs[0] respectively. Proper ordering depends on totally-ordered
// loads and stores. Writes are protected by allglock.
//
// allgptr is updated before allglen. Readers should read allglen
// before allgptr to ensure that allglen is always <= len(allgptr). New
// Gs appended during the race can be missed. For a consistent view of
// all Gs, allglock must be held.
//
// allgptr copies should always be stored as a concrete type or
// unsafe.Pointer, not uintptr, to ensure that GC can still reach it
// even if it points to a stale array.
683 allglen uintptr
684 allgptr **g
685 )
686
687 func allgadd(gp *g) {
688 if readgstatus(gp) == _Gidle {
689 throw("allgadd: bad status Gidle")
690 }
691
692 lock(&allglock)
693 allgs = append(allgs, gp)
694 if &allgs[0] != allgptr {
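// The slice was reallocated; publish the new base pointer before
// updating allglen so readers never see a length beyond the array.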
695 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
696 }
697 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
698 unlock(&allglock)
699 }
// allGsSnapshot returns a snapshot of the slice of all Gs.
//
// The world must be stopped or allglock must be held.
704 func allGsSnapshot() []*g {
705 assertWorldStoppedOrLockHeld(&allglock)
706
707
708
709
710
711
712 return allgs[:len(allgs):len(allgs)]
713 }
714
715
716 func atomicAllG() (**g, uintptr) {
717 length := atomic.Loaduintptr(&allglen)
718 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
719 return ptr, length
720 }
721
722
723 func atomicAllGIndex(ptr **g, i uintptr) *g {
724 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
725 }
// forEachG calls fn on every G from allgs.
//
// forEachG takes a lock to exclude concurrent addition of new Gs.
730 func forEachG(fn func(gp *g)) {
731 lock(&allglock)
732 for _, gp := range allgs {
733 fn(gp)
734 }
735 unlock(&allglock)
736 }
737
738
739
740
741
742 func forEachGRace(fn func(gp *g)) {
743 ptr, length := atomicAllG()
744 for i := uintptr(0); i < length; i++ {
745 gp := atomicAllGIndex(ptr, i)
746 fn(gp)
747 }
748 return
749 }
750
751 const (
752
753
754 _GoidCacheBatch = 16
755 )
756
757
758
759 func cpuinit(env string) {
760 cpu.Initialize(env)
761
762
763
764 switch GOARCH {
765 case "386", "amd64":
766 x86HasPOPCNT = cpu.X86.HasPOPCNT
767 x86HasSSE41 = cpu.X86.HasSSE41
768 x86HasFMA = cpu.X86.HasFMA
769
770 case "arm":
771 armHasVFPv4 = cpu.ARM.HasVFPv4
772
773 case "arm64":
774 arm64HasATOMICS = cpu.ARM64.HasATOMICS
775
776 case "loong64":
777 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
778 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
779 loong64HasLSX = cpu.Loong64.HasLSX
780
781 case "riscv64":
782 riscv64HasZbb = cpu.RISCV64.HasZbb
783 }
784 }
785
// getGodebugEarly extracts the environment variable GODEBUG from the
// environment on Unix-like operating systems and returns it, so that it can
// be used before much of the runtime is initialized. The second result
// reports whether the environment could be consulted this early at all.
791 func getGodebugEarly() (string, bool) {
792 const prefix = "GODEBUG="
793 var env string
794 switch GOOS {
795 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
796
797
798
799 n := int32(0)
800 for argv_index(argv, argc+1+n) != nil {
801 n++
802 }
803
804 for i := int32(0); i < n; i++ {
805 p := argv_index(argv, argc+1+i)
806 s := unsafe.String(p, findnull(p))
807
808 if stringslite.HasPrefix(s, prefix) {
809 env = gostringnocopy(p)[len(prefix):]
810 break
811 }
812 }
813 break
814
815 default:
816 return "", false
817 }
818 return env, true
819 }
820
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
829 func schedinit() {
830 lockInit(&sched.lock, lockRankSched)
831 lockInit(&sched.sysmonlock, lockRankSysmon)
832 lockInit(&sched.deferlock, lockRankDefer)
833 lockInit(&sched.sudoglock, lockRankSudog)
834 lockInit(&deadlock, lockRankDeadlock)
835 lockInit(&paniclk, lockRankPanic)
836 lockInit(&allglock, lockRankAllg)
837 lockInit(&allpLock, lockRankAllp)
838 lockInit(&reflectOffs.lock, lockRankReflectOffs)
839 lockInit(&finlock, lockRankFin)
840 lockInit(&cpuprof.lock, lockRankCpuprof)
841 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
842 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
843 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
844 traceLockInit()
845
846
847
848 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
849
850 lockVerifyMSize()
851
852 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
853
854
855
856 gp := getg()
857 if raceenabled {
858 gp.racectx, raceprocctx0 = raceinit()
859 }
860
861 sched.maxmcount = 10000
862 crashFD.Store(^uintptr(0))
863
864
865 worldStopped()
866
867 godebug, parsedGodebug := getGodebugEarly()
868 if parsedGodebug {
869 parseRuntimeDebugVars(godebug)
870 }
871 ticks.init()
872 moduledataverify()
873 stackinit()
874 randinit()
875 mallocinit()
876 cpuinit(godebug)
877 alginit()
878 mcommoninit(gp.m, -1)
879 modulesinit()
880 typelinksinit()
881 itabsinit()
882 stkobjinit()
883
884 sigsave(&gp.m.sigmask)
885 initSigmask = gp.m.sigmask
886
887 goargs()
888 goenvs()
889 secure()
890 checkfds()
891 if !parsedGodebug {
892
893
894 parseRuntimeDebugVars(gogetenv("GODEBUG"))
895 }
896 finishDebugVarsSetup()
897 gcinit()
898
899
900
901 gcrash.stack = stackalloc(16384)
902 gcrash.stackguard0 = gcrash.stack.lo + 1000
903 gcrash.stackguard1 = gcrash.stack.lo + 1000
904
905
906
907
908
909 if disableMemoryProfiling {
910 MemProfileRate = 0
911 }
912
913
914 mProfStackInit(gp.m)
915 defaultGOMAXPROCSInit()
916
917 lock(&sched.lock)
918 sched.lastpoll.Store(nanotime())
919 var procs int32
920 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
921 procs = int32(n)
922 sched.customGOMAXPROCS = true
923 } else {
// GOMAXPROCS is not set in the environment (or is invalid), so fall
// back to the runtime default, which is derived from the number of
// CPUs available at startup (and any platform-specific limits).
932 procs = defaultGOMAXPROCS(numCPUStartup)
933 }
934 if procresize(procs) != nil {
935 throw("unknown runnable goroutine during bootstrap")
936 }
937 unlock(&sched.lock)
938
939
940 worldStarted()
941
942 if buildVersion == "" {
943
944
945 buildVersion = "unknown"
946 }
947 if len(modinfo) == 1 {
948
949
950 modinfo = ""
951 }
952 }
953
954 func dumpgstatus(gp *g) {
955 thisg := getg()
956 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
957 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
958 }
959
960
961 func checkmcount() {
962 assertLockHeld(&sched.lock)
// Exclude extra M's, which are used for cgocallback from threads
// created in C.
//
// The purpose of the SetMaxThreads limit is to avoid accidental fork
// bomb from something like millions of goroutines blocking on system
// calls, causing the runtime to create millions of threads. By
// definition, this isn't a problem for threads created in C, so we
// exclude them from the limit. See https://go.dev/issue/60004.
972 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
973 if count > sched.maxmcount {
974 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
975 throw("thread exhaustion")
976 }
977 }
978
// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
983 func mReserveID() int64 {
984 assertLockHeld(&sched.lock)
985
986 if sched.mnext+1 < sched.mnext {
987 throw("runtime: thread ID overflow")
988 }
989 id := sched.mnext
990 sched.mnext++
991 checkmcount()
992 return id
993 }
994
995
996 func mcommoninit(mp *m, id int64) {
997 gp := getg()
998
999
1000 if gp != gp.m.g0 {
1001 callers(1, mp.createstack[:])
1002 }
1003
1004 lock(&sched.lock)
1005
1006 if id >= 0 {
1007 mp.id = id
1008 } else {
1009 mp.id = mReserveID()
1010 }
1011
1012 mp.self = newMWeakPointer(mp)
1013
1014 mrandinit(mp)
1015
1016 mpreinit(mp)
1017 if mp.gsignal != nil {
1018 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1019 }
1020
1021
1022
1023 mp.alllink = allm
1024
1025
1026
1027 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1028 unlock(&sched.lock)
1029
1030
1031 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1032 mp.cgoCallers = new(cgoCallers)
1033 }
1034 mProfStackInit(mp)
1035 }
1036
1037
1038
1039
1040
1041 func mProfStackInit(mp *m) {
1042 if debug.profstackdepth == 0 {
1043
1044
1045 return
1046 }
1047 mp.profStack = makeProfStackFP()
1048 mp.mLockProfile.stack = makeProfStackFP()
1049 }
1050
1051
1052
1053
1054 func makeProfStackFP() []uintptr {
1055
1056
1057
1058
1059
1060
1061 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1062 }
1063
1064
1065
1066 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1067
1068
1069 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1070
1071 func (mp *m) becomeSpinning() {
1072 mp.spinning = true
1073 sched.nmspinning.Add(1)
1074 sched.needspinning.Store(0)
1075 }
// snapshotAllp takes a snapshot of the allp slice and stores it on the M so
// the snapshot remains reachable after the M drops its P. The contents of
// allp need not be snapshotted because entries up to cap(allp) are immutable.
1084 func (mp *m) snapshotAllp() []*p {
1085 mp.allpSnapshot = allp
1086 return mp.allpSnapshot
1087 }
1088
1089
1090
1091
1092
1093
1094
1095 func (mp *m) clearAllpSnapshot() {
1096 mp.allpSnapshot = nil
1097 }
1098
1099 func (mp *m) hasCgoOnStack() bool {
1100 return mp.ncgo > 0 || mp.isextra
1101 }
1102
1103 const (
1104
1105
1106 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1107
1108
1109
1110 osHasLowResClockInt = goos.IsWindows
1111
1112
1113
1114 osHasLowResClock = osHasLowResClockInt > 0
1115 )
1116
1117
1118 func ready(gp *g, traceskip int, next bool) {
1119 status := readgstatus(gp)
1120
1121
1122 mp := acquirem()
1123 if status&^_Gscan != _Gwaiting {
1124 dumpgstatus(gp)
1125 throw("bad g->status in ready")
1126 }
1127
1128
1129 trace := traceAcquire()
1130 casgstatus(gp, _Gwaiting, _Grunnable)
1131 if trace.ok() {
1132 trace.GoUnpark(gp, traceskip)
1133 traceRelease(trace)
1134 }
1135 runqput(mp.p.ptr(), gp, next)
1136 wakep()
1137 releasem(mp)
1138 }
1139
1140
1141
1142 const freezeStopWait = 0x7fffffff
1143
1144
1145
1146 var freezing atomic.Bool
1147
// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
1151 func freezetheworld() {
1152 freezing.Store(true)
1153 if debug.dontfreezetheworld > 0 {
// GODEBUG=dontfreezetheworld=1 requests that goroutines keep running
// while the crash proceeds, which can make the resulting traceback more
// useful when diagnosing scheduler or GC hangs. We still pause briefly
// to give racing threads a chance to reach a quiescent point before
// continuing with the crash.
1178 usleep(1000)
1179 return
1180 }
1181
1182
1183
1184
1185 for i := 0; i < 5; i++ {
1186
1187 sched.stopwait = freezeStopWait
1188 sched.gcwaiting.Store(true)
1189
1190 if !preemptall() {
1191 break
1192 }
1193 usleep(1000)
1194 }
1195
1196 usleep(1000)
1197 preemptall()
1198 usleep(1000)
1199 }
1200
// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, casfrom_Gscanstatus.
//
//go:nosplit
1205 func readgstatus(gp *g) uint32 {
1206 return gp.atomicstatus.Load()
1207 }
1208
// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
1213 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1214 success := false
1215
1216
1217 switch oldval {
1218 default:
1219 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1220 dumpgstatus(gp)
1221 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1222 case _Gscanrunnable,
1223 _Gscanwaiting,
1224 _Gscanrunning,
1225 _Gscansyscall,
1226 _Gscanleaked,
1227 _Gscanpreempted,
1228 _Gscandeadextra:
1229 if newval == oldval&^_Gscan {
1230 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1231 }
1232 }
1233 if !success {
1234 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1235 dumpgstatus(gp)
1236 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1237 }
1238 releaseLockRankAndM(lockRankGscan)
1239 }
1240
1241
1242
1243 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1244 switch oldval {
1245 case _Grunnable,
1246 _Grunning,
1247 _Gwaiting,
1248 _Gleaked,
1249 _Gsyscall,
1250 _Gdeadextra:
1251 if newval == oldval|_Gscan {
1252 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1253 if r {
1254 acquireLockRankAndM(lockRankGscan)
1255 }
1256 return r
1257
1258 }
1259 }
1260 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1261 throw("bad oldval passed to castogscanstatus")
1262 return false
1263 }
1264
1265
1266
1267 var casgstatusAlwaysTrack = false
1268
// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//
//go:nosplit
1275 func casgstatus(gp *g, oldval, newval uint32) {
1276 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1277 systemstack(func() {
1278
1279
1280 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1281 throw("casgstatus: bad incoming values")
1282 })
1283 }
1284
1285 lockWithRankMayAcquire(nil, lockRankGscan)
1286
1287
1288 const yieldDelay = 5 * 1000
1289 var nextYield int64
// loop if gp->atomicstatus is in a scan state giving
// GC time to finish and change the state to oldval.
1293 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1294 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1295 systemstack(func() {
1296
1297
1298 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1299 })
1300 }
1301 if i == 0 {
1302 nextYield = nanotime() + yieldDelay
1303 }
1304 if nanotime() < nextYield {
1305 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1306 procyield(1)
1307 }
1308 } else {
1309 osyield()
1310 nextYield = nanotime() + yieldDelay/2
1311 }
1312 }
1313
1314 if gp.bubble != nil {
1315 systemstack(func() {
1316 gp.bubble.changegstatus(gp, oldval, newval)
1317 })
1318 }
1319
1320 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1321
1322
1323 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1324 gp.tracking = true
1325 }
1326 gp.trackingSeq++
1327 }
1328 if !gp.tracking {
1329 return
1330 }
1331
1332
1333
1334
1335
1336
1337 switch oldval {
1338 case _Grunnable:
1339
1340
1341
1342 now := nanotime()
1343 gp.runnableTime += now - gp.trackingStamp
1344 gp.trackingStamp = 0
1345 case _Gwaiting:
1346 if !gp.waitreason.isMutexWait() {
1347
1348 break
1349 }
1350
1351
1352
1353
1354
1355 now := nanotime()
1356 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1357 gp.trackingStamp = 0
1358 }
1359 switch newval {
1360 case _Gwaiting:
1361 if !gp.waitreason.isMutexWait() {
1362
1363 break
1364 }
1365
1366 now := nanotime()
1367 gp.trackingStamp = now
1368 case _Grunnable:
1369
1370
1371 now := nanotime()
1372 gp.trackingStamp = now
1373 case _Grunning:
1374
1375
1376
1377 gp.tracking = false
1378 sched.timeToRun.record(gp.runnableTime)
1379 gp.runnableTime = 0
1380 }
1381 }
1382
1383
1384
1385
1386 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1387
1388 gp.waitreason = reason
1389 casgstatus(gp, old, _Gwaiting)
1390 }
1391
1392
1393
1394
1395
1396
1397
1398
1399 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1400 if !reason.isWaitingForSuspendG() {
1401 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1402 }
1403 casGToWaiting(gp, old, reason)
1404 }
1405
1406
1407
1408
1409
1410 func casGToPreemptScan(gp *g, old, new uint32) {
1411 if old != _Grunning || new != _Gscan|_Gpreempted {
1412 throw("bad g transition")
1413 }
1414 acquireLockRankAndM(lockRankGscan)
1415 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1416 }
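// The status may transiently include a scan bit while the GC examines
// the goroutine, so spin until the CAS from _Grunning succeeds.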
1417
1418
1419
1420
1421
1422
1423 }
1424
1425
1426
1427
1428 func casGFromPreempted(gp *g, old, new uint32) bool {
1429 if old != _Gpreempted || new != _Gwaiting {
1430 throw("bad g transition")
1431 }
1432 gp.waitreason = waitReasonPreempted
1433 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1434 return false
1435 }
1436 if bubble := gp.bubble; bubble != nil {
1437 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1438 }
1439 return true
1440 }
1441
1442
1443 type stwReason uint8
1444
1445
1446
1447
1448 const (
1449 stwUnknown stwReason = iota
1450 stwGCMarkTerm
1451 stwGCSweepTerm
1452 stwWriteHeapDump
1453 stwGoroutineProfile
1454 stwGoroutineProfileCleanup
1455 stwAllGoroutinesStack
1456 stwReadMemStats
1457 stwAllThreadsSyscall
1458 stwGOMAXPROCS
1459 stwStartTrace
1460 stwStopTrace
1461 stwForTestCountPagesInUse
1462 stwForTestReadMetricsSlow
1463 stwForTestReadMemStatsSlow
1464 stwForTestPageCachePagesLeaked
1465 stwForTestResetDebugLog
1466 )
1467
1468 func (r stwReason) String() string {
1469 return stwReasonStrings[r]
1470 }
1471
1472 func (r stwReason) isGC() bool {
1473 return r == stwGCMarkTerm || r == stwGCSweepTerm
1474 }
1475
1476
1477
1478
1479 var stwReasonStrings = [...]string{
1480 stwUnknown: "unknown",
1481 stwGCMarkTerm: "GC mark termination",
1482 stwGCSweepTerm: "GC sweep termination",
1483 stwWriteHeapDump: "write heap dump",
1484 stwGoroutineProfile: "goroutine profile",
1485 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1486 stwAllGoroutinesStack: "all goroutines stack trace",
1487 stwReadMemStats: "read mem stats",
1488 stwAllThreadsSyscall: "AllThreadsSyscall",
1489 stwGOMAXPROCS: "GOMAXPROCS",
1490 stwStartTrace: "start trace",
1491 stwStopTrace: "stop trace",
1492 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1493 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1494 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1495 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1496 stwForTestResetDebugLog: "ResetDebugLog (test)",
1497 }
1498
1499
1500
1501 type worldStop struct {
1502 reason stwReason
1503 startedStopping int64
1504 finishedStopping int64
1505 stoppingCPUTime int64
1506 }
1507
1508
1509
1510
1511 var stopTheWorldContext worldStop
1512
// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and records reason as the reason for
// the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
1530 func stopTheWorld(reason stwReason) worldStop {
1531 semacquire(&worldsema)
1532 gp := getg()
1533 gp.m.preemptoff = reason.String()
1534 systemstack(func() {
1535 stopTheWorldContext = stopTheWorldWithSema(reason)
1536 })
1537 return stopTheWorldContext
1538 }
1539
// startTheWorld undoes the effects of stopTheWorld.
//
// w must be the worldStop returned by stopTheWorld.
1543 func startTheWorld(w worldStop) {
1544 systemstack(func() { startTheWorldWithSema(0, w) })
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 mp := acquirem()
1562 mp.preemptoff = ""
1563 semrelease1(&worldsema, true, 0)
1564 releasem(mp)
1565 }
1566
// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running. It also blocks a GC from starting
// until startTheWorldGC is called.
1570 func stopTheWorldGC(reason stwReason) worldStop {
1571 semacquire(&gcsema)
1572 return stopTheWorld(reason)
1573 }
1574
1575
1576
1577
1578 func startTheWorldGC(w worldStop) {
1579 startTheWorld(w)
1580 semrelease(&gcsema)
1581 }
1582
// Holding worldsema grants an M the right to try to stop the world.
1584 var worldsema uint32 = 1
1585
// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
1592 var gcsema uint32 = 1
1593
// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
1626 func stopTheWorldWithSema(reason stwReason) worldStop {
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1640
1641 trace := traceAcquire()
1642 if trace.ok() {
1643 trace.STWStart(reason)
1644 traceRelease(trace)
1645 }
1646 gp := getg()
1647
1648
1649
1650 if gp.m.locks > 0 {
1651 throw("stopTheWorld: holding locks")
1652 }
1653
1654 lock(&sched.lock)
1655 start := nanotime()
1656 sched.stopwait = gomaxprocs
1657 sched.gcwaiting.Store(true)
1658 preemptall()
1659
1660
1661 gp.m.p.ptr().status = _Pgcstop
1662 gp.m.p.ptr().gcStopTime = start
1663 sched.stopwait--
1664
1665
1666 for _, pp := range allp {
1667 if thread, ok := setBlockOnExitSyscall(pp); ok {
1668 thread.gcstopP()
1669 thread.resume()
1670 }
1671 }
1672
1673
1674 now := nanotime()
1675 for {
1676 pp, _ := pidleget(now)
1677 if pp == nil {
1678 break
1679 }
1680 pp.status = _Pgcstop
1681 pp.gcStopTime = nanotime()
1682 sched.stopwait--
1683 }
1684 wait := sched.stopwait > 0
1685 unlock(&sched.lock)
1686
1687
1688 if wait {
1689 for {
1690
1691 if notetsleep(&sched.stopnote, 100*1000) {
1692 noteclear(&sched.stopnote)
1693 break
1694 }
1695 preemptall()
1696 }
1697 }
1698
1699 finish := nanotime()
1700 startTime := finish - start
1701 if reason.isGC() {
1702 sched.stwStoppingTimeGC.record(startTime)
1703 } else {
1704 sched.stwStoppingTimeOther.record(startTime)
1705 }
1706
1707
1708
1709
1710
1711 stoppingCPUTime := int64(0)
1712 bad := ""
1713 if sched.stopwait != 0 {
1714 bad = "stopTheWorld: not stopped (stopwait != 0)"
1715 } else {
1716 for _, pp := range allp {
1717 if pp.status != _Pgcstop {
1718 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1719 }
1720 if pp.gcStopTime == 0 && bad == "" {
1721 bad = "stopTheWorld: broken CPU time accounting"
1722 }
1723 stoppingCPUTime += finish - pp.gcStopTime
1724 pp.gcStopTime = 0
1725 }
1726 }
1727 if freezing.Load() {
// Some other thread is panicking. This can cause the
// sanity checks above to fail if the panic happens in
// the signal handler on a stopped thread. Either way,
// we should halt this thread.
1732 lock(&deadlock)
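// Acquiring the same lock twice can never succeed, so this parks the
// thread forever while the panicking thread finishes crashing.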
1733 lock(&deadlock)
1734 }
1735 if bad != "" {
1736 throw(bad)
1737 }
1738
1739 worldStopped()
1740
1741
1742 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1743
1744 return worldStop{
1745 reason: reason,
1746 startedStopping: start,
1747 finishedStopping: finish,
1748 stoppingCPUTime: stoppingCPUTime,
1749 }
1750 }
// startTheWorldWithSema is the core implementation of startTheWorld.
// w must be the worldStop returned by the matching stopTheWorldWithSema.
// now is an optional current time; pass 0 to have the function read the
// clock itself. It returns the time at which the world was restarted.
1758 func startTheWorldWithSema(now int64, w worldStop) int64 {
1759 assertWorldStopped()
1760
1761 mp := acquirem()
1762 if netpollinited() {
1763 list, delta := netpoll(0)
1764 injectglist(&list)
1765 netpollAdjustWaiters(delta)
1766 }
1767 lock(&sched.lock)
1768
1769 procs := gomaxprocs
1770 if newprocs != 0 {
1771 procs = newprocs
1772 newprocs = 0
1773 }
1774 p1 := procresize(procs)
1775 sched.gcwaiting.Store(false)
1776 if sched.sysmonwait.Load() {
1777 sched.sysmonwait.Store(false)
1778 notewakeup(&sched.sysmonnote)
1779 }
1780 unlock(&sched.lock)
1781
1782 worldStarted()
1783
1784 for p1 != nil {
1785 p := p1
1786 p1 = p1.link.ptr()
1787 if p.m != 0 {
1788 mp := p.m.ptr()
1789 p.m = 0
1790 if mp.nextp != 0 {
1791 throw("startTheWorld: inconsistent mp->nextp")
1792 }
1793 mp.nextp.set(p)
1794 notewakeup(&mp.park)
1795 } else {
1796
1797 newm(nil, p, -1)
1798 }
1799 }
1800
1801
1802 if now == 0 {
1803 now = nanotime()
1804 }
1805 totalTime := now - w.startedStopping
1806 if w.reason.isGC() {
1807 sched.stwTotalTimeGC.record(totalTime)
1808 } else {
1809 sched.stwTotalTimeOther.record(totalTime)
1810 }
1811 trace := traceAcquire()
1812 if trace.ok() {
1813 trace.STWDone()
1814 traceRelease(trace)
1815 }
1816
1817
1818
1819
1820 wakep()
1821
1822 releasem(mp)
1823
1824 return now
1825 }
1826
1827
1828
1829 func usesLibcall() bool {
1830 switch GOOS {
1831 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1832 return true
1833 }
1834 return false
1835 }
1836
1837
1838
1839 func mStackIsSystemAllocated() bool {
1840 switch GOOS {
1841 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1842 return true
1843 }
1844 return false
1845 }
1846
// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
1849 func mstart()
1850
// mstart0 is the Go entry-point for new Ms.
// It may run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec
1860 func mstart0() {
1861 gp := getg()
1862
1863 osStack := gp.stack.lo == 0
1864 if osStack {
// Initialize stack bounds from system stack.
// Cgo may have left stack size in stack.hi.
// minit may update the stack bounds.
//
// Note: these bounds may not be very accurate.
// We set hi to &size, but there are things above
// it. The 1024 is supposed to compensate this,
// but is somewhat arbitrary.
1873 size := gp.stack.hi
1874 if size == 0 {
1875 size = 16384 * sys.StackGuardMultiplier
1876 }
1877 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1878 gp.stack.lo = gp.stack.hi - size + 1024
1879 }
1880
1881
1882 gp.stackguard0 = gp.stack.lo + stackGuard
1883
1884
1885 gp.stackguard1 = gp.stackguard0
1886 mstart1()
1887
1888
1889 if mStackIsSystemAllocated() {
1890
1891
1892
1893 osStack = true
1894 }
1895 mexit(osStack)
1896 }
1897
1898
1899
1900
1901
1902 func mstart1() {
1903 gp := getg()
1904
1905 if gp != gp.m.g0 {
1906 throw("bad runtime·mstart")
1907 }
// Set up m.g0.sched as a label returning to just
// after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
// We're never coming back to mstart1 after we call schedule,
// so other calls can reuse the current frame.
// And goexit0 does a gogo that needs to return from mstart1
// and let mstart0 exit the thread.
1915 gp.sched.g = guintptr(unsafe.Pointer(gp))
1916 gp.sched.pc = sys.GetCallerPC()
1917 gp.sched.sp = sys.GetCallerSP()
1918
1919 asminit()
1920 minit()
1921
1922
1923
1924 if gp.m == &m0 {
1925 mstartm0()
1926 }
1927
1928 if debug.dataindependenttiming == 1 {
1929 sys.EnableDIT()
1930 }
1931
1932 if fn := gp.m.mstartfn; fn != nil {
1933 fn()
1934 }
1935
1936 if gp.m != &m0 {
1937 acquirep(gp.m.nextp.ptr())
1938 gp.m.nextp = 0
1939 }
1940 schedule()
1941 }
1942
// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
//
//go:yeswritebarrierrec
1949 func mstartm0() {
1950
1951
1952
1953 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1954 cgoHasExtraM = true
1955 newextram()
1956 }
1957 initsig(false)
1958 }
1959
// mPark causes a thread to park itself, returning once woken.
//
//go:nosplit
1963 func mPark() {
1964 gp := getg()
1965 notesleep(&gp.m.park)
1966 noteclear(&gp.m.park)
1967 }
1968
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec
1979 func mexit(osStack bool) {
1980 mp := getg().m
1981
1982 if mp == &m0 {
// This is the main thread. Just wedge it.
//
// On Linux, exiting the main thread puts the process
// into a non-waitable zombie state. On Plan 9,
// exiting the main thread unblocks wait even though
// other threads are still running. On Solaris we can
// neither exitThread nor return from mstart. Other
// bad things probably happen. We could try to clean
// up this M more before wedging it, but that
// complicates signal handling.
1994 handoffp(releasep())
1995 lock(&sched.lock)
1996 sched.nmfreed++
1997 checkdead()
1998 unlock(&sched.lock)
1999 mPark()
2000 throw("locked m0 woke up")
2001 }
2002
2003 sigblock(true)
2004 unminit()
2005
2006
2007 if mp.gsignal != nil {
2008 stackfree(mp.gsignal.stack)
2009 if valgrindenabled {
2010 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2011 mp.gsignal.valgrindStackID = 0
2012 }
2013
2014
2015
2016
2017 mp.gsignal = nil
2018 }
2019
2020
2021 vgetrandomDestroy(mp)
2022
2023
2024
2025 mp.self.clear()
2026
2027
2028 lock(&sched.lock)
2029 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2030 if *pprev == mp {
2031 *pprev = mp.alllink
2032 goto found
2033 }
2034 }
2035 throw("m not found in allm")
2036 found:
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051 mp.freeWait.Store(freeMWait)
2052 mp.freelink = sched.freem
2053 sched.freem = mp
2054 unlock(&sched.lock)
2055
2056 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2057 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2058
2059
2060 handoffp(releasep())
2061
2062
2063
2064
2065
2066 lock(&sched.lock)
2067 sched.nmfreed++
2068 checkdead()
2069 unlock(&sched.lock)
2070
2071 if GOOS == "darwin" || GOOS == "ios" {
2072
2073
2074 if mp.signalPending.Load() != 0 {
2075 pendingPreemptSignals.Add(-1)
2076 }
2077 }
2078
2079
2080
2081 mdestroy(mp)
2082
2083 if osStack {
2084
2085 mp.freeWait.Store(freeMRef)
2086
2087
2088
2089 return
2090 }
2091
2092
2093
2094
2095
2096 exitThread(&mp.freeWait)
2097 }
2098
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema. fn must not refer to any
// parts of the current goroutine's stack, since the GC may move it.
2109 func forEachP(reason waitReason, fn func(*p)) {
2110 systemstack(func() {
2111 gp := getg().m.curg
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123 casGToWaitingForSuspendG(gp, _Grunning, reason)
2124 forEachPInternal(fn)
2125 casgstatus(gp, _Gwaiting, _Grunning)
2126 })
2127 }
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138 func forEachPInternal(fn func(*p)) {
2139 mp := acquirem()
2140 pp := getg().m.p.ptr()
2141
2142 lock(&sched.lock)
2143 if sched.safePointWait != 0 {
2144 throw("forEachP: sched.safePointWait != 0")
2145 }
2146 sched.safePointWait = gomaxprocs - 1
2147 sched.safePointFn = fn
2148
2149
2150 for _, p2 := range allp {
2151 if p2 != pp {
2152 atomic.Store(&p2.runSafePointFn, 1)
2153 }
2154 }
2155 preemptall()
2156
2157
2158
2159
2160
2161
2162
2163 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2164 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2165 fn(p)
2166 sched.safePointWait--
2167 }
2168 }
2169
2170 wait := sched.safePointWait > 0
2171 unlock(&sched.lock)
2172
2173
2174 fn(pp)
2175
2176
2177
2178 for _, p2 := range allp {
2179 if atomic.Load(&p2.runSafePointFn) != 1 {
2180
2181 continue
2182 }
2183 if thread, ok := setBlockOnExitSyscall(p2); ok {
2184 thread.takeP()
2185 thread.resume()
2186 handoffp(p2)
2187 }
2188 }
2189
2190
2191 if wait {
2192 for {
2193
2194
2195
2196
2197 if notetsleep(&sched.safePointNote, 100*1000) {
2198 noteclear(&sched.safePointNote)
2199 break
2200 }
2201 preemptall()
2202 }
2203 }
2204 if sched.safePointWait != 0 {
2205 throw("forEachP: not done")
2206 }
2207 for _, p2 := range allp {
2208 if p2.runSafePointFn != 0 {
2209 throw("forEachP: P did not run fn")
2210 }
2211 }
2212
2213 lock(&sched.lock)
2214 sched.safePointFn = nil
2215 unlock(&sched.lock)
2216 releasem(mp)
2217 }
2218
// runSafePointFn runs the safe point function, if any, for this P.
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes idle or into a syscall and neither forEachP
// nor the P itself runs the safe-point function.
2230 func runSafePointFn() {
2231 p := getg().m.p.ptr()
2232
2233
2234
2235 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2236 return
2237 }
2238 sched.safePointFn(p)
2239 lock(&sched.lock)
2240 sched.safePointWait--
2241 if sched.safePointWait == 0 {
2242 notewakeup(&sched.safePointNote)
2243 }
2244 unlock(&sched.lock)
2245 }
2246
2247
2248
2249
2250 var cgoThreadStart unsafe.Pointer
2251
2252 type cgothreadstart struct {
2253 g guintptr
2254 tls *uint64
2255 fn unsafe.Pointer
2256 }
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows pp.
//
//go:yeswritebarrierrec
2267 func allocm(pp *p, fn func(), id int64) *m {
2268 allocmLock.rlock()
2269
2270
2271
2272
2273 acquirem()
2274
2275 gp := getg()
2276 if gp.m.p == 0 {
2277 acquirep(pp)
2278 }
2279
2280
2281
2282 if sched.freem != nil {
2283 lock(&sched.lock)
2284 var newList *m
2285 for freem := sched.freem; freem != nil; {
2286
2287 wait := freem.freeWait.Load()
2288 if wait == freeMWait {
2289 next := freem.freelink
2290 freem.freelink = newList
2291 newList = freem
2292 freem = next
2293 continue
2294 }
2295
2296
2297
2298 if traceEnabled() || traceShuttingDown() {
2299 traceThreadDestroy(freem)
2300 }
2301
2302
2303
2304 if wait == freeMStack {
2305
2306
2307
2308 systemstack(func() {
2309 stackfree(freem.g0.stack)
2310 if valgrindenabled {
2311 valgrindDeregisterStack(freem.g0.valgrindStackID)
2312 freem.g0.valgrindStackID = 0
2313 }
2314 })
2315 }
2316 freem = freem.freelink
2317 }
2318 sched.freem = newList
2319 unlock(&sched.lock)
2320 }
2321
2322 mp := &new(mPadded).m
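// Assumption: mPadded wraps m with trailing padding; allocating it and
// taking the embedded m keeps unrelated allocations off the same cache line.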
2323 mp.mstartfn = fn
2324 mcommoninit(mp, id)
2325
2326
2327
2328 if iscgo || mStackIsSystemAllocated() {
2329 mp.g0 = malg(-1)
2330 } else {
2331 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2332 }
2333 mp.g0.m = mp
2334
2335 if pp == gp.m.p.ptr() {
2336 releasep()
2337 }
2338
2339 releasem(gp.m)
2340 allocmLock.runlock()
2341 return mp
2342 }
2343
// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via Casuintptr) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
2383 func needm(signal bool) {
2384 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2385
2386
2387
2388
2389
2390
2391 writeErrStr("fatal error: cgo callback before cgo call\n")
2392 exit(1)
2393 }
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 var sigmask sigset
2404 sigsave(&sigmask)
2405 sigblock(false)
2406
2407
2408
2409
2410 mp, last := getExtraM()
2411
2412
2413
2414
2415
2416
2417
2418
2419 mp.needextram = last
2420
2421
2422 mp.sigmask = sigmask
2423
2424
2425
2426 osSetupTLS(mp)
2427
2428
2429
2430 setg(mp.g0)
2431 sp := sys.GetCallerSP()
2432 callbackUpdateSystemStack(mp, sp, signal)
2433
2434
2435
2436
2437 mp.isExtraInC = false
2438
2439
2440 asminit()
2441 minit()
2442
2443
2444
2445
2446
2447
2448 var trace traceLocker
2449 if !signal {
2450 trace = traceAcquire()
2451 }
2452
2453
2454 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2455 sched.ngsys.Add(-1)
2456 sched.nGsyscallNoP.Add(1)
2457
2458 if !signal {
2459 if trace.ok() {
2460 trace.GoCreateSyscall(mp.curg)
2461 traceRelease(trace)
2462 }
2463 }
2464 mp.isExtraInSig = signal
2465 }
2466
2467
2468
2469
2470 func needAndBindM() {
2471 needm(false)
2472
2473 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2474 cgoBindM()
2475 }
2476 }
2477
// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
2481 func newextram() {
2482 c := extraMWaiters.Swap(0)
2483 if c > 0 {
2484 for i := uint32(0); i < c; i++ {
2485 oneNewExtraM()
2486 }
2487 } else if extraMLength.Load() == 0 {
2488
2489 oneNewExtraM()
2490 }
2491 }
2492
2493
2494 func oneNewExtraM() {
2495
2496
2497
2498
2499
2500 mp := allocm(nil, nil, -1)
2501 gp := malg(4096)
2502 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2503 gp.sched.sp = gp.stack.hi
2504 gp.sched.sp -= 4 * goarch.PtrSize
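// Leave extra space below sp in case a traceback reads slightly beyond
// the top frame.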
2505 gp.sched.lr = 0
2506 gp.sched.g = guintptr(unsafe.Pointer(gp))
2507 gp.syscallpc = gp.sched.pc
2508 gp.syscallsp = gp.sched.sp
2509 gp.stktopsp = gp.sched.sp
2510
2511
2512
2513 casgstatus(gp, _Gidle, _Gdeadextra)
2514 gp.m = mp
2515 mp.curg = gp
2516 mp.isextra = true
2517
2518 mp.isExtraInC = true
2519 mp.lockedInt++
2520 mp.lockedg.set(gp)
2521 gp.lockedm.set(mp)
2522 gp.goid = sched.goidgen.Add(1)
2523 if raceenabled {
2524 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2525 }
2526
2527 allgadd(gp)
2528
2529
2530
2531
2532
2533 sched.ngsys.Add(1)
2534
2535
2536 addExtraM(mp)
2537 }
2538
// dropm puts the current m back onto the extra list.
//
// It is called when a cgo callback is done with the m and the non-Go
// thread is returning to C, and (on systems with pthreads) from the
// pthread key destructor when a C thread that called into Go exits.
//
// The main expense here is the call to signalstack to release the m's
// signal stack, and then the call to needm on the next callback from
// this thread. It is tempting to save the m for the next callback, but
// the current thread (which Go does not control) might exit and never
// call back, which would leak the m. Instead, an m is acquired and
// released on each callback; these are typically just a few atomics,
// so the cost is small.
2572 func dropm() {
2573
2574
2575
2576 mp := getg().m
2577
2578
2579
2580
2581
2582 var trace traceLocker
2583 if !mp.isExtraInSig {
2584 trace = traceAcquire()
2585 }
2586
2587
2588 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2589 mp.curg.preemptStop = false
2590 sched.ngsys.Add(1)
2591 sched.nGsyscallNoP.Add(-1)
2592
2593 if !mp.isExtraInSig {
2594 if trace.ok() {
2595 trace.GoDestroySyscall()
2596 traceRelease(trace)
2597 }
2598 }
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613 mp.syscalltick--
2614
2615
2616
2617 mp.curg.trace.reset()
2618
2619
2620
2621
2622 if traceEnabled() || traceShuttingDown() {
2623
2624
2625
2626
2627
2628
2629
2630 lock(&sched.lock)
2631 traceThreadDestroy(mp)
2632 unlock(&sched.lock)
2633 }
2634 mp.isExtraInSig = false
2635
2636
2637
2638
2639
2640 sigmask := mp.sigmask
2641 sigblock(false)
2642 unminit()
2643
2644 setg(nil)
2645
2646
2647
2648 g0 := mp.g0
2649 g0.stack.hi = 0
2650 g0.stack.lo = 0
2651 g0.stackguard0 = 0
2652 g0.stackguard1 = 0
2653 mp.g0StackAccurate = false
2654
2655 putExtraM(mp)
2656
2657 msigrestore(sigmask)
2658 }
// cgoBindM stores the g0 of the current m into a thread-specific value
// (a pthread key) so that a thread-exit destructor can call dropm when
// a C thread that has called into Go exits. g0 is stored rather than m
// to keep the assembly in runtime.cgocallback simpler.
//
// On systems without pthreads, like Windows, cgoBindM is not used.
//
// NOTE: this always runs without a P, so write barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec
2680 func cgoBindM() {
2681 if GOOS == "windows" || GOOS == "plan9" {
2682 fatal("bindm in unexpected GOOS")
2683 }
2684 g := getg()
2685 if g.m.g0 != g {
2686 fatal("the current g is not g0")
2687 }
2688 if _cgo_bindm != nil {
2689 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2690 }
2691 }
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704 func getm() uintptr {
2705 return uintptr(unsafe.Pointer(getg().m))
2706 }
2707
2708 var (
2709
2710
2711
2712
2713
2714
2715 extraM atomic.Uintptr
2716
2717 extraMLength atomic.Uint32
2718
2719 extraMWaiters atomic.Uint32
2720
2721
2722 extraMInUse atomic.Uint32
2723 )
2724
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extraM. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//
//go:nosplit
2732 func lockextra(nilokay bool) *m {
2733 const locked = 1
2734
2735 incr := false
2736 for {
2737 old := extraM.Load()
2738 if old == locked {
2739 osyield_no_g()
2740 continue
2741 }
2742 if old == 0 && !nilokay {
2743 if !incr {
2744
2745
2746
2747 extraMWaiters.Add(1)
2748 incr = true
2749 }
2750 usleep_no_g(1)
2751 continue
2752 }
2753 if extraM.CompareAndSwap(old, locked) {
2754 return (*m)(unsafe.Pointer(old))
2755 }
2756 osyield_no_g()
2757 continue
2758 }
2759 }
2760
2761
2762 func unlockextra(mp *m, delta int32) {
2763 extraMLength.Add(delta)
2764 extraM.Store(uintptr(unsafe.Pointer(mp)))
2765 }
2766
2767
2768
2769
2770
2771
2772
2773
2774 func getExtraM() (mp *m, last bool) {
2775 mp = lockextra(false)
2776 extraMInUse.Add(1)
2777 unlockextra(mp.schedlink.ptr(), -1)
2778 return mp, mp.schedlink.ptr() == nil
2779 }
2780
2781
2782
2783
2784
2785 func putExtraM(mp *m) {
2786 extraMInUse.Add(-1)
2787 addExtraM(mp)
2788 }
2789
2790
2791
2792
2793 func addExtraM(mp *m) {
2794 mnext := lockextra(true)
2795 mp.schedlink.set(mnext)
2796 unlockextra(mp, 1)
2797 }
2798
2799 var (
2800
2801
2802
2803 allocmLock rwmutex
2804
2805
2806
2807
2808 execLock rwmutex
2809 )
2810
2811
2812
2813 const (
2814 failthreadcreate = "runtime: failed to create new OS thread\n"
2815 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2816 )
2817
// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
// start an OS thread.
2821 var newmHandoff struct {
2822 lock mutex
2823
2824
2825
2826 newm muintptr
2827
2828
2829
2830 waiting bool
2831 wake note
2832
2833
2834
2835
2836 haveTemplateThread uint32
2837 }
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
//
//go:nowritebarrierrec
2846 func newm(fn func(), pp *p, id int64) {
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857 acquirem()
2858
2859 mp := allocm(pp, fn, id)
2860 mp.nextp.set(pp)
2861 mp.sigmask = initSigmask
2862 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
// We're on a locked M or a thread that may have been
// started by C. The kernel state of this thread may
// be strange (the user may have locked it for that
// purpose). We don't want to clone that into another
// thread. Instead, ask a known-good thread to create
// the thread for us.
//
// This is disabled on Plan 9. See golang.org/issue/22227.
2874 lock(&newmHandoff.lock)
2875 if newmHandoff.haveTemplateThread == 0 {
2876 throw("on a locked thread with no template thread")
2877 }
2878 mp.schedlink = newmHandoff.newm
2879 newmHandoff.newm.set(mp)
2880 if newmHandoff.waiting {
2881 newmHandoff.waiting = false
2882 notewakeup(&newmHandoff.wake)
2883 }
2884 unlock(&newmHandoff.lock)
2885
2886
2887
2888 releasem(getg().m)
2889 return
2890 }
2891 newm1(mp)
2892 releasem(getg().m)
2893 }
2894
2895 func newm1(mp *m) {
2896 if iscgo {
2897 var ts cgothreadstart
2898 if _cgo_thread_start == nil {
2899 throw("_cgo_thread_start missing")
2900 }
2901 ts.g.set(mp.g0)
2902 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2903 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2904 if msanenabled {
2905 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2906 }
2907 if asanenabled {
2908 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2909 }
2910 execLock.rlock()
2911 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2912 execLock.runlock()
2913 return
2914 }
2915 execLock.rlock()
2916 newosproc(mp)
2917 execLock.runlock()
2918 }
2919
2920
2921
2922
2923
2924 func startTemplateThread() {
2925 if GOARCH == "wasm" {
2926 return
2927 }
2928
2929
2930
2931 mp := acquirem()
2932 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2933 releasem(mp)
2934 return
2935 }
2936 newm(templateThread, nil, -1)
2937 releasem(mp)
2938 }
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
//
// Many programs never need this, so templateThread is started lazily
// when we first enter a state that might lead to running on a thread
// in an unknown state.
//
// templateThread runs on an M without a P, so it must not have write
// barriers.
//
//go:nowritebarrierrec
2952 func templateThread() {
2953 lock(&sched.lock)
2954 sched.nmsys++
2955 checkdead()
2956 unlock(&sched.lock)
2957
2958 for {
2959 lock(&newmHandoff.lock)
2960 for newmHandoff.newm != 0 {
2961 newm := newmHandoff.newm.ptr()
2962 newmHandoff.newm = 0
2963 unlock(&newmHandoff.lock)
2964 for newm != nil {
2965 next := newm.schedlink.ptr()
2966 newm.schedlink = 0
2967 newm1(newm)
2968 newm = next
2969 }
2970 lock(&newmHandoff.lock)
2971 }
2972 newmHandoff.waiting = true
2973 noteclear(&newmHandoff.wake)
2974 unlock(&newmHandoff.lock)
2975 notesleep(&newmHandoff.wake)
2976 }
2977 }
// Stops execution of the current m until new work is available.
// Returns with acquired P.
2981 func stopm() {
2982 gp := getg()
2983
2984 if gp.m.locks != 0 {
2985 throw("stopm holding locks")
2986 }
2987 if gp.m.p != 0 {
2988 throw("stopm holding p")
2989 }
2990 if gp.m.spinning {
2991 throw("stopm spinning")
2992 }
2993
2994 lock(&sched.lock)
2995 mput(gp.m)
2996 unlock(&sched.lock)
2997 mPark()
2998 acquirep(gp.m.nextp.ptr())
2999 gp.m.nextp = 0
3000 }
3001
3002 func mspinning() {
3003
3004 getg().m.spinning = true
3005 }
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide a
// P. startm will set m.spinning in the newly started M.
//
// Callers passing a non-nil P must call from a non-preemptible context. See
// comment on acquirem below.
//
// Argument lockheld indicates whether the caller already acquired the
// scheduler lock. Callers holding the lock when making the call must pass
// true. The lock might be temporarily dropped, but will be reacquired before
// returning.
//
// Must not have write barriers because this may be called without a P.
//
//go:nowritebarrierrec
3024 func startm(pp *p, spinning, lockheld bool) {
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041 mp := acquirem()
3042 if !lockheld {
3043 lock(&sched.lock)
3044 }
3045 if pp == nil {
3046 if spinning {
3047
3048
3049
3050 throw("startm: P required for spinning=true")
3051 }
3052 pp, _ = pidleget(0)
3053 if pp == nil {
3054 if !lockheld {
3055 unlock(&sched.lock)
3056 }
3057 releasem(mp)
3058 return
3059 }
3060 }
3061 nmp := mget()
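// Try to reuse an idle M; if none is available, allocate and start a
// new one below.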
3062 if nmp == nil {
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077 id := mReserveID()
3078 unlock(&sched.lock)
3079
3080 var fn func()
3081 if spinning {
3082
3083 fn = mspinning
3084 }
3085 newm(fn, pp, id)
3086
3087 if lockheld {
3088 lock(&sched.lock)
3089 }
3090
3091
3092 releasem(mp)
3093 return
3094 }
3095 if !lockheld {
3096 unlock(&sched.lock)
3097 }
3098 if nmp.spinning {
3099 throw("startm: m is spinning")
3100 }
3101 if nmp.nextp != 0 {
3102 throw("startm: m has p")
3103 }
3104 if spinning && !runqempty(pp) {
3105 throw("startm: p has runnable gs")
3106 }
3107
3108 nmp.spinning = spinning
3109 nmp.nextp.set(pp)
3110 notewakeup(&nmp.park)
3111
3112
3113 releasem(mp)
3114 }
// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
3120 func handoffp(pp *p) {
// handoffp must start an M in any situation where
// findRunnable would return a G to run on pp.

// if it has local work, start it straight away
3125 if !runqempty(pp) || !sched.runq.empty() {
3126 startm(pp, false, false)
3127 return
3128 }
3129
3130 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3131 startm(pp, false, false)
3132 return
3133 }
3134
3135 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3136 startm(pp, false, false)
3137 return
3138 }
3139
3140
3141 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3142 sched.needspinning.Store(0)
3143 startm(pp, true, false)
3144 return
3145 }
3146 lock(&sched.lock)
3147 if sched.gcwaiting.Load() {
3148 pp.status = _Pgcstop
3149 pp.gcStopTime = nanotime()
3150 sched.stopwait--
3151 if sched.stopwait == 0 {
3152 notewakeup(&sched.stopnote)
3153 }
3154 unlock(&sched.lock)
3155 return
3156 }
3157 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3158 sched.safePointFn(pp)
3159 sched.safePointWait--
3160 if sched.safePointWait == 0 {
3161 notewakeup(&sched.safePointNote)
3162 }
3163 }
3164 if !sched.runq.empty() {
3165 unlock(&sched.lock)
3166 startm(pp, false, false)
3167 return
3168 }
3169
3170
3171 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3172 unlock(&sched.lock)
3173 startm(pp, false, false)
3174 return
3175 }
3176
3177
3178
3179 when := pp.timers.wakeTime()
3180 pidleput(pp, 0)
3181 unlock(&sched.lock)
3182
3183 if when != 0 {
3184 wakeNetPoller(when)
3185 }
3186 }
3187
// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
3201 func wakep() {
3202
3203
3204 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3205 return
3206 }
3207
3208
3209
3210
3211
3212
3213 mp := acquirem()
3214
3215 var pp *p
3216 lock(&sched.lock)
3217 pp, _ = pidlegetSpinning(0)
3218 if pp == nil {
3219 if sched.nmspinning.Add(-1) < 0 {
3220 throw("wakep: negative nmspinning")
3221 }
3222 unlock(&sched.lock)
3223 releasem(mp)
3224 return
3225 }
3226
3227
3228
3229
3230 unlock(&sched.lock)
3231
3232 startm(pp, true, false)
3233
3234 releasem(mp)
3235 }
3236
3237
3238
3239 func stoplockedm() {
3240 gp := getg()
3241
3242 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3243 throw("stoplockedm: inconsistent locking")
3244 }
3245 if gp.m.p != 0 {
3246
3247 pp := releasep()
3248 handoffp(pp)
3249 }
3250 incidlelocked(1)
3251
3252 mPark()
3253 status := readgstatus(gp.m.lockedg.ptr())
3254 if status&^_Gscan != _Grunnable {
3255 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3256 dumpgstatus(gp.m.lockedg.ptr())
3257 throw("stoplockedm: not runnable")
3258 }
3259 acquirep(gp.m.nextp.ptr())
3260 gp.m.nextp = 0
3261 }
3262
3263
3264
3265
3266
3267 func startlockedm(gp *g) {
3268 mp := gp.lockedm.ptr()
3269 if mp == getg().m {
3270 throw("startlockedm: locked to me")
3271 }
3272 if mp.nextp != 0 {
3273 throw("startlockedm: m has p")
3274 }
3275
3276 incidlelocked(-1)
3277 pp := releasep()
3278 mp.nextp.set(pp)
3279 notewakeup(&mp.park)
3280 stopm()
3281 }
3282
3283
3284
3285 func gcstopm() {
3286 gp := getg()
3287
3288 if !sched.gcwaiting.Load() {
3289 throw("gcstopm: not waiting for gc")
3290 }
3291 if gp.m.spinning {
3292 gp.m.spinning = false
3293
3294
3295 if sched.nmspinning.Add(-1) < 0 {
3296 throw("gcstopm: negative nmspinning")
3297 }
3298 }
3299 pp := releasep()
3300 lock(&sched.lock)
3301 pp.status = _Pgcstop
3302 pp.gcStopTime = nanotime()
3303 sched.stopwait--
3304 if sched.stopwait == 0 {
3305 notewakeup(&sched.stopnote)
3306 }
3307 unlock(&sched.lock)
3308 stopm()
3309 }
3310
// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.
//
//go:yeswritebarrierrec
3320 func execute(gp *g, inheritTime bool) {
3321 mp := getg().m
3322
3323 if goroutineProfile.active {
3324
3325
3326
3327 tryRecordGoroutineProfile(gp, nil, osyield)
3328 }
3329
3330
3331 mp.curg = gp
3332 gp.m = mp
3333 gp.syncSafePoint = false
3334 casgstatus(gp, _Grunnable, _Grunning)
3335 gp.waitsince = 0
3336 gp.preempt = false
3337 gp.stackguard0 = gp.stack.lo + stackGuard
3338 if !inheritTime {
3339 mp.p.ptr().schedtick++
3340 }
3341
3342
3343 hz := sched.profilehz
3344 if mp.profilehz != hz {
3345 setThreadCPUProfiler(hz)
3346 }
3347
3348 trace := traceAcquire()
3349 if trace.ok() {
3350 trace.GoStart()
3351 traceRelease(trace)
3352 }
3353
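// gogo switches to gp's stack and resumes it at gp.sched.pc; it does
// not return to this frame.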
3354 gogo(&gp.sched)
3355 }
3356
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.
3361 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3362 mp := getg().m
3363
3364
3365
3366
3367
3368 top:
3369
3370
3371
3372 mp.clearAllpSnapshot()
3373
3374 pp := mp.p.ptr()
3375 if sched.gcwaiting.Load() {
3376 gcstopm()
3377 goto top
3378 }
3379 if pp.runSafePointFn != 0 {
3380 runSafePointFn()
3381 }
3382
3383
3384
3385
3386
3387 now, pollUntil, _ := pp.timers.check(0, nil)
3388
3389
3390 if traceEnabled() || traceShuttingDown() {
3391 gp := traceReader()
3392 if gp != nil {
3393 trace := traceAcquire()
3394 casgstatus(gp, _Gwaiting, _Grunnable)
3395 if trace.ok() {
3396 trace.GoUnpark(gp, 0)
3397 traceRelease(trace)
3398 }
3399 return gp, false, true
3400 }
3401 }
3402
3403
3404 if gcBlackenEnabled != 0 {
3405 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3406 if gp != nil {
3407 return gp, false, true
3408 }
3409 now = tnow
3410 }
// Check the global runnable queue once in a while to ensure fairness.
// Otherwise two goroutines can completely occupy the local runqueue
// by constantly respawning each other.
3415 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3416 lock(&sched.lock)
3417 gp := globrunqget()
3418 unlock(&sched.lock)
3419 if gp != nil {
3420 return gp, false, false
3421 }
3422 }
3423
3424
3425 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3426 if gp := wakefing(); gp != nil {
3427 ready(gp, 0, true)
3428 }
3429 }
3430
3431
3432 if gcCleanups.needsWake() {
3433 gcCleanups.wake()
3434 }
3435
3436 if *cgo_yield != nil {
3437 asmcgocall(*cgo_yield, nil)
3438 }
3439
3440
3441 if gp, inheritTime := runqget(pp); gp != nil {
3442 return gp, inheritTime, false
3443 }
3444
3445
3446 if !sched.runq.empty() {
3447 lock(&sched.lock)
3448 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3449 unlock(&sched.lock)
3450 if gp != nil {
3451 if runqputbatch(pp, &q); !q.empty() {
3452 throw("Couldn't put Gs into empty local runq")
3453 }
3454 return gp, false, false
3455 }
3456 }
// Poll network.
// This netpoll is only an optimization before we resort to stealing.
// We can safely skip it if there are no waiters or a thread is blocked
// in netpoll already. If there is any kind of logical race with that
// blocked thread (e.g. it has already returned from netpoll, but does
// not set lastpoll yet), this thread will do blocking netpoll below
// anyway.
3467 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3468 list, delta := netpoll(0)
3469 sched.pollingNet.Store(0)
3470 if !list.empty() {
3471 gp := list.pop()
3472 injectglist(&list)
3473 netpollAdjustWaiters(delta)
3474 trace := traceAcquire()
3475 casgstatus(gp, _Gwaiting, _Grunnable)
3476 if trace.ok() {
3477 trace.GoUnpark(gp, 0)
3478 traceRelease(trace)
3479 }
3480 return gp, false, false
3481 }
3482 }
// Spinning Ms: steal work from other Ps.
//
// Limit the number of spinning Ms to half the number of busy Ps.
// This is necessary to prevent excessive CPU consumption when
// GOMAXPROCS>>1 but the program parallelism is low.
3489 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3490 if !mp.spinning {
3491 mp.becomeSpinning()
3492 }
3493
3494 gp, inheritTime, tnow, w, newWork := stealWork(now)
3495 if gp != nil {
3496
3497 return gp, inheritTime, false
3498 }
3499 if newWork {
3500
3501
3502 goto top
3503 }
3504
3505 now = tnow
3506 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3507
3508 pollUntil = w
3509 }
3510 }
3511
3512
3513
3514
3515
3516 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3517 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3518 if node != nil {
3519 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3520 gp := node.gp.ptr()
3521
3522 trace := traceAcquire()
3523 casgstatus(gp, _Gwaiting, _Grunnable)
3524 if trace.ok() {
3525 trace.GoUnpark(gp, 0)
3526 traceRelease(trace)
3527 }
3528 return gp, false, false
3529 }
3530 gcController.removeIdleMarkWorker()
3531 }
3532
3533
3534
3535
3536
3537 gp, otherReady := beforeIdle(now, pollUntil)
3538 if gp != nil {
3539 trace := traceAcquire()
3540 casgstatus(gp, _Gwaiting, _Grunnable)
3541 if trace.ok() {
3542 trace.GoUnpark(gp, 0)
3543 traceRelease(trace)
3544 }
3545 return gp, false, false
3546 }
3547 if otherReady {
3548 goto top
3549 }
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559 allpSnapshot := mp.snapshotAllp()
3560
3561
3562 idlepMaskSnapshot := idlepMask
3563 timerpMaskSnapshot := timerpMask
3564
3565
3566 lock(&sched.lock)
3567 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3568 unlock(&sched.lock)
3569 goto top
3570 }
3571 if !sched.runq.empty() {
3572 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3573 unlock(&sched.lock)
3574 if gp == nil {
3575 throw("global runq empty with non-zero runqsize")
3576 }
3577 if runqputbatch(pp, &q); !q.empty() {
3578 throw("Couldn't put Gs into empty local runq")
3579 }
3580 return gp, false, false
3581 }
3582 if !mp.spinning && sched.needspinning.Load() == 1 {
3583
3584 mp.becomeSpinning()
3585 unlock(&sched.lock)
3586 goto top
3587 }
3588 if releasep() != pp {
3589 throw("findRunnable: wrong p")
3590 }
3591 now = pidleput(pp, now)
3592 unlock(&sched.lock)
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
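// This M is transitioning from spinning to non-spinning. Once
// nmspinning is decremented, code submitting new work may decide not to
// wake another M, so we must recheck all sources of work (global queue,
// per-P run queues, idle GC work, timers) one last time before parking.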
3630 wasSpinning := mp.spinning
3631 if mp.spinning {
3632 mp.spinning = false
3633 if sched.nmspinning.Add(-1) < 0 {
3634 throw("findRunnable: negative nmspinning")
3635 }
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648 lock(&sched.lock)
3649 if !sched.runq.empty() {
3650 pp, _ := pidlegetSpinning(0)
3651 if pp != nil {
3652 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3653 unlock(&sched.lock)
3654 if gp == nil {
3655 throw("global runq empty with non-zero runqsize")
3656 }
3657 if runqputbatch(pp, &q); !q.empty() {
3658 throw("Couldn't put Gs into empty local runq")
3659 }
3660 acquirep(pp)
3661 mp.becomeSpinning()
3662 return gp, false, false
3663 }
3664 }
3665 unlock(&sched.lock)
3666
3667 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3668 if pp != nil {
3669 acquirep(pp)
3670 mp.becomeSpinning()
3671 goto top
3672 }
3673
3674
3675 pp, gp := checkIdleGCNoP()
3676 if pp != nil {
3677 acquirep(pp)
3678 mp.becomeSpinning()
3679
3680
3681 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3682 trace := traceAcquire()
3683 casgstatus(gp, _Gwaiting, _Grunnable)
3684 if trace.ok() {
3685 trace.GoUnpark(gp, 0)
3686 traceRelease(trace)
3687 }
3688 return gp, false, false
3689 }
3690
3691
3692
3693
3694
3695
3696
3697 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3698 }
3699
3700
3701
3702
3703
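// Poll the network until the next timer is due (or indefinitely if
// there is none). Clearing sched.lastpoll marks that an M is blocked in
// netpoll so that wakeNetPoller knows to interrupt it.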
3704 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3705 sched.pollUntil.Store(pollUntil)
3706 if mp.p != 0 {
3707 throw("findRunnable: netpoll with p")
3708 }
3709 if mp.spinning {
3710 throw("findRunnable: netpoll with spinning")
3711 }
3712 delay := int64(-1)
3713 if pollUntil != 0 {
3714 if now == 0 {
3715 now = nanotime()
3716 }
3717 delay = pollUntil - now
3718 if delay < 0 {
3719 delay = 0
3720 }
3721 }
3722 if faketime != 0 {
3723
3724 delay = 0
3725 }
3726 list, delta := netpoll(delay)
3727
3728 now = nanotime()
3729 sched.pollUntil.Store(0)
3730 sched.lastpoll.Store(now)
3731 if faketime != 0 && list.empty() {
3732
3733
3734 stopm()
3735 goto top
3736 }
3737 lock(&sched.lock)
3738 pp, _ := pidleget(now)
3739 unlock(&sched.lock)
3740 if pp == nil {
3741 injectglist(&list)
3742 netpollAdjustWaiters(delta)
3743 } else {
3744 acquirep(pp)
3745 if !list.empty() {
3746 gp := list.pop()
3747 injectglist(&list)
3748 netpollAdjustWaiters(delta)
3749 trace := traceAcquire()
3750 casgstatus(gp, _Gwaiting, _Grunnable)
3751 if trace.ok() {
3752 trace.GoUnpark(gp, 0)
3753 traceRelease(trace)
3754 }
3755 return gp, false, false
3756 }
3757 if wasSpinning {
3758 mp.becomeSpinning()
3759 }
3760 goto top
3761 }
3762 } else if pollUntil != 0 && netpollinited() {
3763 pollerPollUntil := sched.pollUntil.Load()
3764 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3765 netpollBreak()
3766 }
3767 }
3768 stopm()
3769 goto top
3770 }
3771
3772
3773
3774
3775
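// pollWork reports whether there is runnable work this P could pick up:
// a non-empty global or local run queue, or network pollers that are
// ready to run.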
3776 func pollWork() bool {
3777 if !sched.runq.empty() {
3778 return true
3779 }
3780 p := getg().m.p.ptr()
3781 if !runqempty(p) {
3782 return true
3783 }
3784 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3785 if list, delta := netpoll(0); !list.empty() {
3786 injectglist(&list)
3787 netpollAdjustWaiters(delta)
3788 return true
3789 }
3790 }
3791 return false
3792 }
3793
3794
3795
3796
3797
3798
3799
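// stealWork attempts to steal a runnable goroutine or timer from any
// other P. It returns a goroutine to run, or updated now/pollUntil
// values; newWork reports that additional work may have appeared and
// the caller should look again.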
3800 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3801 pp := getg().m.p.ptr()
3802
3803 ranTimer := false
3804
3805 const stealTries = 4
3806 for i := 0; i < stealTries; i++ {
3807 stealTimersOrRunNextG := i == stealTries-1
3808
3809 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3810 if sched.gcwaiting.Load() {
3811
3812 return nil, false, now, pollUntil, true
3813 }
3814 p2 := allp[enum.position()]
3815 if pp == p2 {
3816 continue
3817 }
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3833 tnow, w, ran := p2.timers.check(now, nil)
3834 now = tnow
3835 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3836 pollUntil = w
3837 }
3838 if ran {
3839
3840
3841
3842
3843
3844
3845
3846
3847 if gp, inheritTime := runqget(pp); gp != nil {
3848 return gp, inheritTime, now, pollUntil, ranTimer
3849 }
3850 ranTimer = true
3851 }
3852 }
3853
3854
3855 if !idlepMask.read(enum.position()) {
3856 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3857 return gp, false, now, pollUntil, ranTimer
3858 }
3859 }
3860 }
3861 }
3862
3863
3864
3865
3866 return nil, false, now, pollUntil, ranTimer
3867 }
3868
3869
3870
3871
3872
3873
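// checkRunqsNoP rechecks all Ps' run queues after this M has released
// its P. If some P has queued work and an idle P can be acquired, that
// idle P is returned so the caller can take it and look again;
// otherwise it returns nil.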
3874 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3875 for id, p2 := range allpSnapshot {
3876 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3877 lock(&sched.lock)
3878 pp, _ := pidlegetSpinning(0)
3879 if pp == nil {
3880
3881 unlock(&sched.lock)
3882 return nil
3883 }
3884 unlock(&sched.lock)
3885 return pp
3886 }
3887 }
3888
3889
3890 return nil
3891 }
3892
3893
3894
3895
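// checkTimersNoP scans all Ps that may have timers and returns the
// earliest timer wake time, folded into pollUntil.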
3896 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3897 for id, p2 := range allpSnapshot {
3898 if timerpMaskSnapshot.read(uint32(id)) {
3899 w := p2.timers.wakeTime()
3900 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3901 pollUntil = w
3902 }
3903 }
3904 }
3905
3906 return pollUntil
3907 }
3908
3909
3910
3911
3912
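// checkIdleGCNoP checks whether an idle-priority GC mark worker should
// run even though this M has no P. On success it returns an idle P and
// the worker goroutine to run on it; otherwise nil, nil.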
3913 func checkIdleGCNoP() (*p, *g) {
3914
3915
3916
3917
3918
3919
3920 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3921 return nil, nil
3922 }
3923 if !gcShouldScheduleWorker(nil) {
3924 return nil, nil
3925 }
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944 lock(&sched.lock)
3945 pp, now := pidlegetSpinning(0)
3946 if pp == nil {
3947 unlock(&sched.lock)
3948 return nil, nil
3949 }
3950
3951
3952 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3953 pidleput(pp, now)
3954 unlock(&sched.lock)
3955 return nil, nil
3956 }
3957
3958 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3959 if node == nil {
3960 pidleput(pp, now)
3961 unlock(&sched.lock)
3962 gcController.removeIdleMarkWorker()
3963 return nil, nil
3964 }
3965
3966 unlock(&sched.lock)
3967
3968 return pp, node.gp.ptr()
3969 }
3970
3971
3972
3973
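// wakeNetPoller ensures that a timer firing at when will be noticed: if
// an M is blocked in netpoll and would sleep past when, it is woken
// with netpollBreak; otherwise (except on plan9) an idle M is started
// via wakep so that it will find the timer.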
3974 func wakeNetPoller(when int64) {
3975 if sched.lastpoll.Load() == 0 {
3976
3977
3978
3979
3980 pollerPollUntil := sched.pollUntil.Load()
3981 if pollerPollUntil == 0 || pollerPollUntil > when {
3982 netpollBreak()
3983 }
3984 } else {
3985
3986
3987 if GOOS != "plan9" {
3988 wakep()
3989 }
3990 }
3991 }
3992
3993 func resetspinning() {
3994 gp := getg()
3995 if !gp.m.spinning {
3996 throw("resetspinning: not a spinning m")
3997 }
3998 gp.m.spinning = false
3999 nmspinning := sched.nmspinning.Add(-1)
4000 if nmspinning < 0 {
4001 throw("findRunnable: negative nmspinning")
4002 }
4003
4004
4005
4006 wakep()
4007 }
4008
4009
4010
4011
4012
4013
4014
4015
4016
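// injectglist marks every goroutine on glist runnable and moves them to
// run queues, clearing the list. With no P attached, everything goes to
// the global queue. Otherwise up to npidle goroutines go to the global
// queue (and idle Ps are started to run them) and the rest go to the
// current P's local queue, overflowing back to the global queue.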
4017 func injectglist(glist *gList) {
4018 if glist.empty() {
4019 return
4020 }
4021
4022
4023
4024 var tail *g
4025 trace := traceAcquire()
4026 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4027 tail = gp
4028 casgstatus(gp, _Gwaiting, _Grunnable)
4029 if trace.ok() {
4030 trace.GoUnpark(gp, 0)
4031 }
4032 }
4033 if trace.ok() {
4034 traceRelease(trace)
4035 }
4036
4037
4038 q := gQueue{glist.head, tail.guintptr(), glist.size}
4039 *glist = gList{}
4040
4041 startIdle := func(n int32) {
4042 for ; n > 0; n-- {
4043 mp := acquirem()
4044 lock(&sched.lock)
4045
4046 pp, _ := pidlegetSpinning(0)
4047 if pp == nil {
4048 unlock(&sched.lock)
4049 releasem(mp)
4050 break
4051 }
4052
4053 startm(pp, false, true)
4054 unlock(&sched.lock)
4055 releasem(mp)
4056 }
4057 }
4058
4059 pp := getg().m.p.ptr()
4060 if pp == nil {
4061 n := q.size
4062 lock(&sched.lock)
4063 globrunqputbatch(&q)
4064 unlock(&sched.lock)
4065 startIdle(n)
4066 return
4067 }
4068
4069 var globq gQueue
4070 npidle := sched.npidle.Load()
4071 for ; npidle > 0 && !q.empty(); npidle-- {
4072 g := q.pop()
4073 globq.pushBack(g)
4074 }
4075 if !globq.empty() {
4076 n := globq.size
4077 lock(&sched.lock)
4078 globrunqputbatch(&globq)
4079 unlock(&sched.lock)
4080 startIdle(n)
4081 }
4082
4083 if runqputbatch(pp, &q); !q.empty() {
4084 lock(&sched.lock)
4085 globrunqputbatch(&q)
4086 unlock(&sched.lock)
4087 }
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102 wakep()
4103 }
4104
4105
4106
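// schedule runs one round of the scheduler: find a runnable goroutine
// and execute it on this M. It never returns.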
4107 func schedule() {
4108 mp := getg().m
4109
4110 if mp.locks != 0 {
4111 throw("schedule: holding locks")
4112 }
4113
4114 if mp.lockedg != 0 {
4115 stoplockedm()
4116 execute(mp.lockedg.ptr(), false)
4117 }
4118
4119
4120
4121 if mp.incgo {
4122 throw("schedule: in cgo")
4123 }
4124
4125 top:
4126 pp := mp.p.ptr()
4127 pp.preempt = false
4128
4129
4130
4131
4132 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4133 throw("schedule: spinning with local work")
4134 }
4135
4136 gp, inheritTime, tryWakeP := findRunnable()
4137
4138
4139 pp = mp.p.ptr()
4140
4141
4142
4143
4144 mp.clearAllpSnapshot()
4145
4146
4147
4148
4149
4150
4151
4152
4153 gcController.releaseNextGCMarkWorker(pp)
4154
4155 if debug.dontfreezetheworld > 0 && freezing.Load() {
4156
4157
4158
4159
4160
4161
4162
4163 lock(&deadlock)
4164 lock(&deadlock)
4165 }
4166
4167
4168
4169
4170 if mp.spinning {
4171 resetspinning()
4172 }
4173
4174 if sched.disable.user && !schedEnabled(gp) {
4175
4176
4177
4178 lock(&sched.lock)
4179 if schedEnabled(gp) {
4180
4181
4182 unlock(&sched.lock)
4183 } else {
4184 sched.disable.runnable.pushBack(gp)
4185 unlock(&sched.lock)
4186 goto top
4187 }
4188 }
4189
4190
4191
4192 if tryWakeP {
4193 wakep()
4194 }
4195 if gp.lockedm != 0 {
4196
4197
4198 startlockedm(gp)
4199 goto top
4200 }
4201
4202 execute(gp, inheritTime)
4203 }
4204
4205
4206
4207
4208
4209
4210
4211
4212 func dropg() {
4213 gp := getg()
4214
4215 setMNoWB(&gp.m.curg.m, nil)
4216 setGNoWB(&gp.m.curg, nil)
4217 }
4218
4219 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4220 unlock((*mutex)(lock))
4221 return true
4222 }
4223
4224
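// park_m finishes gopark on the g0 stack: it moves gp to _Gwaiting,
// detaches it from the M, and runs the caller's unlock function. If
// that function returns false, gp is made runnable again and resumed
// immediately; otherwise the M goes back into schedule.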
4225 func park_m(gp *g) {
4226 mp := getg().m
4227
4228 trace := traceAcquire()
4229
4230
4231
4232
4233
4234 bubble := gp.bubble
4235 if bubble != nil {
4236 bubble.incActive()
4237 }
4238
4239 if trace.ok() {
4240
4241
4242
4243 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4244 }
4245
4246
4247 casgstatus(gp, _Grunning, _Gwaiting)
4248 if trace.ok() {
4249 traceRelease(trace)
4250 }
4251
4252 dropg()
4253
4254 if fn := mp.waitunlockf; fn != nil {
4255 ok := fn(gp, mp.waitlock)
4256 mp.waitunlockf = nil
4257 mp.waitlock = nil
4258 if !ok {
4259 trace := traceAcquire()
4260 casgstatus(gp, _Gwaiting, _Grunnable)
4261 if bubble != nil {
4262 bubble.decActive()
4263 }
4264 if trace.ok() {
4265 trace.GoUnpark(gp, 2)
4266 traceRelease(trace)
4267 }
4268 execute(gp, true)
4269 }
4270 }
4271
4272 if bubble != nil {
4273 bubble.decActive()
4274 }
4275
4276 schedule()
4277 }
4278
4279 func goschedImpl(gp *g, preempted bool) {
4280 pp := gp.m.p.ptr()
4281 trace := traceAcquire()
4282 status := readgstatus(gp)
4283 if status&^_Gscan != _Grunning {
4284 dumpgstatus(gp)
4285 throw("bad g status")
4286 }
4287 if trace.ok() {
4288
4289
4290
4291 if preempted {
4292 trace.GoPreempt()
4293 } else {
4294 trace.GoSched()
4295 }
4296 }
4297 casgstatus(gp, _Grunning, _Grunnable)
4298 if trace.ok() {
4299 traceRelease(trace)
4300 }
4301
4302 dropg()
4303 if preempted && sched.gcwaiting.Load() {
4304
4305
4306 runqput(pp, gp, true)
4307 } else {
4308 lock(&sched.lock)
4309 globrunqput(gp)
4310 unlock(&sched.lock)
4311 }
4312
4313 if mainStarted {
4314 wakep()
4315 }
4316
4317 schedule()
4318 }
4319
4320
4321 func gosched_m(gp *g) {
4322 goschedImpl(gp, false)
4323 }
4324
4325
4326 func goschedguarded_m(gp *g) {
4327 if !canPreemptM(gp.m) {
4328 gogo(&gp.sched)
4329 }
4330 goschedImpl(gp, false)
4331 }
4332
4333 func gopreempt_m(gp *g) {
4334 goschedImpl(gp, true)
4335 }
4336
4337
4338
4339
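// preemptPark parks gp in the _Gpreempted state in response to a
// preemptStop request and schedules something else. gp stays parked
// until whoever requested the suspension makes it runnable again.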
4340 func preemptPark(gp *g) {
4341 status := readgstatus(gp)
4342 if status&^_Gscan != _Grunning {
4343 dumpgstatus(gp)
4344 throw("bad g status")
4345 }
4346
4347 if gp.asyncSafePoint {
4348
4349
4350
4351 f := findfunc(gp.sched.pc)
4352 if !f.valid() {
4353 throw("preempt at unknown pc")
4354 }
4355 if f.flag&abi.FuncFlagSPWrite != 0 {
4356 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4357 throw("preempt SPWRITE")
4358 }
4359 }
4360
4361
4362
4363
4364
4365
4366
4367 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4368 dropg()
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390 trace := traceAcquire()
4391 if trace.ok() {
4392 trace.GoPark(traceBlockPreempted, 0)
4393 }
4394 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4395 if trace.ok() {
4396 traceRelease(trace)
4397 }
4398 schedule()
4399 }
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415 func goyield() {
4416 checkTimeouts()
4417 mcall(goyield_m)
4418 }
4419
4420 func goyield_m(gp *g) {
4421 trace := traceAcquire()
4422 pp := gp.m.p.ptr()
4423 if trace.ok() {
4424
4425
4426
4427 trace.GoPreempt()
4428 }
4429 casgstatus(gp, _Grunning, _Grunnable)
4430 if trace.ok() {
4431 traceRelease(trace)
4432 }
4433 dropg()
4434 runqput(pp, gp, false)
4435 schedule()
4436 }
4437
4438
4439 func goexit1() {
4440 if raceenabled {
4441 if gp := getg(); gp.bubble != nil {
4442 racereleasemergeg(gp, gp.bubble.raceaddr())
4443 }
4444 racegoend()
4445 }
4446 trace := traceAcquire()
4447 if trace.ok() {
4448 trace.GoEnd()
4449 traceRelease(trace)
4450 }
4451 mcall(goexit0)
4452 }
4453
4454
4455 func goexit0(gp *g) {
4456 gdestroy(gp)
4457 schedule()
4458 }
4459
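// gdestroy tears down a goroutine that has finished running: it moves
// gp to _Gdead, clears its scheduling state, flushes any remaining GC
// assist credit, and puts gp on the free list for reuse. If the
// goroutine was locked to its OS thread, the thread is not reused for
// other goroutines (except on plan9, where the external lock is simply
// dropped).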
4460 func gdestroy(gp *g) {
4461 mp := getg().m
4462 pp := mp.p.ptr()
4463
4464 casgstatus(gp, _Grunning, _Gdead)
4465 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4466 if isSystemGoroutine(gp, false) {
4467 sched.ngsys.Add(-1)
4468 }
4469 gp.m = nil
4470 locked := gp.lockedm != 0
4471 gp.lockedm = 0
4472 mp.lockedg = 0
4473 gp.preemptStop = false
4474 gp.paniconfault = false
4475 gp._defer = nil
4476 gp._panic = nil
4477 gp.writebuf = nil
4478 gp.waitreason = waitReasonZero
4479 gp.param = nil
4480 gp.labels = nil
4481 gp.timer = nil
4482 gp.bubble = nil
4483
4484 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4485
4486
4487
4488 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4489 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4490 gcController.bgScanCredit.Add(scanCredit)
4491 gp.gcAssistBytes = 0
4492 }
4493
4494 dropg()
4495
4496 if GOARCH == "wasm" {
4497 gfput(pp, gp)
4498 return
4499 }
4500
4501 if locked && mp.lockedInt != 0 {
4502 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4503 if mp.isextra {
4504 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4505 }
4506 throw("exited a goroutine internally locked to the OS thread")
4507 }
4508 gfput(pp, gp)
4509 if locked {
4510
4511
4512
4513
4514
4515
4516 if GOOS != "plan9" {
4517 gogo(&mp.g0.sched)
4518 } else {
4519
4520
4521 mp.lockedExt = 0
4522 }
4523 }
4524 }
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534 func save(pc, sp, bp uintptr) {
4535 gp := getg()
4536
4537 if gp == gp.m.g0 || gp == gp.m.gsignal {
4538
4539
4540
4541
4542
4543 throw("save on system g not allowed")
4544 }
4545
4546 gp.sched.pc = pc
4547 gp.sched.sp = sp
4548 gp.sched.lr = 0
4549 gp.sched.bp = bp
4550
4551
4552
4553 if gp.sched.ctxt != nil {
4554 badctxt()
4555 }
4556 }
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
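// reentersyscall is the common implementation of syscall entry: it
// saves the entry PC/SP/BP, moves the goroutine to _Gsyscall, and
// leaves the P attached to this M, recording it in m.oldp so that
// exitsyscall can tell whether the P was stolen while the call was
// blocked. save is repeated after each systemstack call because
// switching stacks clobbers gp.sched.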
4582 func reentersyscall(pc, sp, bp uintptr) {
4583 gp := getg()
4584
4585
4586
4587 gp.m.locks++
4588
4589
4590
4591
4592
4593 gp.stackguard0 = stackPreempt
4594 gp.throwsplit = true
4595
4596
4597 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4598
4599 pp := gp.m.p.ptr()
4600 if pp.runSafePointFn != 0 {
4601
4602 systemstack(runSafePointFn)
4603 }
4604 gp.m.oldp.set(pp)
4605
4606
4607 save(pc, sp, bp)
4608 gp.syscallsp = sp
4609 gp.syscallpc = pc
4610 gp.syscallbp = bp
4611
4612
4613 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4614 systemstack(func() {
4615 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4616 throw("entersyscall")
4617 })
4618 }
4619 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4620 systemstack(func() {
4621 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4622 throw("entersyscall")
4623 })
4624 }
4625 trace := traceAcquire()
4626 if trace.ok() {
4627
4628
4629
4630
4631 systemstack(func() {
4632 trace.GoSysCall()
4633 })
4634
4635 save(pc, sp, bp)
4636 }
4637 if sched.gcwaiting.Load() {
4638
4639
4640
4641 systemstack(func() {
4642 entersyscallHandleGCWait(trace)
4643 })
4644
4645 save(pc, sp, bp)
4646 }
4647
4648
4649
4650
4651
4652 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4653 casgstatus(gp, _Grunning, _Gsyscall)
4654 }
4655 if staticLockRanking {
4656
4657 save(pc, sp, bp)
4658 }
4659 if trace.ok() {
4660
4661
4662
4663 traceRelease(trace)
4664 }
4665 if sched.sysmonwait.Load() {
4666 systemstack(entersyscallWakeSysmon)
4667
4668 save(pc, sp, bp)
4669 }
4670 gp.m.locks--
4671 }
4672
4673
4674
4675
4676 const debugExtendGrunningNoP = false
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692 func entersyscall() {
4693
4694
4695
4696
4697 fp := getcallerfp()
4698 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4699 }
4700
4701 func entersyscallWakeSysmon() {
4702 lock(&sched.lock)
4703 if sched.sysmonwait.Load() {
4704 sched.sysmonwait.Store(false)
4705 notewakeup(&sched.sysmonnote)
4706 }
4707 unlock(&sched.lock)
4708 }
4709
4710 func entersyscallHandleGCWait(trace traceLocker) {
4711 gp := getg()
4712
4713 lock(&sched.lock)
4714 if sched.stopwait > 0 {
4715
4716 pp := gp.m.p.ptr()
4717 pp.m = 0
4718 gp.m.p = 0
4719 atomic.Store(&pp.status, _Pgcstop)
4720
4721 if trace.ok() {
4722 trace.ProcStop(pp)
4723 }
4724 sched.nGsyscallNoP.Add(1)
4725 pp.gcStopTime = nanotime()
4726 pp.syscalltick++
4727 if sched.stopwait--; sched.stopwait == 0 {
4728 notewakeup(&sched.stopnote)
4729 }
4730 }
4731 unlock(&sched.lock)
4732 }
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
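// entersyscallblock is like entersyscall, but for calls known to block:
// the P is handed off to another M immediately via handoffp instead of
// waiting for sysmon to retake it.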
4746 func entersyscallblock() {
4747 gp := getg()
4748
4749 gp.m.locks++
4750 gp.throwsplit = true
4751 gp.stackguard0 = stackPreempt
4752 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4753 gp.m.p.ptr().syscalltick++
4754
4755 sched.nGsyscallNoP.Add(1)
4756
4757
4758 pc := sys.GetCallerPC()
4759 sp := sys.GetCallerSP()
4760 bp := getcallerfp()
4761 save(pc, sp, bp)
4762 gp.syscallsp = gp.sched.sp
4763 gp.syscallpc = gp.sched.pc
4764 gp.syscallbp = gp.sched.bp
4765 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4766 sp1 := sp
4767 sp2 := gp.sched.sp
4768 sp3 := gp.syscallsp
4769 systemstack(func() {
4770 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4771 throw("entersyscallblock")
4772 })
4773 }
4774
4775
4776
4777
4778
4779
4780 trace := traceAcquire()
4781 systemstack(func() {
4782 if trace.ok() {
4783 trace.GoSysCall()
4784 }
4785 handoffp(releasep())
4786 })
4787
4788
4789
4790 if debugExtendGrunningNoP {
4791 usleep(10)
4792 }
4793 casgstatus(gp, _Grunning, _Gsyscall)
4794 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4795 systemstack(func() {
4796 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4797 throw("entersyscallblock")
4798 })
4799 }
4800 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4801 systemstack(func() {
4802 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4803 throw("entersyscallblock")
4804 })
4805 }
4806 if trace.ok() {
4807 systemstack(func() {
4808 traceRelease(trace)
4809 })
4810 }
4811
4812
4813 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4814
4815 gp.m.locks--
4816 }
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
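// exitsyscall is called when a goroutine returns from a system call. On
// the fast path the P is still attached to this M and execution simply
// continues. Otherwise it tries to reacquire the old P or any idle P,
// and if none is available it hands the goroutine to exitsyscallNoP.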
4838 func exitsyscall() {
4839 gp := getg()
4840
4841 gp.m.locks++
4842 if sys.GetCallerSP() > gp.syscallsp {
4843 throw("exitsyscall: syscall frame is no longer valid")
4844 }
4845 gp.waitsince = 0
4846
4847 if sched.stopwait == freezeStopWait {
4848
4849
4850
4851 systemstack(func() {
4852 lock(&deadlock)
4853 lock(&deadlock)
4854 })
4855 }
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4869 casgstatus(gp, _Gsyscall, _Grunning)
4870 }
4871
4872
4873
4874
4875 if debugExtendGrunningNoP {
4876 usleep(10)
4877 }
4878
4879
4880 oldp := gp.m.oldp.ptr()
4881 gp.m.oldp.set(nil)
4882
4883
4884 pp := gp.m.p.ptr()
4885 if pp != nil {
4886
4887 if trace := traceAcquire(); trace.ok() {
4888 systemstack(func() {
4889
4890
4891
4892
4893
4894
4895
4896
4897 if pp.syscalltick == gp.m.syscalltick {
4898 trace.GoSysExit(false)
4899 } else {
4900
4901
4902
4903
4904 trace.ProcSteal(pp)
4905 trace.ProcStart()
4906 trace.GoSysExit(true)
4907 trace.GoStart()
4908 }
4909 traceRelease(trace)
4910 })
4911 }
4912 } else {
4913
4914 systemstack(func() {
4915
4916 if pp := exitsyscallTryGetP(oldp); pp != nil {
4917
4918 acquirepNoTrace(pp)
4919
4920
4921 if trace := traceAcquire(); trace.ok() {
4922 trace.ProcStart()
4923 trace.GoSysExit(true)
4924 trace.GoStart()
4925 traceRelease(trace)
4926 }
4927 }
4928 })
4929 pp = gp.m.p.ptr()
4930 }
4931
4932
4933 if pp != nil {
4934 if goroutineProfile.active {
4935
4936
4937
4938 systemstack(func() {
4939 tryRecordGoroutineProfileWB(gp)
4940 })
4941 }
4942
4943
4944 pp.syscalltick++
4945
4946
4947
4948 gp.syscallsp = 0
4949 gp.m.locks--
4950 if gp.preempt {
4951
4952 gp.stackguard0 = stackPreempt
4953 } else {
4954
4955 gp.stackguard0 = gp.stack.lo + stackGuard
4956 }
4957 gp.throwsplit = false
4958
4959 if sched.disable.user && !schedEnabled(gp) {
4960
4961 Gosched()
4962 }
4963 return
4964 }
4965
4966 gp.m.locks--
4967
4968
4969 mcall(exitsyscallNoP)
4970
4971
4972
4973
4974
4975
4976
4977 gp.syscallsp = 0
4978 gp.m.p.ptr().syscalltick++
4979 gp.throwsplit = false
4980 }
4981
4982
4983
4984
4985
4986
4987
4988 func exitsyscallTryGetP(oldp *p) *p {
4989
4990 if oldp != nil {
4991 if thread, ok := setBlockOnExitSyscall(oldp); ok {
4992 thread.takeP()
4993 thread.resume()
4994 sched.nGsyscallNoP.Add(-1)
4995 return oldp
4996 }
4997 }
4998
4999
5000 if sched.pidle != 0 {
5001 lock(&sched.lock)
5002 pp, _ := pidleget(0)
5003 if pp != nil && sched.sysmonwait.Load() {
5004 sched.sysmonwait.Store(false)
5005 notewakeup(&sched.sysmonnote)
5006 }
5007 unlock(&sched.lock)
5008 if pp != nil {
5009 sched.nGsyscallNoP.Add(-1)
5010 return pp
5011 }
5012 }
5013 return nil
5014 }
5015
5016
5017
5018
5019
5020
5021
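// exitsyscallNoP is the slow path of exitsyscall, called via mcall when
// no P could be acquired: the goroutine becomes runnable and either
// runs immediately on a freshly acquired idle P, or is queued on the
// global run queue and this M blocks.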
5022 func exitsyscallNoP(gp *g) {
5023 traceExitingSyscall()
5024 trace := traceAcquire()
5025 casgstatus(gp, _Grunning, _Grunnable)
5026 traceExitedSyscall()
5027 if trace.ok() {
5028
5029
5030
5031
5032 trace.GoSysExit(true)
5033 traceRelease(trace)
5034 }
5035 sched.nGsyscallNoP.Add(-1)
5036 dropg()
5037 lock(&sched.lock)
5038 var pp *p
5039 if schedEnabled(gp) {
5040 pp, _ = pidleget(0)
5041 }
5042 var locked bool
5043 if pp == nil {
5044 globrunqput(gp)
5045
5046
5047
5048
5049
5050
5051 locked = gp.lockedm != 0
5052 } else if sched.sysmonwait.Load() {
5053 sched.sysmonwait.Store(false)
5054 notewakeup(&sched.sysmonnote)
5055 }
5056 unlock(&sched.lock)
5057 if pp != nil {
5058 acquirep(pp)
5059 execute(gp, false)
5060 }
5061 if locked {
5062
5063
5064
5065
5066 stoplockedm()
5067 execute(gp, false)
5068 }
5069 stopm()
5070 schedule()
5071 }
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085 func syscall_runtime_BeforeFork() {
5086 gp := getg().m.curg
5087
5088
5089
5090
5091 gp.m.locks++
5092 sigsave(&gp.m.sigmask)
5093 sigblock(false)
5094
5095
5096
5097
5098
5099 gp.stackguard0 = stackFork
5100 }
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114 func syscall_runtime_AfterFork() {
5115 gp := getg().m.curg
5116
5117
5118 gp.stackguard0 = gp.stack.lo + stackGuard
5119
5120 msigrestore(gp.m.sigmask)
5121
5122 gp.m.locks--
5123 }
5124
5125
5126
5127 var inForkedChild bool
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148 func syscall_runtime_AfterForkInChild() {
5149
5150
5151
5152
5153 inForkedChild = true
5154
5155 clearSignalHandlers()
5156
5157
5158
5159 msigrestore(getg().m.sigmask)
5160
5161 inForkedChild = false
5162 }
5163
5164
5165
5166
5167 var pendingPreemptSignals atomic.Int32
5168
5169
5170
5171
5172 func syscall_runtime_BeforeExec() {
5173
5174 execLock.lock()
5175
5176
5177
5178 if GOOS == "darwin" || GOOS == "ios" {
5179 for pendingPreemptSignals.Load() > 0 {
5180 osyield()
5181 }
5182 }
5183 }
5184
5185
5186
5187
5188 func syscall_runtime_AfterExec() {
5189 execLock.unlock()
5190 }
5191
5192
5193 func malg(stacksize int32) *g {
5194 newg := new(g)
5195 if stacksize >= 0 {
5196 stacksize = round2(stackSystem + stacksize)
5197 systemstack(func() {
5198 newg.stack = stackalloc(uint32(stacksize))
5199 if valgrindenabled {
5200 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5201 }
5202 })
5203 newg.stackguard0 = newg.stack.lo + stackGuard
5204 newg.stackguard1 = ^uintptr(0)
5205
5206
5207 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5208 }
5209 return newg
5210 }
5211
5212
5213
5214
5215 func newproc(fn *funcval) {
5216 gp := getg()
5217 pc := sys.GetCallerPC()
5218 systemstack(func() {
5219 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5220
5221 pp := getg().m.p.ptr()
5222 runqput(pp, newg, true)
5223
5224 if mainStarted {
5225 wakep()
5226 }
5227 })
5228 }
5229
5230
5231
5232
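// newproc1 creates a goroutine ready to run fn: it reuses a g from the
// free list (or allocates one), builds an initial frame that will
// return into goexit, assigns a goid, and returns the g in _Grunnable
// state, or _Gwaiting if parked is set.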
5233 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5234 if fn == nil {
5235 fatal("go of nil func value")
5236 }
5237
5238 mp := acquirem()
5239 pp := mp.p.ptr()
5240 newg := gfget(pp)
5241 if newg == nil {
5242 newg = malg(stackMin)
5243 casgstatus(newg, _Gidle, _Gdead)
5244 allgadd(newg)
5245 }
5246 if newg.stack.hi == 0 {
5247 throw("newproc1: newg missing stack")
5248 }
5249
5250 if readgstatus(newg) != _Gdead {
5251 throw("newproc1: new g is not Gdead")
5252 }
5253
5254 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5255 totalSize = alignUp(totalSize, sys.StackAlign)
5256 sp := newg.stack.hi - totalSize
5257 if usesLR {
5258
5259 *(*uintptr)(unsafe.Pointer(sp)) = 0
5260 prepGoExitFrame(sp)
5261 }
5262 if GOARCH == "arm64" {
5263
5264 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5265 }
5266
5267 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5268 newg.sched.sp = sp
5269 newg.stktopsp = sp
5270 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5271 newg.sched.g = guintptr(unsafe.Pointer(newg))
5272 gostartcallfn(&newg.sched, fn)
5273 newg.parentGoid = callergp.goid
5274 newg.gopc = callerpc
5275 newg.ancestors = saveAncestors(callergp)
5276 newg.startpc = fn.fn
5277 newg.runningCleanups.Store(false)
5278 if isSystemGoroutine(newg, false) {
5279 sched.ngsys.Add(1)
5280 } else {
5281
5282 newg.bubble = callergp.bubble
5283 if mp.curg != nil {
5284 newg.labels = mp.curg.labels
5285 }
5286 if goroutineProfile.active {
5287
5288
5289
5290
5291
5292 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5293 }
5294 }
5295
5296 newg.trackingSeq = uint8(cheaprand())
5297 if newg.trackingSeq%gTrackingPeriod == 0 {
5298 newg.tracking = true
5299 }
5300 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5301
5302
5303
5304 trace := traceAcquire()
5305 var status uint32 = _Grunnable
5306 if parked {
5307 status = _Gwaiting
5308 newg.waitreason = waitreason
5309 }
5310 if pp.goidcache == pp.goidcacheend {
5311
5312
5313
5314 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5315 pp.goidcache -= _GoidCacheBatch - 1
5316 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5317 }
5318 newg.goid = pp.goidcache
5319 casgstatus(newg, _Gdead, status)
5320 pp.goidcache++
5321 newg.trace.reset()
5322 if trace.ok() {
5323 trace.GoCreate(newg, newg.startpc, parked)
5324 traceRelease(trace)
5325 }
5326
5327
5328 if raceenabled {
5329 newg.racectx = racegostart(callerpc)
5330 newg.raceignore = 0
5331 if newg.labels != nil {
5332
5333
5334 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5335 }
5336 }
5337 pp.goroutinesCreated++
5338 releasem(mp)
5339
5340 return newg
5341 }
5342
5343
5344
5345
5346 func saveAncestors(callergp *g) *[]ancestorInfo {
5347
5348 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5349 return nil
5350 }
5351 var callerAncestors []ancestorInfo
5352 if callergp.ancestors != nil {
5353 callerAncestors = *callergp.ancestors
5354 }
5355 n := int32(len(callerAncestors)) + 1
5356 if n > debug.tracebackancestors {
5357 n = debug.tracebackancestors
5358 }
5359 ancestors := make([]ancestorInfo, n)
5360 copy(ancestors[1:], callerAncestors)
5361
5362 var pcs [tracebackInnerFrames]uintptr
5363 npcs := gcallers(callergp, 0, pcs[:])
5364 ipcs := make([]uintptr, npcs)
5365 copy(ipcs, pcs[:])
5366 ancestors[0] = ancestorInfo{
5367 pcs: ipcs,
5368 goid: callergp.goid,
5369 gopc: callergp.gopc,
5370 }
5371
5372 ancestorsp := new([]ancestorInfo)
5373 *ancestorsp = ancestors
5374 return ancestorsp
5375 }
5376
5377
5378
5379 func gfput(pp *p, gp *g) {
5380 if readgstatus(gp) != _Gdead {
5381 throw("gfput: bad status (not Gdead)")
5382 }
5383
5384 stksize := gp.stack.hi - gp.stack.lo
5385
5386 if stksize != uintptr(startingStackSize) {
5387
5388 stackfree(gp.stack)
5389 gp.stack.lo = 0
5390 gp.stack.hi = 0
5391 gp.stackguard0 = 0
5392 if valgrindenabled {
5393 valgrindDeregisterStack(gp.valgrindStackID)
5394 gp.valgrindStackID = 0
5395 }
5396 }
5397
5398 pp.gFree.push(gp)
5399 if pp.gFree.size >= 64 {
5400 var (
5401 stackQ gQueue
5402 noStackQ gQueue
5403 )
5404 for pp.gFree.size >= 32 {
5405 gp := pp.gFree.pop()
5406 if gp.stack.lo == 0 {
5407 noStackQ.push(gp)
5408 } else {
5409 stackQ.push(gp)
5410 }
5411 }
5412 lock(&sched.gFree.lock)
5413 sched.gFree.noStack.pushAll(noStackQ)
5414 sched.gFree.stack.pushAll(stackQ)
5415 unlock(&sched.gFree.lock)
5416 }
5417 }
5418
5419
5420
5421 func gfget(pp *p) *g {
5422 retry:
5423 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5424 lock(&sched.gFree.lock)
5425
5426 for pp.gFree.size < 32 {
5427
5428 gp := sched.gFree.stack.pop()
5429 if gp == nil {
5430 gp = sched.gFree.noStack.pop()
5431 if gp == nil {
5432 break
5433 }
5434 }
5435 pp.gFree.push(gp)
5436 }
5437 unlock(&sched.gFree.lock)
5438 goto retry
5439 }
5440 gp := pp.gFree.pop()
5441 if gp == nil {
5442 return nil
5443 }
5444 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5445
5446
5447
5448 systemstack(func() {
5449 stackfree(gp.stack)
5450 gp.stack.lo = 0
5451 gp.stack.hi = 0
5452 gp.stackguard0 = 0
5453 if valgrindenabled {
5454 valgrindDeregisterStack(gp.valgrindStackID)
5455 gp.valgrindStackID = 0
5456 }
5457 })
5458 }
5459 if gp.stack.lo == 0 {
5460
5461 systemstack(func() {
5462 gp.stack = stackalloc(startingStackSize)
5463 if valgrindenabled {
5464 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5465 }
5466 })
5467 gp.stackguard0 = gp.stack.lo + stackGuard
5468 } else {
5469 if raceenabled {
5470 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5471 }
5472 if msanenabled {
5473 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5474 }
5475 if asanenabled {
5476 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5477 }
5478 }
5479 return gp
5480 }
5481
5482
5483 func gfpurge(pp *p) {
5484 var (
5485 stackQ gQueue
5486 noStackQ gQueue
5487 )
5488 for !pp.gFree.empty() {
5489 gp := pp.gFree.pop()
5490 if gp.stack.lo == 0 {
5491 noStackQ.push(gp)
5492 } else {
5493 stackQ.push(gp)
5494 }
5495 }
5496 lock(&sched.gFree.lock)
5497 sched.gFree.noStack.pushAll(noStackQ)
5498 sched.gFree.stack.pushAll(stackQ)
5499 unlock(&sched.gFree.lock)
5500 }
5501
5502
5503 func Breakpoint() {
5504 breakpoint()
5505 }
5506
5507
5508
5509
5510
5511
5512 func dolockOSThread() {
5513 if GOARCH == "wasm" {
5514 return
5515 }
5516 gp := getg()
5517 gp.m.lockedg.set(gp)
5518 gp.lockedm.set(gp.m)
5519 }
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
5535
5536
5537 func LockOSThread() {
5538 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5539
5540
5541
5542 startTemplateThread()
5543 }
5544 gp := getg()
5545 gp.m.lockedExt++
5546 if gp.m.lockedExt == 0 {
5547 gp.m.lockedExt--
5548 panic("LockOSThread nesting overflow")
5549 }
5550 dolockOSThread()
5551 }
5552
5553
5554 func lockOSThread() {
5555 getg().m.lockedInt++
5556 dolockOSThread()
5557 }
5558
5559
5560
5561
5562
5563
5564 func dounlockOSThread() {
5565 if GOARCH == "wasm" {
5566 return
5567 }
5568 gp := getg()
5569 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5570 return
5571 }
5572 gp.m.lockedg = 0
5573 gp.lockedm = 0
5574 }
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590 func UnlockOSThread() {
5591 gp := getg()
5592 if gp.m.lockedExt == 0 {
5593 return
5594 }
5595 gp.m.lockedExt--
5596 dounlockOSThread()
5597 }
5598
5599
5600 func unlockOSThread() {
5601 gp := getg()
5602 if gp.m.lockedInt == 0 {
5603 systemstack(badunlockosthread)
5604 }
5605 gp.m.lockedInt--
5606 dounlockOSThread()
5607 }
5608
5609 func badunlockosthread() {
5610 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5611 }
5612
5613 func gcount(includeSys bool) int32 {
5614 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5615 if !includeSys {
5616 n -= sched.ngsys.Load()
5617 }
5618 for _, pp := range allp {
5619 n -= pp.gFree.size
5620 }
5621
5622
5623
5624 if n < 1 {
5625 n = 1
5626 }
5627 return n
5628 }
5629
5630
5631
5632
5633
5634 func goroutineleakcount() int {
5635 return work.goroutineLeak.count
5636 }
5637
5638 func mcount() int32 {
5639 return int32(sched.mnext - sched.nmfreed)
5640 }
5641
5642 var prof struct {
5643 signalLock atomic.Uint32
5644
5645
5646
5647 hz atomic.Int32
5648 }
5649
5650 func _System() { _System() }
5651 func _ExternalCode() { _ExternalCode() }
5652 func _LostExternalCode() { _LostExternalCode() }
5653 func _GC() { _GC() }
5654 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5655 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5656 func _VDSO() { _VDSO() }
5657
5658
5659
5660
5661
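// sigprof is called on the profiling signal (SIGPROF) to record a CPU
// sample. It collects a traceback from the interrupted context into a
// fixed-size buffer, falling back to synthetic frames such as _System
// or _ExternalCode when no stack can be recovered, and hands the
// sample to the CPU profiler and the execution tracer.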
5662 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5663 if prof.hz.Load() == 0 {
5664 return
5665 }
5666
5667
5668
5669
5670 if mp != nil && mp.profilehz == 0 {
5671 return
5672 }
5673
5674
5675
5676
5677
5678
5679
5680 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5681 if f := findfunc(pc); f.valid() {
5682 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5683 cpuprof.lostAtomic++
5684 return
5685 }
5686 }
5687 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5688
5689
5690
5691 cpuprof.lostAtomic++
5692 return
5693 }
5694 }
5695
5696
5697
5698
5699
5700
5701
5702 getg().m.mallocing++
5703
5704 var u unwinder
5705 var stk [maxCPUProfStack]uintptr
5706 n := 0
5707 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5708 cgoOff := 0
5709
5710
5711
5712
5713
5714 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5715 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5716 cgoOff++
5717 }
5718 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5719 mp.cgoCallers[0] = 0
5720 }
5721
5722
5723 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5724 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5725
5726
5727 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5728 } else if mp != nil && mp.vdsoSP != 0 {
5729
5730
5731 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5732 } else {
5733 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5734 }
5735 n += tracebackPCs(&u, 0, stk[n:])
5736
5737 if n <= 0 {
5738
5739
5740 n = 2
5741 if inVDSOPage(pc) {
5742 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5743 } else if pc > firstmoduledata.etext {
5744
5745 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5746 }
5747 stk[0] = pc
5748 if mp.preemptoff != "" {
5749 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5750 } else {
5751 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5752 }
5753 }
5754
5755 if prof.hz.Load() != 0 {
5756
5757
5758
5759 var tagPtr *unsafe.Pointer
5760 if gp != nil && gp.m != nil && gp.m.curg != nil {
5761 tagPtr = &gp.m.curg.labels
5762 }
5763 cpuprof.add(tagPtr, stk[:n])
5764
5765 gprof := gp
5766 var mp *m
5767 var pp *p
5768 if gp != nil && gp.m != nil {
5769 if gp.m.curg != nil {
5770 gprof = gp.m.curg
5771 }
5772 mp = gp.m
5773 pp = gp.m.p.ptr()
5774 }
5775 traceCPUSample(gprof, mp, pp, stk[:n])
5776 }
5777 getg().m.mallocing--
5778 }
5779
5780
5781
5782 func setcpuprofilerate(hz int32) {
5783
5784 if hz < 0 {
5785 hz = 0
5786 }
5787
5788
5789
5790 gp := getg()
5791 gp.m.locks++
5792
5793
5794
5795
5796 setThreadCPUProfiler(0)
5797
5798 for !prof.signalLock.CompareAndSwap(0, 1) {
5799 osyield()
5800 }
5801 if prof.hz.Load() != hz {
5802 setProcessCPUProfiler(hz)
5803 prof.hz.Store(hz)
5804 }
5805 prof.signalLock.Store(0)
5806
5807 lock(&sched.lock)
5808 sched.profilehz = hz
5809 unlock(&sched.lock)
5810
5811 if hz != 0 {
5812 setThreadCPUProfiler(hz)
5813 }
5814
5815 gp.m.locks--
5816 }
5817
5818
5819
5820 func (pp *p) init(id int32) {
5821 pp.id = id
5822 pp.gcw.id = id
5823 pp.status = _Pgcstop
5824 pp.sudogcache = pp.sudogbuf[:0]
5825 pp.deferpool = pp.deferpoolbuf[:0]
5826 pp.wbBuf.reset()
5827 if pp.mcache == nil {
5828 if id == 0 {
5829 if mcache0 == nil {
5830 throw("missing mcache?")
5831 }
5832
5833
5834 pp.mcache = mcache0
5835 } else {
5836 pp.mcache = allocmcache()
5837 }
5838 }
5839 if raceenabled && pp.raceprocctx == 0 {
5840 if id == 0 {
5841 pp.raceprocctx = raceprocctx0
5842 raceprocctx0 = 0
5843 } else {
5844 pp.raceprocctx = raceproccreate()
5845 }
5846 }
5847 lockInit(&pp.timers.mu, lockRankTimers)
5848
5849
5850
5851 timerpMask.set(id)
5852
5853
5854 idlepMask.clear(id)
5855 }
5856
5857
5858
5859
5860
5861 func (pp *p) destroy() {
5862 assertLockHeld(&sched.lock)
5863 assertWorldStopped()
5864
5865
5866 for pp.runqhead != pp.runqtail {
5867
5868 pp.runqtail--
5869 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5870
5871 globrunqputhead(gp)
5872 }
5873 if pp.runnext != 0 {
5874 globrunqputhead(pp.runnext.ptr())
5875 pp.runnext = 0
5876 }
5877
5878
5879 getg().m.p.ptr().timers.take(&pp.timers)
5880
5881
5882
5883 if phase := gcphase; phase != _GCoff {
5884 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5885 throw("P destroyed while GC is running")
5886 }
5887
5888 pp.gcw.spanq.destroy()
5889
5890 clear(pp.sudogbuf[:])
5891 pp.sudogcache = pp.sudogbuf[:0]
5892 pp.pinnerCache = nil
5893 clear(pp.deferpoolbuf[:])
5894 pp.deferpool = pp.deferpoolbuf[:0]
5895 systemstack(func() {
5896 for i := 0; i < pp.mspancache.len; i++ {
5897
5898 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5899 }
5900 pp.mspancache.len = 0
5901 lock(&mheap_.lock)
5902 pp.pcache.flush(&mheap_.pages)
5903 unlock(&mheap_.lock)
5904 })
5905 freemcache(pp.mcache)
5906 pp.mcache = nil
5907 gfpurge(pp)
5908 if raceenabled {
5909 if pp.timers.raceCtx != 0 {
5910
5911
5912
5913
5914
5915 mp := getg().m
5916 phold := mp.p.ptr()
5917 mp.p.set(pp)
5918
5919 racectxend(pp.timers.raceCtx)
5920 pp.timers.raceCtx = 0
5921
5922 mp.p.set(phold)
5923 }
5924 raceprocdestroy(pp.raceprocctx)
5925 pp.raceprocctx = 0
5926 }
5927 pp.gcAssistTime = 0
5928 gcCleanups.queued += pp.cleanupsQueued
5929 pp.cleanupsQueued = 0
5930 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5931 pp.goroutinesCreated = 0
5932 pp.xRegs.free()
5933 pp.status = _Pdead
5934 }
5935
5936
5937
5938
5939
5940
5941
5942
5943
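// procresize changes the number of Ps to nprocs, growing or shrinking
// allp and the associated masks. It returns a linked list of Ps with
// work to do that the caller must start. The world must be stopped and
// sched.lock held.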
5944 func procresize(nprocs int32) *p {
5945 assertLockHeld(&sched.lock)
5946 assertWorldStopped()
5947
5948 old := gomaxprocs
5949 if old < 0 || nprocs <= 0 {
5950 throw("procresize: invalid arg")
5951 }
5952 trace := traceAcquire()
5953 if trace.ok() {
5954 trace.Gomaxprocs(nprocs)
5955 traceRelease(trace)
5956 }
5957
5958
5959 now := nanotime()
5960 if sched.procresizetime != 0 {
5961 sched.totaltime += int64(old) * (now - sched.procresizetime)
5962 }
5963 sched.procresizetime = now
5964
5965
5966 if nprocs > int32(len(allp)) {
5967
5968
5969 lock(&allpLock)
5970 if nprocs <= int32(cap(allp)) {
5971 allp = allp[:nprocs]
5972 } else {
5973 nallp := make([]*p, nprocs)
5974
5975
5976 copy(nallp, allp[:cap(allp)])
5977 allp = nallp
5978 }
5979
5980 idlepMask = idlepMask.resize(nprocs)
5981 timerpMask = timerpMask.resize(nprocs)
5982 work.spanqMask = work.spanqMask.resize(nprocs)
5983 unlock(&allpLock)
5984 }
5985
5986
5987 for i := old; i < nprocs; i++ {
5988 pp := allp[i]
5989 if pp == nil {
5990 pp = new(p)
5991 }
5992 pp.init(i)
5993 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5994 }
5995
5996 gp := getg()
5997 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5998
5999 gp.m.p.ptr().status = _Prunning
6000 gp.m.p.ptr().mcache.prepareForSweep()
6001 } else {
6002
6003
6004
6005
6006
6007 if gp.m.p != 0 {
6008 trace := traceAcquire()
6009 if trace.ok() {
6010
6011
6012
6013 trace.GoSched()
6014 trace.ProcStop(gp.m.p.ptr())
6015 traceRelease(trace)
6016 }
6017 gp.m.p.ptr().m = 0
6018 }
6019 gp.m.p = 0
6020 pp := allp[0]
6021 pp.m = 0
6022 pp.status = _Pidle
6023 acquirep(pp)
6024 trace := traceAcquire()
6025 if trace.ok() {
6026 trace.GoStart()
6027 traceRelease(trace)
6028 }
6029 }
6030
6031
6032 mcache0 = nil
6033
6034
6035 for i := nprocs; i < old; i++ {
6036 pp := allp[i]
6037 pp.destroy()
6038
6039 }
6040
6041
6042 if int32(len(allp)) != nprocs {
6043 lock(&allpLock)
6044 allp = allp[:nprocs]
6045 idlepMask = idlepMask.resize(nprocs)
6046 timerpMask = timerpMask.resize(nprocs)
6047 work.spanqMask = work.spanqMask.resize(nprocs)
6048 unlock(&allpLock)
6049 }
6050
6051
6052 var runnablePs *p
6053 var runnablePsNeedM *p
6054 var idlePs *p
6055 for i := nprocs - 1; i >= 0; i-- {
6056 pp := allp[i]
6057 if gp.m.p.ptr() == pp {
6058 continue
6059 }
6060 pp.status = _Pidle
6061 if runqempty(pp) {
6062 pp.link.set(idlePs)
6063 idlePs = pp
6064 continue
6065 }
6066
6067
6068
6069
6070
6071
6072
6073
6074 var mp *m
6075 if oldm := pp.oldm.get(); oldm != nil {
6076
6077 mp = mgetSpecific(oldm)
6078 }
6079 if mp == nil {
6080
6081 pp.link.set(runnablePsNeedM)
6082 runnablePsNeedM = pp
6083 continue
6084 }
6085 pp.m.set(mp)
6086 pp.link.set(runnablePs)
6087 runnablePs = pp
6088 }
6089
6090
6091 for runnablePsNeedM != nil {
6092 pp := runnablePsNeedM
6093 runnablePsNeedM = pp.link.ptr()
6094
6095 mp := mget()
6096 pp.m.set(mp)
6097 pp.link.set(runnablePs)
6098 runnablePs = pp
6099 }
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125 if gcBlackenEnabled != 0 {
6126 for idlePs != nil {
6127 pp := idlePs
6128
6129 ok, _ := gcController.assignWaitingGCWorker(pp, now)
6130 if !ok {
6131
6132 break
6133 }
6134
6135
6136
6137
6138
6139
6140
6141
6142 idlePs = pp.link.ptr()
6143 mp := mget()
6144 pp.m.set(mp)
6145 pp.link.set(runnablePs)
6146 runnablePs = pp
6147 }
6148 }
6149
6150
6151 for idlePs != nil {
6152 pp := idlePs
6153 idlePs = pp.link.ptr()
6154 pidleput(pp, now)
6155 }
6156
6157 stealOrder.reset(uint32(nprocs))
6158 var int32p *int32 = &gomaxprocs
6159 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6160 if old != nprocs {
6161
6162 gcCPULimiter.resetCapacity(now, nprocs)
6163 }
6164 return runnablePs
6165 }
6166
6167
6168
6169
6170
6171
6172
6173 func acquirep(pp *p) {
6174
6175 acquirepNoTrace(pp)
6176
6177
6178 trace := traceAcquire()
6179 if trace.ok() {
6180 trace.ProcStart()
6181 traceRelease(trace)
6182 }
6183 }
6184
6185
6186
6187
6188 func acquirepNoTrace(pp *p) {
6189
6190 wirep(pp)
6191
6192
6193
6194
6195
6196
6197 pp.oldm = pp.m.ptr().self
6198
6199
6200
6201 pp.mcache.prepareForSweep()
6202 }
6203
6204
6205
6206
6207
6208
6209
6210 func wirep(pp *p) {
6211 gp := getg()
6212
6213 if gp.m.p != 0 {
6214
6215
6216 systemstack(func() {
6217 throw("wirep: already in go")
6218 })
6219 }
6220 if pp.m != 0 || pp.status != _Pidle {
6221
6222
6223 systemstack(func() {
6224 id := int64(0)
6225 if pp.m != 0 {
6226 id = pp.m.ptr().id
6227 }
6228 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6229 throw("wirep: invalid p state")
6230 })
6231 }
6232 gp.m.p.set(pp)
6233 pp.m.set(gp.m)
6234 pp.status = _Prunning
6235 }
6236
6237
6238 func releasep() *p {
6239 trace := traceAcquire()
6240 if trace.ok() {
6241 trace.ProcStop(getg().m.p.ptr())
6242 traceRelease(trace)
6243 }
6244 return releasepNoTrace()
6245 }
6246
6247
6248 func releasepNoTrace() *p {
6249 gp := getg()
6250
6251 if gp.m.p == 0 {
6252 throw("releasep: invalid arg")
6253 }
6254 pp := gp.m.p.ptr()
6255 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6256 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6257 throw("releasep: invalid p state")
6258 }
6259
6260
6261 gcController.releaseNextGCMarkWorker(pp)
6262
6263 gp.m.p = 0
6264 pp.m = 0
6265 pp.status = _Pidle
6266 return pp
6267 }
6268
6269 func incidlelocked(v int32) {
6270 lock(&sched.lock)
6271 sched.nmidlelocked += v
6272 if v > 0 {
6273 checkdead()
6274 }
6275 unlock(&sched.lock)
6276 }
6277
6278
6279
6280
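// checkdead detects deadlock: if no M is running user code and no
// goroutine can ever become runnable again (nothing runnable, no
// pending timers), the process dies with a fatal error. It must be
// called with sched.lock held.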
6281 func checkdead() {
6282 assertLockHeld(&sched.lock)
6283
6284
6285
6286
6287
6288
6289 if (islibrary || isarchive) && GOARCH != "wasm" {
6290 return
6291 }
6292
6293
6294
6295
6296
6297 if panicking.Load() > 0 {
6298 return
6299 }
6300
6301
6302
6303
6304
6305 var run0 int32
6306 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6307 run0 = 1
6308 }
6309
6310 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6311 if run > run0 {
6312 return
6313 }
6314 if run < 0 {
6315 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6316 unlock(&sched.lock)
6317 throw("checkdead: inconsistent counts")
6318 }
6319
6320 grunning := 0
6321 forEachG(func(gp *g) {
6322 if isSystemGoroutine(gp, false) {
6323 return
6324 }
6325 s := readgstatus(gp)
6326 switch s &^ _Gscan {
6327 case _Gwaiting,
6328 _Gpreempted:
6329 grunning++
6330 case _Grunnable,
6331 _Grunning,
6332 _Gsyscall:
6333 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6334 unlock(&sched.lock)
6335 throw("checkdead: runnable g")
6336 }
6337 })
6338 if grunning == 0 {
6339 unlock(&sched.lock)
6340 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6341 }
6342
6343
6344 if faketime != 0 {
6345 if when := timeSleepUntil(); when < maxWhen {
6346 faketime = when
6347
6348
6349 pp, _ := pidleget(faketime)
6350 if pp == nil {
6351
6352
6353 unlock(&sched.lock)
6354 throw("checkdead: no p for timer")
6355 }
6356 mp := mget()
6357 if mp == nil {
6358
6359
6360 unlock(&sched.lock)
6361 throw("checkdead: no m for timer")
6362 }
6363
6364
6365
6366 sched.nmspinning.Add(1)
6367 mp.spinning = true
6368 mp.nextp.set(pp)
6369 notewakeup(&mp.park)
6370 return
6371 }
6372 }
6373
6374
6375 for _, pp := range allp {
6376 if len(pp.timers.heap) > 0 {
6377 return
6378 }
6379 }
6380
6381 unlock(&sched.lock)
6382 fatal("all goroutines are asleep - deadlock!")
6383 }
6384
6385
6386
6387
6388
6389
6390 var forcegcperiod int64 = 2 * 60 * 1e9
6391
6392
6393
6394
6395 const haveSysmon = GOARCH != "wasm"
6396
6397
6398
6399
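// sysmon is the system monitor, running on its own M without a P. It
// loops forever: polling the network if no one else has recently,
// retaking Ps from long syscalls, preempting long-running goroutines,
// forcing GCs that are overdue, and printing scheduler traces when
// requested.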
6400 func sysmon() {
6401 lock(&sched.lock)
6402 sched.nmsys++
6403 checkdead()
6404 unlock(&sched.lock)
6405
6406 lastgomaxprocs := int64(0)
6407 lasttrace := int64(0)
6408 idle := 0
6409 delay := uint32(0)
6410
6411 for {
6412 if idle == 0 {
6413 delay = 20
6414 } else if idle > 50 {
6415 delay *= 2
6416 }
6417 if delay > 10*1000 {
6418 delay = 10 * 1000
6419 }
6420 usleep(delay)
6421
6422
6423
6424
6425
6426
6427
6428
6429
6430
6431
6432
6433
6434
6435
6436
6437 now := nanotime()
6438 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6439 lock(&sched.lock)
6440 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6441 syscallWake := false
6442 next := timeSleepUntil()
6443 if next > now {
6444 sched.sysmonwait.Store(true)
6445 unlock(&sched.lock)
6446
6447
6448 sleep := forcegcperiod / 2
6449 if next-now < sleep {
6450 sleep = next - now
6451 }
6452 shouldRelax := sleep >= osRelaxMinNS
6453 if shouldRelax {
6454 osRelax(true)
6455 }
6456 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6457 if shouldRelax {
6458 osRelax(false)
6459 }
6460 lock(&sched.lock)
6461 sched.sysmonwait.Store(false)
6462 noteclear(&sched.sysmonnote)
6463 }
6464 if syscallWake {
6465 idle = 0
6466 delay = 20
6467 }
6468 }
6469 unlock(&sched.lock)
6470 }
6471
6472 lock(&sched.sysmonlock)
6473
6474
6475 now = nanotime()
6476
6477
6478 if *cgo_yield != nil {
6479 asmcgocall(*cgo_yield, nil)
6480 }
6481
6482 lastpoll := sched.lastpoll.Load()
6483 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6484 sched.lastpoll.CompareAndSwap(lastpoll, now)
6485 list, delta := netpoll(0)
6486 if !list.empty() {
6487
6488
6489
6490
6491
6492
6493
6494 incidlelocked(-1)
6495 injectglist(&list)
6496 incidlelocked(1)
6497 netpollAdjustWaiters(delta)
6498 }
6499 }
6500
6501 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6502 sysmonUpdateGOMAXPROCS()
6503 lastgomaxprocs = now
6504 }
6505 if scavenger.sysmonWake.Load() != 0 {
6506
6507 scavenger.wake()
6508 }
6509
6510
6511 if retake(now) != 0 {
6512 idle = 0
6513 } else {
6514 idle++
6515 }
6516
6517 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6518 lock(&forcegc.lock)
6519 forcegc.idle.Store(false)
6520 var list gList
6521 list.push(forcegc.g)
6522 injectglist(&list)
6523 unlock(&forcegc.lock)
6524 }
6525 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6526 lasttrace = now
6527 schedtrace(debug.scheddetail > 0)
6528 }
6529 unlock(&sched.sysmonlock)
6530 }
6531 }
6532
6533 type sysmontick struct {
6534 schedtick uint32
6535 syscalltick uint32
6536 schedwhen int64
6537 syscallwhen int64
6538 }
6539
6540
6541
6542 const forcePreemptNS = 10 * 1000 * 1000
6543
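// retake is called by sysmon. It preempts Ps that have been running the
// same goroutine for more than forcePreemptNS, and it takes Ps away
// from threads that have been blocked in a syscall long enough that the
// P should be handed to other work. It returns how many Ps were taken.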
6544 func retake(now int64) uint32 {
6545 n := 0
6546
6547
6548 lock(&allpLock)
6549
6550
6551
6552 for i := 0; i < len(allp); i++ {
6553
6554
6555
6556
6557
6558
6559
6560
6561 pp := allp[i]
6562 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6563
6564
6565 continue
6566 }
6567 pd := &pp.sysmontick
6568 sysretake := false
6569
6570
6571
6572
6573
6574 schedt := int64(pp.schedtick)
6575 if int64(pd.schedtick) != schedt {
6576 pd.schedtick = uint32(schedt)
6577 pd.schedwhen = now
6578 } else if pd.schedwhen+forcePreemptNS <= now {
6579 preemptone(pp)
6580
6581
6582
6583
6584 sysretake = true
6585 }
6586
6587
6588 unlock(&allpLock)
6589
6590
6591
6592
6593
6594
6595
6596
6597 incidlelocked(-1)
6598
6599
6600 thread, ok := setBlockOnExitSyscall(pp)
6601 if !ok {
6602
6603 goto done
6604 }
6605
6606
6607 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6608 pd.syscalltick = uint32(syst)
6609 pd.syscallwhen = now
6610 thread.resume()
6611 goto done
6612 }
6613
6614
6615
6616
6617 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6618 thread.resume()
6619 goto done
6620 }
6621
6622
6623
6624 thread.takeP()
6625 thread.resume()
6626 n++
6627
6628
6629 handoffp(pp)
6630
6631
6632
6633 done:
6634 incidlelocked(1)
6635 lock(&allpLock)
6636 }
6637 unlock(&allpLock)
6638 return uint32(n)
6639 }
6640
6641
6642
6643 type syscallingThread struct {
6644 gp *g
6645 mp *m
6646 pp *p
6647 status uint32
6648 }
6649
6650
6651
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662
6663
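// setBlockOnExitSyscall prepares to take pp away from a thread blocked
// in a syscall. It succeeds only if pp is _Prunning and its current
// goroutine is in _Gsyscall (or _Gdeadextra); on success the goroutine
// is held in a _Gscan status, preventing it from completing
// exitsyscall, until resume is called on the returned thread handle.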
6664 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6665 if pp.status != _Prunning {
6666 return syscallingThread{}, false
6667 }
6668
6669
6670
6671
6672
6673
6674
6675
6676
6677
6678
6679 mp := pp.m.ptr()
6680 if mp == nil {
6681
6682 return syscallingThread{}, false
6683 }
6684 gp := mp.curg
6685 if gp == nil {
6686
6687 return syscallingThread{}, false
6688 }
6689 status := readgstatus(gp) &^ _Gscan
6690
6691
6692
6693
6694 if status != _Gsyscall && status != _Gdeadextra {
6695
6696 return syscallingThread{}, false
6697 }
6698 if !castogscanstatus(gp, status, status|_Gscan) {
6699
6700 return syscallingThread{}, false
6701 }
6702 if gp.m != mp || gp.m.p.ptr() != pp {
6703
6704 casfrom_Gscanstatus(gp, status|_Gscan, status)
6705 return syscallingThread{}, false
6706 }
6707 return syscallingThread{gp, mp, pp, status}, true
6708 }
6709
6710
6711
6712
6713
6714 func (s syscallingThread) gcstopP() {
6715 assertLockHeld(&sched.lock)
6716
6717 s.releaseP(_Pgcstop)
6718 s.pp.gcStopTime = nanotime()
6719 sched.stopwait--
6720 }
6721
6722
6723
6724 func (s syscallingThread) takeP() {
6725 s.releaseP(_Pidle)
6726 }
6727
6728
6729
6730
6731 func (s syscallingThread) releaseP(state uint32) {
6732 if state != _Pidle && state != _Pgcstop {
6733 throw("attempted to release P into a bad state")
6734 }
6735 trace := traceAcquire()
6736 s.pp.m = 0
6737 s.mp.p = 0
6738 atomic.Store(&s.pp.status, state)
6739 if trace.ok() {
6740 trace.ProcSteal(s.pp)
6741 traceRelease(trace)
6742 }
6743 sched.nGsyscallNoP.Add(1)
6744 s.pp.syscalltick++
6745 }
6746
6747
6748 func (s syscallingThread) resume() {
6749 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6750 }
6751
6752
6753
6754
6755
6756
6757 func preemptall() bool {
6758 res := false
6759 for _, pp := range allp {
6760 if pp.status != _Prunning {
6761 continue
6762 }
6763 if preemptone(pp) {
6764 res = true
6765 }
6766 }
6767 return res
6768 }
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
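// preemptone requests preemption of the goroutine running on pp by
// setting its preempt flag and poisoning its stack guard, and by
// sending an asynchronous preemption signal when the platform supports
// it. Delivery is best-effort; the goroutine may not stop promptly.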
6780 func preemptone(pp *p) bool {
6781 mp := pp.m.ptr()
6782 if mp == nil || mp == getg().m {
6783 return false
6784 }
6785 gp := mp.curg
6786 if gp == nil || gp == mp.g0 {
6787 return false
6788 }
6789 if readgstatus(gp)&^_Gscan == _Gsyscall {
6790
6791 return false
6792 }
6793
6794 gp.preempt = true
6795
6796
6797
6798
6799
6800 gp.stackguard0 = stackPreempt
6801
6802
6803 if preemptMSupported && debug.asyncpreemptoff == 0 {
6804 pp.preempt = true
6805 preemptM(mp)
6806 }
6807
6808 return true
6809 }
6810
6811 var starttime int64
6812
6813 func schedtrace(detailed bool) {
6814 now := nanotime()
6815 if starttime == 0 {
6816 starttime = now
6817 }
6818
6819 lock(&sched.lock)
6820 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6821 if detailed {
6822 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6823 }
6824
6825
6826
6827 for i, pp := range allp {
6828 h := atomic.Load(&pp.runqhead)
6829 t := atomic.Load(&pp.runqtail)
6830 if detailed {
6831 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6832 mp := pp.m.ptr()
6833 if mp != nil {
6834 print(mp.id)
6835 } else {
6836 print("nil")
6837 }
6838 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6839 } else {
6840
6841
6842 print(" ")
6843 if i == 0 {
6844 print("[ ")
6845 }
6846 print(t - h)
6847 if i == len(allp)-1 {
6848 print(" ]")
6849 }
6850 }
6851 }
6852
6853 if !detailed {
6854
6855 print(" schedticks=[ ")
6856 for _, pp := range allp {
6857 print(pp.schedtick)
6858 print(" ")
6859 }
6860 print("]\n")
6861 }
6862
6863 if !detailed {
6864 unlock(&sched.lock)
6865 return
6866 }
6867
6868 for mp := allm; mp != nil; mp = mp.alllink {
6869 pp := mp.p.ptr()
6870 print(" M", mp.id, ": p=")
6871 if pp != nil {
6872 print(pp.id)
6873 } else {
6874 print("nil")
6875 }
6876 print(" curg=")
6877 if mp.curg != nil {
6878 print(mp.curg.goid)
6879 } else {
6880 print("nil")
6881 }
6882 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6883 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6884 print(lockedg.goid)
6885 } else {
6886 print("nil")
6887 }
6888 print("\n")
6889 }
6890
6891 forEachG(func(gp *g) {
6892 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6893 if gp.m != nil {
6894 print(gp.m.id)
6895 } else {
6896 print("nil")
6897 }
6898 print(" lockedm=")
6899 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6900 print(lockedm.id)
6901 } else {
6902 print("nil")
6903 }
6904 print("\n")
6905 })
6906 unlock(&sched.lock)
6907 }
6908
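// updateMaxProcsGState is the state of the goroutine that applies changes to
// the default GOMAXPROCS value: the goroutine itself, whether it is idle and
// waiting for work, and the new procs value to apply.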
6909 type updateMaxProcsGState struct {
6910 lock mutex
6911 g *g
6912 idle atomic.Bool
6913
6914
6915 procs int32
6916 }
6917
6918 var (
6919
6920
6921 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6922
6923
6924
6925 updateMaxProcsG updateMaxProcsGState
6926
6927
6928
6929
6930
6931
6932
6933
6934
6935
6936
6937
6938
6939
6940
6941
6942
6943
6944
6945
6946
6947
6948
6949
6950
6951
6952
6953
6954
6955
6956
6957
6958
6959
6960
6961
6962
6963
6964
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974 computeMaxProcsLock mutex
6975 )
6976
6977
6978
6979
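// defaultGOMAXPROCSUpdateEnable starts the background goroutine that keeps
// the default GOMAXPROCS value up to date. When automatic updates are
// disabled via the updatemaxprocs GODEBUG setting, it only records the
// non-default setting and returns.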
6980 func defaultGOMAXPROCSUpdateEnable() {
6981 if debug.updatemaxprocs == 0 {
6982
6983
6984
6985
6986
6987
6988
6989
6990
6991
6992
6993 updatemaxprocs.IncNonDefault()
6994 return
6995 }
6996
6997 go updateMaxProcsGoroutine()
6998 }
6999
7000 func updateMaxProcsGoroutine() {
7001 updateMaxProcsG.g = getg()
7002 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
7003 for {
7004 lock(&updateMaxProcsG.lock)
7005 if updateMaxProcsG.idle.Load() {
7006 throw("updateMaxProcsGoroutine: phase error")
7007 }
7008 updateMaxProcsG.idle.Store(true)
7009 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
7010
7011
7012 stw := stopTheWorldGC(stwGOMAXPROCS)
7013
7014
7015 lock(&sched.lock)
7016 custom := sched.customGOMAXPROCS
7017 unlock(&sched.lock)
7018 if custom {
7019 startTheWorldGC(stw)
7020 return
7021 }
7022
7023
7024
7025
7026
7027 newprocs = updateMaxProcsG.procs
7028 lock(&sched.lock)
7029 sched.customGOMAXPROCS = false
7030 unlock(&sched.lock)
7031
7032 startTheWorldGC(stw)
7033 }
7034 }
7035
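// sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS value on behalf of
// sysmon. If the value changed and GOMAXPROCS has not been set manually, it
// wakes updateMaxProcsGoroutine to apply the new value.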
7036 func sysmonUpdateGOMAXPROCS() {
7037
7038 lock(&computeMaxProcsLock)
7039
7040
7041 lock(&sched.lock)
7042 custom := sched.customGOMAXPROCS
7043 curr := gomaxprocs
7044 unlock(&sched.lock)
7045 if custom {
7046 unlock(&computeMaxProcsLock)
7047 return
7048 }
7049
7050
7051 procs := defaultGOMAXPROCS(0)
7052 unlock(&computeMaxProcsLock)
7053 if procs == curr {
7054
7055 return
7056 }
7057
7058
7059
7060
7061 if updateMaxProcsG.idle.Load() {
7062 lock(&updateMaxProcsG.lock)
7063 updateMaxProcsG.procs = procs
7064 updateMaxProcsG.idle.Store(false)
7065 var list gList
7066 list.push(updateMaxProcsG.g)
7067 injectglist(&list)
7068 unlock(&updateMaxProcsG.lock)
7069 }
7070 }
7071
7072
7073
7074
7075
7076
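// schedEnableUser enables or disables the scheduling of user goroutines.
//
// This does not stop already running user goroutines, so the caller should
// first stop the world when disabling user goroutines.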
7077 func schedEnableUser(enable bool) {
7078 lock(&sched.lock)
7079 if sched.disable.user == !enable {
7080 unlock(&sched.lock)
7081 return
7082 }
7083 sched.disable.user = !enable
7084 if enable {
7085 n := sched.disable.runnable.size
7086 globrunqputbatch(&sched.disable.runnable)
7087 unlock(&sched.lock)
7088 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7089 startm(nil, false, false)
7090 }
7091 } else {
7092 unlock(&sched.lock)
7093 }
7094 }
7095
7096
7097
7098
7099
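// schedEnabled reports whether gp should be scheduled: when user goroutine
// scheduling is disabled, only system goroutines may run.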
7100 func schedEnabled(gp *g) bool {
7101 assertLockHeld(&sched.lock)
7102
7103 if sched.disable.user {
7104 return isSystemGoroutine(gp, true)
7105 }
7106 return true
7107 }
7108
7109
7110
7111
7112
7113
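// mput puts mp on the midle list and checks for deadlock.
// sched.lock must be held.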
7114 func mput(mp *m) {
7115 assertLockHeld(&sched.lock)
7116
7117 sched.midle.push(unsafe.Pointer(mp))
7118 sched.nmidle++
7119 checkdead()
7120 }
7121
7122
7123
7124
7125
7126
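// mget tries to get an M from the midle list, returning nil if none are idle.
// sched.lock must be held.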
7127 func mget() *m {
7128 assertLockHeld(&sched.lock)
7129
7130 mp := (*m)(sched.midle.pop())
7131 if mp != nil {
7132 sched.nmidle--
7133 }
7134 return mp
7135 }
7136
7137
7138
7139
7140
7141
7142
7143
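// mgetSpecific removes mp from the midle list if it is there, returning mp on
// success and nil if mp is not idle.
// sched.lock must be held.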
7144 func mgetSpecific(mp *m) *m {
7145 assertLockHeld(&sched.lock)
7146
7147 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7148
7149 return nil
7150 }
7151
7152 sched.midle.remove(unsafe.Pointer(mp))
7153 sched.nmidle--
7154
7155 return mp
7156 }
7157
7158
7159
7160
7161
7162
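// globrunqput puts gp on the tail of the global runnable queue.
// sched.lock must be held.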
7163 func globrunqput(gp *g) {
7164 assertLockHeld(&sched.lock)
7165
7166 sched.runq.pushBack(gp)
7167 }
7168
7169
7170
7171
7172
7173
7174 func globrunqputhead(gp *g) {
7175 assertLockHeld(&sched.lock)
7176
7177 sched.runq.push(gp)
7178 }
7179
7180
7181
7182
7183
7184
7185
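// globrunqputbatch appends a batch of runnable goroutines to the tail of the
// global runnable queue, leaving *batch empty.
// sched.lock must be held.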
7186 func globrunqputbatch(batch *gQueue) {
7187 assertLockHeld(&sched.lock)
7188
7189 sched.runq.pushBackAll(*batch)
7190 *batch = gQueue{}
7191 }
7192
7193
7194
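// globrunqget pops one G off the global runnable queue, or returns nil if it
// is empty. sched.lock must be held.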
7195 func globrunqget() *g {
7196 assertLockHeld(&sched.lock)
7197
7198 if sched.runq.size == 0 {
7199 return nil
7200 }
7201
7202 return sched.runq.pop()
7203 }
7204
7205
7206
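// globrunqgetbatch pops one G, returned as gp, plus up to n-1 more returned
// in q from the global runnable queue, capping the batch so the queue is
// shared fairly across Ps. sched.lock must be held.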
7207 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7208 assertLockHeld(&sched.lock)
7209
7210 if sched.runq.size == 0 {
7211 return
7212 }
7213
7214 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7215
7216 gp = sched.runq.pop()
7217 n--
7218
7219 for ; n > 0; n-- {
7220 gp1 := sched.runq.pop()
7221 q.pushBack(gp1)
7222 }
7223 return
7224 }
7225
7226
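// pMask is an atomic bitmask of Ps, indexed by P id, with one bit per P.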
7227 type pMask []uint32
7228
7229
7230 func (p pMask) read(id uint32) bool {
7231 word := id / 32
7232 mask := uint32(1) << (id % 32)
7233 return (atomic.Load(&p[word]) & mask) != 0
7234 }
7235
7236
7237 func (p pMask) set(id int32) {
7238 word := id / 32
7239 mask := uint32(1) << (id % 32)
7240 atomic.Or(&p[word], mask)
7241 }
7242
7243
7244 func (p pMask) clear(id int32) {
7245 word := id / 32
7246 mask := uint32(1) << (id % 32)
7247 atomic.And(&p[word], ^mask)
7248 }
7249
7250
7251 func (p pMask) any() bool {
7252 for i := range p {
7253 if atomic.Load(&p[i]) != 0 {
7254 return true
7255 }
7256 }
7257 return false
7258 }
7259
7260
7261
7262
7263
7264 func (p pMask) resize(nprocs int32) pMask {
7265 maskWords := (nprocs + 31) / 32
7266
7267 if maskWords <= int32(cap(p)) {
7268 return p[:maskWords]
7269 }
7270 newMask := make([]uint32, maskWords)
7271
7272 copy(newMask, p)
7273 return newMask
7274 }
7275
7276
7277
7278
7279
7280
7281
7282
7283
7284
7285
7286
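// pidleput puts pp on the _Pidle list, releasing ownership of it.
// now must be a relatively recent call to nanotime() or zero; the return
// value is now, or the current time if now was zero.
// sched.lock must be held.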
7287 func pidleput(pp *p, now int64) int64 {
7288 assertLockHeld(&sched.lock)
7289
7290 if !runqempty(pp) {
7291 throw("pidleput: P has non-empty run queue")
7292 }
7293 if now == 0 {
7294 now = nanotime()
7295 }
7296 if pp.timers.len.Load() == 0 {
7297 timerpMask.clear(pp.id)
7298 }
7299 idlepMask.set(pp.id)
7300 pp.link = sched.pidle
7301 sched.pidle.set(pp)
7302 sched.npidle.Add(1)
7303 if !pp.limiterEvent.start(limiterEventIdle, now) {
7304 throw("must be able to track idle limiter event")
7305 }
7306 return now
7307 }
7308
7309
7310
7311
7312
7313
7314
7315
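// pidleget tries to take a P off the _Pidle list, acquiring ownership of it.
// now has the same meaning as in pidleput.
// sched.lock must be held.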
7316 func pidleget(now int64) (*p, int64) {
7317 assertLockHeld(&sched.lock)
7318
7319 pp := sched.pidle.ptr()
7320 if pp != nil {
7321
7322 if now == 0 {
7323 now = nanotime()
7324 }
7325 timerpMask.set(pp.id)
7326 idlepMask.clear(pp.id)
7327 sched.pidle = pp.link
7328 sched.npidle.Add(-1)
7329 pp.limiterEvent.stop(limiterEventIdle, now)
7330 }
7331 return pp, now
7332 }
7333
7334
7335
7336
7337
7338
7339
7340
7341
7342
7343
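// pidlegetSpinning is like pidleget, but is intended for spinning Ms (or
// callers about to start one) that have found work. If no idle P is
// available it sets sched.needspinning so that Ms releasing a P re-check for
// this pending work.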
7344 func pidlegetSpinning(now int64) (*p, int64) {
7345 assertLockHeld(&sched.lock)
7346
7347 pp, now := pidleget(now)
7348 if pp == nil {
7349
7350
7351
7352 sched.needspinning.Store(1)
7353 return nil, now
7354 }
7355
7356 return pp, now
7357 }
7358
7359
7360
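// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.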
7361 func runqempty(pp *p) bool {
7362
7363
7364
7365
7366 for {
7367 head := atomic.Load(&pp.runqhead)
7368 tail := atomic.Load(&pp.runqtail)
7369 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7370 if tail == atomic.Load(&pp.runqtail) {
7371 return head == tail && runnext == 0
7372 }
7373 }
7374 }
7375
7376
7377
7378
7379
7380
7381
7382
7383
7384
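// randomizeScheduler perturbs scheduling decisions when the race detector is
// enabled, to shake out latent assumptions about scheduling order.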
7385 const randomizeScheduler = raceenabled
7386
7387
7388
7389
7390
7391
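// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the pp.runnext slot.
// If the run queue is full, runqput puts g on the global queue.
// Executed only by the owner P.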
7392 func runqput(pp *p, gp *g, next bool) {
7393 if !haveSysmon && next {
7394
7395
7396
7397
7398
7399
7400
7401
7402 next = false
7403 }
7404 if randomizeScheduler && next && randn(2) == 0 {
7405 next = false
7406 }
7407
7408 if next {
7409 retryNext:
7410 oldnext := pp.runnext
7411 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7412 goto retryNext
7413 }
7414 if oldnext == 0 {
7415 return
7416 }
7417
7418 gp = oldnext.ptr()
7419 }
7420
7421 retry:
7422 h := atomic.LoadAcq(&pp.runqhead)
7423 t := pp.runqtail
7424 if t-h < uint32(len(pp.runq)) {
7425 pp.runq[t%uint32(len(pp.runq))].set(gp)
7426 atomic.StoreRel(&pp.runqtail, t+1)
7427 return
7428 }
7429 if runqputslow(pp, gp, h, t) {
7430 return
7431 }
7432
7433 goto retry
7434 }
7435
7436
7437
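// runqputslow puts gp and a batch of work from the local runnable queue on
// the global queue. Executed only by the owner P.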
7438 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7439 var batch [len(pp.runq)/2 + 1]*g
7440
7441
7442 n := t - h
7443 n = n / 2
7444 if n != uint32(len(pp.runq)/2) {
7445 throw("runqputslow: queue is not full")
7446 }
7447 for i := uint32(0); i < n; i++ {
7448 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7449 }
7450 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7451 return false
7452 }
7453 batch[n] = gp
7454
7455 if randomizeScheduler {
7456 for i := uint32(1); i <= n; i++ {
7457 j := cheaprandn(i + 1)
7458 batch[i], batch[j] = batch[j], batch[i]
7459 }
7460 }
7461
7462
7463 for i := uint32(0); i < n; i++ {
7464 batch[i].schedlink.set(batch[i+1])
7465 }
7466
7467 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7468
7469
7470 lock(&sched.lock)
7471 globrunqputbatch(&q)
7472 unlock(&sched.lock)
7473 return true
7474 }
7475
7476
7477
7478
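// runqputbatch tries to put all the Gs on q on the local runnable queue.
// Gs that do not fit remain on q. Executed only by the owner P.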
7479 func runqputbatch(pp *p, q *gQueue) {
7480 if q.empty() {
7481 return
7482 }
7483 h := atomic.LoadAcq(&pp.runqhead)
7484 t := pp.runqtail
7485 n := uint32(0)
7486 for !q.empty() && t-h < uint32(len(pp.runq)) {
7487 gp := q.pop()
7488 pp.runq[t%uint32(len(pp.runq))].set(gp)
7489 t++
7490 n++
7491 }
7492
7493 if randomizeScheduler {
7494 off := func(o uint32) uint32 {
7495 return (pp.runqtail + o) % uint32(len(pp.runq))
7496 }
7497 for i := uint32(1); i < n; i++ {
7498 j := cheaprandn(i + 1)
7499 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7500 }
7501 }
7502
7503 atomic.StoreRel(&pp.runqtail, t)
7504
7505 return
7506 }
7507
7508
7509
7510
7511
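// runqget gets a g from the local runnable queue, preferring the runnext
// slot. If inheritTime is true, gp should inherit the remaining time in the
// current time slice; otherwise it should start a new time slice.
// Executed only by the owner P.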
7512 func runqget(pp *p) (gp *g, inheritTime bool) {
7513
7514 next := pp.runnext
7515
7516
7517
7518 if next != 0 && pp.runnext.cas(next, 0) {
7519 return next.ptr(), true
7520 }
7521
7522 for {
7523 h := atomic.LoadAcq(&pp.runqhead)
7524 t := pp.runqtail
7525 if t == h {
7526 return nil, false
7527 }
7528 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7529 if atomic.CasRel(&pp.runqhead, h, h+1) {
7530 return gp, false
7531 }
7532 }
7533 }
7534
7535
7536
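// runqdrain removes all Gs from pp's local runnable queue (including
// runnext) and returns them as a queue. Executed only by the owner P.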
7537 func runqdrain(pp *p) (drainQ gQueue) {
7538 oldNext := pp.runnext
7539 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7540 drainQ.pushBack(oldNext.ptr())
7541 }
7542
7543 retry:
7544 h := atomic.LoadAcq(&pp.runqhead)
7545 t := pp.runqtail
7546 qn := t - h
7547 if qn == 0 {
7548 return
7549 }
7550 if qn > uint32(len(pp.runq)) {
7551 goto retry
7552 }
7553
7554 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7555 goto retry
7556 }
7557
7558
7559
7560
7561
7562
7563
7564
7565 for i := uint32(0); i < qn; i++ {
7566 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7567 drainQ.pushBack(gp)
7568 }
7569 return
7570 }
7571
7572
7573
7574
7575
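// runqgrab grabs a batch of goroutines from pp's runnable queue into batch,
// a ring buffer starting at batchHead, and returns the number grabbed.
// It can be executed by any P. If stealRunNextG is set it may also steal
// pp.runnext, backing off briefly first so the owner has a chance to
// schedule it.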
7576 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7577 for {
7578 h := atomic.LoadAcq(&pp.runqhead)
7579 t := atomic.LoadAcq(&pp.runqtail)
7580 n := t - h
7581 n = n - n/2
7582 if n == 0 {
7583 if stealRunNextG {
7584
7585 if next := pp.runnext; next != 0 {
7586 if pp.status == _Prunning {
7587 if mp := pp.m.ptr(); mp != nil {
7588 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
7589
7590
7591
7592
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602
7603
7604
7605
7606
7607
7608 if !osHasLowResTimer {
7609 usleep(3)
7610 } else {
7611
7612
7613
7614 osyield()
7615 }
7616 }
7617 }
7618 }
7619 if !pp.runnext.cas(next, 0) {
7620 continue
7621 }
7622 batch[batchHead%uint32(len(batch))] = next
7623 return 1
7624 }
7625 }
7626 return 0
7627 }
7628 if n > uint32(len(pp.runq)/2) {
7629 continue
7630 }
7631 for i := uint32(0); i < n; i++ {
7632 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7633 batch[(batchHead+i)%uint32(len(batch))] = g
7634 }
7635 if atomic.CasRel(&pp.runqhead, h, h+n) {
7636 return n
7637 }
7638 }
7639 }
7640
7641
7642
7643
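// runqsteal steals half of the elements from the local runnable queue of p2
// and puts them onto the local runnable queue of pp. Returns one of the
// stolen elements (or nil if the steal failed).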
7644 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7645 t := pp.runqtail
7646 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7647 if n == 0 {
7648 return nil
7649 }
7650 n--
7651 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7652 if n == 0 {
7653 return gp
7654 }
7655 h := atomic.LoadAcq(&pp.runqhead)
7656 if t-h+n >= uint32(len(pp.runq)) {
7657 throw("runqsteal: runq overflow")
7658 }
7659 atomic.StoreRel(&pp.runqtail, t+n)
7660 return gp
7661 }
7662
7663
7664
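// A gQueue is a double-ended queue of Gs linked through g.schedlink.
// A G can only be on one gQueue or gList at a time.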
7665 type gQueue struct {
7666 head guintptr
7667 tail guintptr
7668 size int32
7669 }
7670
7671
7672 func (q *gQueue) empty() bool {
7673 return q.head == 0
7674 }
7675
7676
7677 func (q *gQueue) push(gp *g) {
7678 gp.schedlink = q.head
7679 q.head.set(gp)
7680 if q.tail == 0 {
7681 q.tail.set(gp)
7682 }
7683 q.size++
7684 }
7685
7686
7687 func (q *gQueue) pushBack(gp *g) {
7688 gp.schedlink = 0
7689 if q.tail != 0 {
7690 q.tail.ptr().schedlink.set(gp)
7691 } else {
7692 q.head.set(gp)
7693 }
7694 q.tail.set(gp)
7695 q.size++
7696 }
7697
7698
7699
7700 func (q *gQueue) pushBackAll(q2 gQueue) {
7701 if q2.tail == 0 {
7702 return
7703 }
7704 q2.tail.ptr().schedlink = 0
7705 if q.tail != 0 {
7706 q.tail.ptr().schedlink = q2.head
7707 } else {
7708 q.head = q2.head
7709 }
7710 q.tail = q2.tail
7711 q.size += q2.size
7712 }
7713
7714
7715
7716 func (q *gQueue) pop() *g {
7717 gp := q.head.ptr()
7718 if gp != nil {
7719 q.head = gp.schedlink
7720 if q.head == 0 {
7721 q.tail = 0
7722 }
7723 q.size--
7724 }
7725 return gp
7726 }
7727
7728
7729 func (q *gQueue) popList() gList {
7730 stack := gList{q.head, q.size}
7731 *q = gQueue{}
7732 return stack
7733 }
7734
7735
7736
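// A gList is a list of Gs linked through g.schedlink. A G can only be on one
// gQueue or gList at a time.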
7737 type gList struct {
7738 head guintptr
7739 size int32
7740 }
7741
7742
7743 func (l *gList) empty() bool {
7744 return l.head == 0
7745 }
7746
7747
7748 func (l *gList) push(gp *g) {
7749 gp.schedlink = l.head
7750 l.head.set(gp)
7751 l.size++
7752 }
7753
7754
7755 func (l *gList) pushAll(q gQueue) {
7756 if !q.empty() {
7757 q.tail.ptr().schedlink = l.head
7758 l.head = q.head
7759 l.size += q.size
7760 }
7761 }
7762
7763
7764 func (l *gList) pop() *g {
7765 gp := l.head.ptr()
7766 if gp != nil {
7767 l.head = gp.schedlink
7768 l.size--
7769 }
7770 return gp
7771 }
7772
7773
7774 func setMaxThreads(in int) (out int) {
7775 lock(&sched.lock)
7776 out = int(sched.maxmcount)
7777 if in > 0x7fffffff {
7778 sched.maxmcount = 0x7fffffff
7779 } else {
7780 sched.maxmcount = int32(in)
7781 }
7782 checkmcount()
7783 unlock(&sched.lock)
7784 return
7785 }
7786
7787
7788
7789
7790
7791
7792
7793
7794
7795
7796
7797
7798
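// procPin pins the current goroutine to its P by incrementing m.locks, which
// disables preemption, and returns the id of that P. Callers must pair it
// with procUnpin.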
7799 func procPin() int {
7800 gp := getg()
7801 mp := gp.m
7802
7803 mp.locks++
7804 return int(mp.p.ptr().id)
7805 }
7806
7807
7808
7809
7810
7811
7812
7813
7814
7815
7816
7817
7818
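// procUnpin undoes a previous call to procPin, re-enabling preemption.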
7819 func procUnpin() {
7820 gp := getg()
7821 gp.m.locks--
7822 }
7823
7824
7825
7826 func sync_runtime_procPin() int {
7827 return procPin()
7828 }
7829
7830
7831
7832 func sync_runtime_procUnpin() {
7833 procUnpin()
7834 }
7835
7836
7837
7838 func sync_atomic_runtime_procPin() int {
7839 return procPin()
7840 }
7841
7842
7843
7844 func sync_atomic_runtime_procUnpin() {
7845 procUnpin()
7846 }
7847
7848
7849
7850
7851
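// internal_sync_runtime_canSpin reports whether spinning is worthwhile for
// internal/sync primitives: only for the first few iterations, on a
// multicore machine with other Ps running, and only when the local run queue
// is empty.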
7852 func internal_sync_runtime_canSpin(i int) bool {
7853
7854
7855
7856
7857
7858 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7859 return false
7860 }
7861 if p := getg().m.p.ptr(); !runqempty(p) {
7862 return false
7863 }
7864 return true
7865 }
7866
7867
7868
7869 func internal_sync_runtime_doSpin() {
7870 procyield(active_spin_cnt)
7871 }
7872
7873
7874
7875
7876
7877
7878
7879
7880
7881
7882
7883
7884
7885
7886
7887 func sync_runtime_canSpin(i int) bool {
7888 return internal_sync_runtime_canSpin(i)
7889 }
7890
7891
7892
7893
7894
7895
7896
7897
7898
7899
7900
7901
7902
7903 func sync_runtime_doSpin() {
7904 internal_sync_runtime_doSpin()
7905 }
7906
7907 var stealOrder randomOrder
7908
7909
7910
7911
7912
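// randomOrder and randomEnum enumerate all Ps in a pseudo-random order
// without repetition. The trick is that stepping through 0..count-1 with a
// stride that is coprime to count visits every value exactly once. For
// example, with count=4, inc=3, and a starting pos of 1, the positions
// visited are 1, 0, 3, 2.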
7913 type randomOrder struct {
7914 count uint32
7915 coprimes []uint32
7916 }
7917
7918 type randomEnum struct {
7919 i uint32
7920 count uint32
7921 pos uint32
7922 inc uint32
7923 }
7924
7925 func (ord *randomOrder) reset(count uint32) {
7926 ord.count = count
7927 ord.coprimes = ord.coprimes[:0]
7928 for i := uint32(1); i <= count; i++ {
7929 if gcd(i, count) == 1 {
7930 ord.coprimes = append(ord.coprimes, i)
7931 }
7932 }
7933 }
7934
7935 func (ord *randomOrder) start(i uint32) randomEnum {
7936 return randomEnum{
7937 count: ord.count,
7938 pos: i % ord.count,
7939 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7940 }
7941 }
7942
7943 func (enum *randomEnum) done() bool {
7944 return enum.i == enum.count
7945 }
7946
7947 func (enum *randomEnum) next() {
7948 enum.i++
7949 enum.pos = (enum.pos + enum.inc) % enum.count
7950 }
7951
7952 func (enum *randomEnum) position() uint32 {
7953 return enum.pos
7954 }
7955
7956 func gcd(a, b uint32) uint32 {
7957 for b != 0 {
7958 a, b = b, a%b
7959 }
7960 return a
7961 }
7962
7963
7964
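// An initTask represents the set of init functions that need to be run for a
// package. The struct header is followed in memory by nfns function
// addresses, one per init function, which doInit1 below iterates over.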
7965 type initTask struct {
7966 state uint32
7967 nfns uint32
7968
7969 }
7970
7971
7972
7973 var inittrace tracestat
7974
7975 type tracestat struct {
7976 active bool
7977 id uint64
7978 allocs uint64
7979 bytes uint64
7980 }
7981
7982 func doInit(ts []*initTask) {
7983 for _, t := range ts {
7984 doInit1(t)
7985 }
7986 }
7987
7988 func doInit1(t *initTask) {
7989 switch t.state {
7990 case 2:
7991 return
7992 case 1:
7993 throw("recursive call during initialization - linker skew")
7994 default:
7995 t.state = 1
7996
7997 var (
7998 start int64
7999 before tracestat
8000 )
8001
8002 if inittrace.active {
8003 start = nanotime()
8004
8005 before = inittrace
8006 }
8007
8008 if t.nfns == 0 {
8009
8010 throw("inittask with no functions")
8011 }
8012
8013 firstFunc := add(unsafe.Pointer(t), 8)
8014 for i := uint32(0); i < t.nfns; i++ {
8015 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
8016 f := *(*func())(unsafe.Pointer(&p))
8017 f()
8018 }
8019
8020 if inittrace.active {
8021 end := nanotime()
8022
8023 after := inittrace
8024
8025 f := *(*func())(unsafe.Pointer(&firstFunc))
8026 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
8027
8028 var sbuf [24]byte
8029 print("init ", pkg, " @")
8030 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
8031 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
8032 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
8033 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
8034 print("\n")
8035 }
8036
8037 t.state = 2
8038 }
8039 }
8040