Source file
src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/strconv"
15 "internal/runtime/sys"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
21 var modinfo string
117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
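// runtime_inittasks holds the runtime package's own init tasks; runtime.main
// runs them (via doInit) before any user package initialization.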
127 var runtime_inittasks []*initTask
128
129
130
131
132
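// main_init_done is closed by runtime.main once all package initialization is
// complete; cgo callback paths use it to wait for init before running Go code.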
133 var main_init_done chan bool
134
135
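// main_main is the program's main.main, wired up by the linker.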
136 func main_main()
137
138
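// mainStarted indicates that the main M has started.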
139 var mainStarted bool
140
141
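// runtimeInitTime is the nanotime() at which the runtime started.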
142 var runtimeInitTime int64
143
144
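// initSigmask is the signal mask used by newly created Ms; it is saved during
// schedinit and installed on each new m by newm.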
145 var initSigmask sigset
146
147
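// main is the main goroutine: it finishes runtime setup, runs all init tasks,
// calls main.main, and finally exits the process.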
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
154
155
156
157
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
164
165
166
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
178
179
180
181
182
183
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247
248
249
250
251
252
253
254
255 last := lastmoduledatap
256 for m := &firstmoduledata; true; m = m.next {
257 doInit(m.inittasks)
258 if m == last {
259 break
260 }
261 }
262
263
264
265 inittrace.active = false
266
267 close(main_init_done)
268
269 needUnlock = false
270 unlockOSThread()
271
272 if isarchive || islibrary {
273
274
275 if GOARCH == "wasm" {
276
277
278
279
280
281
282
283 pause(sys.GetCallerSP() - 16)
284 panic("unreachable")
285 }
286 return
287 }
288 fn := main_main
289 fn()
290
291 exitHooksRun := false
292 if raceenabled {
293 runExitHooks(0)
294 exitHooksRun = true
295 racefini()
296 }
297
298
299
300
301
302
303
304
305 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
306 runExitHooks(0)
307 exitHooksRun = true
308 lsandoleakcheck()
309 }
310
311
312
313
314
315 if runningPanicDefers.Load() != 0 {
316
317 for c := 0; c < 1000; c++ {
318 if runningPanicDefers.Load() == 0 {
319 break
320 }
321 Gosched()
322 }
323 }
324 if panicking.Load() != 0 {
325 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
326 }
327 if !exitHooksRun {
328 runExitHooks(0)
329 }
330
331 exit(0)
332 for {
333 var x *int32
334 *x = 0
335 }
336 }
337
338
339
340
341 func os_beforeExit(exitCode int) {
342 runExitHooks(exitCode)
343 if exitCode == 0 && raceenabled {
344 racefini()
345 }
346
347
348 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
349 lsandoleakcheck()
350 }
351 }
352
353 func init() {
354 exithook.Gosched = Gosched
355 exithook.Goid = func() uint64 { return getg().goid }
356 exithook.Throw = throw
357 }
358
359 func runExitHooks(code int) {
360 exithook.Run(code)
361 }
362
363
364 func init() {
365 go forcegchelper()
366 }
367
368 func forcegchelper() {
369 forcegc.g = getg()
370 lockInit(&forcegc.lock, lockRankForcegc)
371 for {
372 lock(&forcegc.lock)
373 if forcegc.idle.Load() {
374 throw("forcegc: phase error")
375 }
376 forcegc.idle.Store(true)
377 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
378
379 if debug.gctrace > 0 {
380 println("GC forced")
381 }
382
383 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
384 }
385 }
386
387
388
389
390
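// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.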
391 func Gosched() {
392 checkTimeouts()
393 mcall(gosched_m)
394 }
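// Illustrative sketch (not part of proc.go): user code reaches gosched_m by
// calling runtime.Gosched; "done" below is a hypothetical condition.
//
//	for !done() {
//		runtime.Gosched() // yield instead of busy-waiting
//	}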
395
396
397
398
399
400 func goschedguarded() {
401 mcall(goschedguarded_m)
402 }
403
404
405
406
407
408
409 func goschedIfBusy() {
410 gp := getg()
411
412
413 if !gp.preempt && sched.npidle.Load() > 0 {
414 return
415 }
416 mcall(gosched_m)
417 }
418
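// gopark puts the current goroutine into a waiting state (reason) and calls
// unlockf(gp, lock) on the system stack. If unlockf returns false, the
// goroutine is resumed immediately; otherwise it stays parked until a later
// goready makes it runnable. unlockf must not access this goroutine's stack.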
447 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
448 if reason != waitReasonSleep {
449 checkTimeouts()
450 }
451 mp := acquirem()
452 gp := mp.curg
453 status := readgstatus(gp)
454 if status != _Grunning && status != _Gscanrunning {
455 throw("gopark: bad g status")
456 }
457 mp.waitlock = lock
458 mp.waitunlockf = unlockf
459 gp.waitreason = reason
460 mp.waitTraceBlockReason = traceReason
461 mp.waitTraceSkip = traceskip
462 releasem(mp)
463
464 mcall(park_m)
465 }
466
467
468
469 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
470 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
471 }
472
473
474
475
476
477
478
479
480
481
482
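// goready marks a goroutine parked by gopark as runnable again; the work is
// done by ready on the system stack.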
483 func goready(gp *g, traceskip int) {
484 systemstack(func() {
485 ready(gp, traceskip, true)
486 })
487 }
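// Illustrative sketch (not part of proc.go) of the park/ready pairing used
// throughout the runtime. someLock and gp are placeholders for a real mutex
// and the parked goroutine:
//
//	goparkunlock(&someLock, waitReasonZero, traceBlockGeneric, 1) // sleep, releasing someLock
//	...
//	goready(gp, 1) // elsewhere: make the parked goroutine runnable again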
488
489
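// acquireSudog returns a sudog from the per-P cache, refilling half the cache
// from the central list (or allocating a new sudog) when it is empty.
// Callers must pair it with releaseSudog.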
490 func acquireSudog() *sudog {
491
492
493
494
495
496
497
498
499 mp := acquirem()
500 pp := mp.p.ptr()
501 if len(pp.sudogcache) == 0 {
502 lock(&sched.sudoglock)
503
504 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
505 s := sched.sudogcache
506 sched.sudogcache = s.next
507 s.next = nil
508 pp.sudogcache = append(pp.sudogcache, s)
509 }
510 unlock(&sched.sudoglock)
511
512 if len(pp.sudogcache) == 0 {
513 pp.sudogcache = append(pp.sudogcache, new(sudog))
514 }
515 }
516 n := len(pp.sudogcache)
517 s := pp.sudogcache[n-1]
518 pp.sudogcache[n-1] = nil
519 pp.sudogcache = pp.sudogcache[:n-1]
520 if s.elem.get() != nil {
521 throw("acquireSudog: found s.elem != nil in cache")
522 }
523 releasem(mp)
524 return s
525 }
526
527
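// releaseSudog returns s to the per-P cache after checking that it has been
// fully cleared; when the cache is full, half of it is spilled back to the
// central list under sched.sudoglock.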
528 func releaseSudog(s *sudog) {
529 if s.elem.get() != nil {
530 throw("runtime: sudog with non-nil elem")
531 }
532 if s.isSelect {
533 throw("runtime: sudog with non-false isSelect")
534 }
535 if s.next != nil {
536 throw("runtime: sudog with non-nil next")
537 }
538 if s.prev != nil {
539 throw("runtime: sudog with non-nil prev")
540 }
541 if s.waitlink != nil {
542 throw("runtime: sudog with non-nil waitlink")
543 }
544 if s.c.get() != nil {
545 throw("runtime: sudog with non-nil c")
546 }
547 gp := getg()
548 if gp.param != nil {
549 throw("runtime: releaseSudog with non-nil gp.param")
550 }
551 mp := acquirem()
552 pp := mp.p.ptr()
553 if len(pp.sudogcache) == cap(pp.sudogcache) {
554
555 var first, last *sudog
556 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
557 n := len(pp.sudogcache)
558 p := pp.sudogcache[n-1]
559 pp.sudogcache[n-1] = nil
560 pp.sudogcache = pp.sudogcache[:n-1]
561 if first == nil {
562 first = p
563 } else {
564 last.next = p
565 }
566 last = p
567 }
568 lock(&sched.sudoglock)
569 last.next = sched.sudogcache
570 sched.sudogcache = first
571 unlock(&sched.sudoglock)
572 }
573 pp.sudogcache = append(pp.sudogcache, s)
574 releasem(mp)
575 }
576
577
578 func badmcall(fn func(*g)) {
579 throw("runtime: mcall called on m->g0 stack")
580 }
581
582 func badmcall2(fn func(*g)) {
583 throw("runtime: mcall function returned")
584 }
585
586 func badreflectcall() {
587 panic(plainError("arg size to reflect.call more than 1GB"))
588 }
589
590
591
592 func badmorestackg0() {
593 if !crashStackImplemented {
594 writeErrStr("fatal: morestack on g0\n")
595 return
596 }
597
598 g := getg()
599 switchToCrashStack(func() {
600 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
601 g.m.traceback = 2
602 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
603 print("\n")
604
605 throw("morestack on g0")
606 })
607 }
608
609
610
611 func badmorestackgsignal() {
612 writeErrStr("fatal: morestack on gsignal\n")
613 }
614
615
616 func badctxt() {
617 throw("ctxt != 0")
618 }
619
620
621
622 var gcrash g
623
624 var crashingG atomic.Pointer[g]
625
626
627
628
629
630
631
632
633
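// switchToCrashStack switches to the dedicated crash stack (gcrash) and calls
// fn, which must not return. Only one goroutine may use the crash stack at a
// time; others spin briefly or abort.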
634 func switchToCrashStack(fn func()) {
635 me := getg()
636 if crashingG.CompareAndSwapNoWB(nil, me) {
637 switchToCrashStack0(fn)
638 abort()
639 }
640 if crashingG.Load() == me {
641
642 writeErrStr("fatal: recursive switchToCrashStack\n")
643 abort()
644 }
645
646 usleep_no_g(100)
647 writeErrStr("fatal: concurrent switchToCrashStack\n")
648 abort()
649 }
650
651
652
653
654 const crashStackImplemented = GOOS != "windows"
655
656
657 func switchToCrashStack0(fn func())
658
659 func lockedOSThread() bool {
660 gp := getg()
661 return gp.lockedm != 0 && gp.m.lockedg != 0
662 }
663
664 var (
665
666
667
668
669
670
671 allglock mutex
672 allgs []*g
673
674
675
676
677
678
679
680
681
682
683
684
685
686 allglen uintptr
687 allgptr **g
688 )
689
690 func allgadd(gp *g) {
691 if readgstatus(gp) == _Gidle {
692 throw("allgadd: bad status Gidle")
693 }
694
695 lock(&allglock)
696 allgs = append(allgs, gp)
697 if &allgs[0] != allgptr {
698 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
699 }
700 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
701 unlock(&allglock)
702 }
703
704
705
706
707 func allGsSnapshot() []*g {
708 assertWorldStoppedOrLockHeld(&allglock)
709
710
711
712
713
714
715 return allgs[:len(allgs):len(allgs)]
716 }
717
718
719 func atomicAllG() (**g, uintptr) {
720 length := atomic.Loaduintptr(&allglen)
721 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
722 return ptr, length
723 }
724
725
726 func atomicAllGIndex(ptr **g, i uintptr) *g {
727 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
728 }
729
730
731
732
733 func forEachG(fn func(gp *g)) {
734 lock(&allglock)
735 for _, gp := range allgs {
736 fn(gp)
737 }
738 unlock(&allglock)
739 }
740
741
742
743
744
745 func forEachGRace(fn func(gp *g)) {
746 ptr, length := atomicAllG()
747 for i := uintptr(0); i < length; i++ {
748 gp := atomicAllGIndex(ptr, i)
749 fn(gp)
750 }
751 return
752 }
753
754 const (
755
756
757 _GoidCacheBatch = 16
758 )
759
760
761
762 func cpuinit(env string) {
763 cpu.Initialize(env)
764
765
766
767 switch GOARCH {
768 case "386", "amd64":
769 x86HasPOPCNT = cpu.X86.HasPOPCNT
770 x86HasSSE41 = cpu.X86.HasSSE41
771 x86HasFMA = cpu.X86.HasFMA
772
773 case "arm":
774 armHasVFPv4 = cpu.ARM.HasVFPv4
775
776 case "arm64":
777 arm64HasATOMICS = cpu.ARM64.HasATOMICS
778
779 case "loong64":
780 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
781 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
782 loong64HasLSX = cpu.Loong64.HasLSX
783
784 case "riscv64":
785 riscv64HasZbb = cpu.RISCV64.HasZbb
786 }
787 }
788
789
790
791
792
793
794 func getGodebugEarly() (string, bool) {
795 const prefix = "GODEBUG="
796 var env string
797 switch GOOS {
798 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
799
800
801
802 n := int32(0)
803 for argv_index(argv, argc+1+n) != nil {
804 n++
805 }
806
807 for i := int32(0); i < n; i++ {
808 p := argv_index(argv, argc+1+i)
809 s := unsafe.String(p, findnull(p))
810
811 if stringslite.HasPrefix(s, prefix) {
812 env = gostringnocopy(p)[len(prefix):]
813 break
814 }
815 }
816 break
817
818 default:
819 return "", false
820 }
821 return env, true
822 }
823
824
825
826
827
828
829
830
831
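// schedinit bootstraps the scheduler. The bootstrap sequence is: call osinit,
// call schedinit, make & queue the new G, then call runtime·mstart; the new G
// calls runtime·main.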
832 func schedinit() {
833 lockInit(&sched.lock, lockRankSched)
834 lockInit(&sched.sysmonlock, lockRankSysmon)
835 lockInit(&sched.deferlock, lockRankDefer)
836 lockInit(&sched.sudoglock, lockRankSudog)
837 lockInit(&deadlock, lockRankDeadlock)
838 lockInit(&paniclk, lockRankPanic)
839 lockInit(&allglock, lockRankAllg)
840 lockInit(&allpLock, lockRankAllp)
841 lockInit(&reflectOffs.lock, lockRankReflectOffs)
842 lockInit(&finlock, lockRankFin)
843 lockInit(&cpuprof.lock, lockRankCpuprof)
844 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
845 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
846 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
847 traceLockInit()
848
849
850
851 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
852
853 lockVerifyMSize()
854
855
856
857 gp := getg()
858 if raceenabled {
859 gp.racectx, raceprocctx0 = raceinit()
860 }
861
862 sched.maxmcount = 10000
863 crashFD.Store(^uintptr(0))
864
865
866 worldStopped()
867
868 godebug, parsedGodebug := getGodebugEarly()
869 if parsedGodebug {
870 parseRuntimeDebugVars(godebug)
871 }
872 ticks.init()
873 moduledataverify()
874 stackinit()
875 randinit()
876 mallocinit()
877 cpuinit(godebug)
878 alginit()
879 mcommoninit(gp.m, -1)
880 modulesinit()
881 typelinksinit()
882 itabsinit()
883 stkobjinit()
884
885 sigsave(&gp.m.sigmask)
886 initSigmask = gp.m.sigmask
887
888 goargs()
889 goenvs()
890 secure()
891 checkfds()
892 if !parsedGodebug {
893
894
895 parseRuntimeDebugVars(gogetenv("GODEBUG"))
896 }
897 finishDebugVarsSetup()
898 gcinit()
899
900
901
902 gcrash.stack = stackalloc(16384)
903 gcrash.stackguard0 = gcrash.stack.lo + 1000
904 gcrash.stackguard1 = gcrash.stack.lo + 1000
905
906
907
908
909
910 if disableMemoryProfiling {
911 MemProfileRate = 0
912 }
913
914
915 mProfStackInit(gp.m)
916 defaultGOMAXPROCSInit()
917
918 lock(&sched.lock)
919 sched.lastpoll.Store(nanotime())
920 var procs int32
921 if n, ok := strconv.Atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
922 procs = n
923 sched.customGOMAXPROCS = true
924 } else {
925
926
927
928
929
930
931
932
933 procs = defaultGOMAXPROCS(numCPUStartup)
934 }
935 if procresize(procs) != nil {
936 throw("unknown runnable goroutine during bootstrap")
937 }
938 unlock(&sched.lock)
939
940
941 worldStarted()
942
943 if buildVersion == "" {
944
945
946 buildVersion = "unknown"
947 }
948 if len(modinfo) == 1 {
949
950
951 modinfo = ""
952 }
953 }
954
955 func dumpgstatus(gp *g) {
956 thisg := getg()
957 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
958 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
959 }
960
961
962 func checkmcount() {
963 assertLockHeld(&sched.lock)
964
965
966
967
968
969
970
971
972
973 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
974 if count > sched.maxmcount {
975 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
976 throw("thread exhaustion")
977 }
978 }
979
980
981
982
983
984 func mReserveID() int64 {
985 assertLockHeld(&sched.lock)
986
987 if sched.mnext+1 < sched.mnext {
988 throw("runtime: thread ID overflow")
989 }
990 id := sched.mnext
991 sched.mnext++
992 checkmcount()
993 return id
994 }
995
996
997 func mcommoninit(mp *m, id int64) {
998 gp := getg()
999
1000
1001 if gp != gp.m.g0 {
1002 callers(1, mp.createstack[:])
1003 }
1004
1005 lock(&sched.lock)
1006
1007 if id >= 0 {
1008 mp.id = id
1009 } else {
1010 mp.id = mReserveID()
1011 }
1012
1013 mrandinit(mp)
1014
1015 mpreinit(mp)
1016 if mp.gsignal != nil {
1017 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1018 }
1019
1020
1021
1022 mp.alllink = allm
1023
1024
1025
1026 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1027 unlock(&sched.lock)
1028
1029
1030 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1031 mp.cgoCallers = new(cgoCallers)
1032 }
1033 mProfStackInit(mp)
1034 }
1035
1036
1037
1038
1039
1040 func mProfStackInit(mp *m) {
1041 if debug.profstackdepth == 0 {
1042
1043
1044 return
1045 }
1046 mp.profStack = makeProfStackFP()
1047 mp.mLockProfile.stack = makeProfStackFP()
1048 }
1049
1050
1051
1052
1053 func makeProfStackFP() []uintptr {
1054
1055
1056
1057
1058
1059
1060 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1061 }
1062
1063
1064
1065 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1066
1067
1068 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1069
1070 func (mp *m) becomeSpinning() {
1071 mp.spinning = true
1072 sched.nmspinning.Add(1)
1073 sched.needspinning.Store(0)
1074 }
1075
1076
1077
1078
1079
1080
1081
1082
1083 func (mp *m) snapshotAllp() []*p {
1084 mp.allpSnapshot = allp
1085 return mp.allpSnapshot
1086 }
1087
1088
1089
1090
1091
1092
1093
1094 func (mp *m) clearAllpSnapshot() {
1095 mp.allpSnapshot = nil
1096 }
1097
1098 func (mp *m) hasCgoOnStack() bool {
1099 return mp.ncgo > 0 || mp.isextra
1100 }
1101
1102 const (
1103
1104
1105 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1106
1107
1108
1109 osHasLowResClockInt = goos.IsWindows
1110
1111
1112
1113 osHasLowResClock = osHasLowResClockInt > 0
1114 )
1115
1116
1117 func ready(gp *g, traceskip int, next bool) {
1118 status := readgstatus(gp)
1119
1120
1121 mp := acquirem()
1122 if status&^_Gscan != _Gwaiting {
1123 dumpgstatus(gp)
1124 throw("bad g->status in ready")
1125 }
1126
1127
1128 trace := traceAcquire()
1129 casgstatus(gp, _Gwaiting, _Grunnable)
1130 if trace.ok() {
1131 trace.GoUnpark(gp, traceskip)
1132 traceRelease(trace)
1133 }
1134 runqput(mp.p.ptr(), gp, next)
1135 wakep()
1136 releasem(mp)
1137 }
1138
1139
1140
1141 const freezeStopWait = 0x7fffffff
1142
1143
1144
1145 var freezing atomic.Bool
1146
1147
1148
1149
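// freezetheworld is a best-effort attempt to stop all goroutines during a
// fatal throw or panic so a traceback can be taken. Unlike stopTheWorld it may
// be called several times and has no corresponding "start".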
1150 func freezetheworld() {
1151 freezing.Store(true)
1152 if debug.dontfreezetheworld > 0 {
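// With GODEBUG=dontfreezetheworld set, skip preempting Ps entirely; just
// sleep briefly so running goroutines can settle before the traceback.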
1177 usleep(1000)
1178 return
1179 }
1180
1181
1182
1183
1184 for i := 0; i < 5; i++ {
1185
1186 sched.stopwait = freezeStopWait
1187 sched.gcwaiting.Store(true)
1188
1189 if !preemptall() {
1190 break
1191 }
1192 usleep(1000)
1193 }
1194
1195 usleep(1000)
1196 preemptall()
1197 usleep(1000)
1198 }
1199
1200
1201
1202
1203
1204 func readgstatus(gp *g) uint32 {
1205 return gp.atomicstatus.Load()
1206 }
1207
1208
1209
1210
1211
1212 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1213 success := false
1214
1215
1216 switch oldval {
1217 default:
1218 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1219 dumpgstatus(gp)
1220 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1221 case _Gscanrunnable,
1222 _Gscanwaiting,
1223 _Gscanrunning,
1224 _Gscansyscall,
1225 _Gscanleaked,
1226 _Gscanpreempted:
1227 if newval == oldval&^_Gscan {
1228 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1229 }
1230 }
1231 if !success {
1232 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1233 dumpgstatus(gp)
1234 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1235 }
1236 releaseLockRankAndM(lockRankGscan)
1237 }
1238
1239
1240
1241 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1242 switch oldval {
1243 case _Grunnable,
1244 _Grunning,
1245 _Gwaiting,
1246 _Gleaked,
1247 _Gsyscall:
1248 if newval == oldval|_Gscan {
1249 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1250 if r {
1251 acquireLockRankAndM(lockRankGscan)
1252 }
1253 return r
1254
1255 }
1256 }
1257 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1258 throw("castogscanstatus")
1259 panic("not reached")
1260 }
1261
1262
1263
1264 var casgstatusAlwaysTrack = false
1265
1266
1267
1268
1269
1270
1271
1272 func casgstatus(gp *g, oldval, newval uint32) {
1273 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1274 systemstack(func() {
1275
1276
1277 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1278 throw("casgstatus: bad incoming values")
1279 })
1280 }
1281
1282 lockWithRankMayAcquire(nil, lockRankGscan)
1283
1284
1285 const yieldDelay = 5 * 1000
1286 var nextYield int64
1287
1288
1289
1290 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1291 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1292 systemstack(func() {
1293
1294
1295 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1296 })
1297 }
1298 if i == 0 {
1299 nextYield = nanotime() + yieldDelay
1300 }
1301 if nanotime() < nextYield {
1302 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1303 procyield(1)
1304 }
1305 } else {
1306 osyield()
1307 nextYield = nanotime() + yieldDelay/2
1308 }
1309 }
1310
1311 if gp.bubble != nil {
1312 systemstack(func() {
1313 gp.bubble.changegstatus(gp, oldval, newval)
1314 })
1315 }
1316
1317 if oldval == _Grunning {
1318
1319 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1320 gp.tracking = true
1321 }
1322 gp.trackingSeq++
1323 }
1324 if !gp.tracking {
1325 return
1326 }
1327
1328
1329
1330
1331
1332
1333 switch oldval {
1334 case _Grunnable:
1335
1336
1337
1338 now := nanotime()
1339 gp.runnableTime += now - gp.trackingStamp
1340 gp.trackingStamp = 0
1341 case _Gwaiting:
1342 if !gp.waitreason.isMutexWait() {
1343
1344 break
1345 }
1346
1347
1348
1349
1350
1351 now := nanotime()
1352 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1353 gp.trackingStamp = 0
1354 }
1355 switch newval {
1356 case _Gwaiting:
1357 if !gp.waitreason.isMutexWait() {
1358
1359 break
1360 }
1361
1362 now := nanotime()
1363 gp.trackingStamp = now
1364 case _Grunnable:
1365
1366
1367 now := nanotime()
1368 gp.trackingStamp = now
1369 case _Grunning:
1370
1371
1372
1373 gp.tracking = false
1374 sched.timeToRun.record(gp.runnableTime)
1375 gp.runnableTime = 0
1376 }
1377 }
1378
1379
1380
1381
1382 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1383
1384 gp.waitreason = reason
1385 casgstatus(gp, old, _Gwaiting)
1386 }
1387
1388
1389
1390
1391
1392
1393
1394
1395 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1396 if !reason.isWaitingForSuspendG() {
1397 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1398 }
1399 casGToWaiting(gp, old, reason)
1400 }
1401
1402
1403
1404
1405
1406 func casGToPreemptScan(gp *g, old, new uint32) {
1407 if old != _Grunning || new != _Gscan|_Gpreempted {
1408 throw("bad g transition")
1409 }
1410 acquireLockRankAndM(lockRankGscan)
1411 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1412 }
1413
1414
1415
1416
1417
1418
1419 }
1420
1421
1422
1423
1424 func casGFromPreempted(gp *g, old, new uint32) bool {
1425 if old != _Gpreempted || new != _Gwaiting {
1426 throw("bad g transition")
1427 }
1428 gp.waitreason = waitReasonPreempted
1429 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1430 return false
1431 }
1432 if bubble := gp.bubble; bubble != nil {
1433 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1434 }
1435 return true
1436 }
1437
1438
1439 type stwReason uint8
1440
1441
1442
1443
1444 const (
1445 stwUnknown stwReason = iota
1446 stwGCMarkTerm
1447 stwGCSweepTerm
1448 stwWriteHeapDump
1449 stwGoroutineProfile
1450 stwGoroutineProfileCleanup
1451 stwAllGoroutinesStack
1452 stwReadMemStats
1453 stwAllThreadsSyscall
1454 stwGOMAXPROCS
1455 stwStartTrace
1456 stwStopTrace
1457 stwForTestCountPagesInUse
1458 stwForTestReadMetricsSlow
1459 stwForTestReadMemStatsSlow
1460 stwForTestPageCachePagesLeaked
1461 stwForTestResetDebugLog
1462 )
1463
1464 func (r stwReason) String() string {
1465 return stwReasonStrings[r]
1466 }
1467
1468 func (r stwReason) isGC() bool {
1469 return r == stwGCMarkTerm || r == stwGCSweepTerm
1470 }
1471
1472
1473
1474
1475 var stwReasonStrings = [...]string{
1476 stwUnknown: "unknown",
1477 stwGCMarkTerm: "GC mark termination",
1478 stwGCSweepTerm: "GC sweep termination",
1479 stwWriteHeapDump: "write heap dump",
1480 stwGoroutineProfile: "goroutine profile",
1481 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1482 stwAllGoroutinesStack: "all goroutines stack trace",
1483 stwReadMemStats: "read mem stats",
1484 stwAllThreadsSyscall: "AllThreadsSyscall",
1485 stwGOMAXPROCS: "GOMAXPROCS",
1486 stwStartTrace: "start trace",
1487 stwStopTrace: "stop trace",
1488 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1489 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1490 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1491 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1492 stwForTestResetDebugLog: "ResetDebugLog (test)",
1493 }
1494
1495
1496
1497 type worldStop struct {
1498 reason stwReason
1499 startedStopping int64
1500 finishedStopping int64
1501 stoppingCPUTime int64
1502 }
1503
1504
1505
1506
1507 var stopTheWorldContext worldStop
1508
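// stopTheWorld stops all Ps from executing goroutines, interrupting them at
// GC safe points, and returns once the world is stopped. It acquires
// worldsema; the caller must call startTheWorld, which releases it, when done.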
1526 func stopTheWorld(reason stwReason) worldStop {
1527 semacquire(&worldsema)
1528 gp := getg()
1529 gp.m.preemptoff = reason.String()
1530 systemstack(func() {
1531 stopTheWorldContext = stopTheWorldWithSema(reason)
1532 })
1533 return stopTheWorldContext
1534 }
1535
1536
1537
1538
1539 func startTheWorld(w worldStop) {
1540 systemstack(func() { startTheWorldWithSema(0, w) })
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557 mp := acquirem()
1558 mp.preemptoff = ""
1559 semrelease1(&worldsema, true, 0)
1560 releasem(mp)
1561 }
1562
1563
1564
1565
1566 func stopTheWorldGC(reason stwReason) worldStop {
1567 semacquire(&gcsema)
1568 return stopTheWorld(reason)
1569 }
1570
1571
1572
1573
1574 func startTheWorldGC(w worldStop) {
1575 startTheWorld(w)
1576 semrelease(&gcsema)
1577 }
1578
1579
1580 var worldsema uint32 = 1
1581
1582
1583
1584
1585
1586
1587
1588 var gcsema uint32 = 1
1589
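// stopTheWorldWithSema is the core implementation of stopTheWorld. The caller
// holds worldsema, must not hold runtime locks, and runs this on the system
// stack; it returns with the world stopped and timing recorded in the
// returned worldStop.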
1622 func stopTheWorldWithSema(reason stwReason) worldStop {
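	// Park this goroutine in _Gwaiting (with a suspendG-safe wait reason) while
	// it stops the world; it is flipped back to _Grunning before returning.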
1635 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1636
1637 trace := traceAcquire()
1638 if trace.ok() {
1639 trace.STWStart(reason)
1640 traceRelease(trace)
1641 }
1642 gp := getg()
1643
1644
1645
1646 if gp.m.locks > 0 {
1647 throw("stopTheWorld: holding locks")
1648 }
1649
1650 lock(&sched.lock)
1651 start := nanotime()
1652 sched.stopwait = gomaxprocs
1653 sched.gcwaiting.Store(true)
1654 preemptall()
1655
1656 gp.m.p.ptr().status = _Pgcstop
1657 gp.m.p.ptr().gcStopTime = start
1658 sched.stopwait--
1659
1660 trace = traceAcquire()
1661 for _, pp := range allp {
1662 s := pp.status
1663 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1664 if trace.ok() {
1665 trace.ProcSteal(pp, false)
1666 }
1667 sched.nGsyscallNoP.Add(1)
1668 pp.syscalltick++
1669 pp.gcStopTime = nanotime()
1670 sched.stopwait--
1671 }
1672 }
1673 if trace.ok() {
1674 traceRelease(trace)
1675 }
1676
1677
1678 now := nanotime()
1679 for {
1680 pp, _ := pidleget(now)
1681 if pp == nil {
1682 break
1683 }
1684 pp.status = _Pgcstop
1685 pp.gcStopTime = nanotime()
1686 sched.stopwait--
1687 }
1688 wait := sched.stopwait > 0
1689 unlock(&sched.lock)
1690
1691
1692 if wait {
1693 for {
1694
1695 if notetsleep(&sched.stopnote, 100*1000) {
1696 noteclear(&sched.stopnote)
1697 break
1698 }
1699 preemptall()
1700 }
1701 }
1702
1703 finish := nanotime()
1704 startTime := finish - start
1705 if reason.isGC() {
1706 sched.stwStoppingTimeGC.record(startTime)
1707 } else {
1708 sched.stwStoppingTimeOther.record(startTime)
1709 }
1710
1711
1712
1713
1714
1715 stoppingCPUTime := int64(0)
1716 bad := ""
1717 if sched.stopwait != 0 {
1718 bad = "stopTheWorld: not stopped (stopwait != 0)"
1719 } else {
1720 for _, pp := range allp {
1721 if pp.status != _Pgcstop {
1722 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1723 }
1724 if pp.gcStopTime == 0 && bad == "" {
1725 bad = "stopTheWorld: broken CPU time accounting"
1726 }
1727 stoppingCPUTime += finish - pp.gcStopTime
1728 pp.gcStopTime = 0
1729 }
1730 }
1731 if freezing.Load() {
1732
1733
1734
1735
1736 lock(&deadlock)
1737 lock(&deadlock)
1738 }
1739 if bad != "" {
1740 throw(bad)
1741 }
1742
1743 worldStopped()
1744
1745
1746 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1747
1748 return worldStop{
1749 reason: reason,
1750 startedStopping: start,
1751 finishedStopping: finish,
1752 stoppingCPUTime: stoppingCPUTime,
1753 }
1754 }
1755
1756
1757
1758
1759
1760
1761
1762 func startTheWorldWithSema(now int64, w worldStop) int64 {
1763 assertWorldStopped()
1764
1765 mp := acquirem()
1766 if netpollinited() {
1767 list, delta := netpoll(0)
1768 injectglist(&list)
1769 netpollAdjustWaiters(delta)
1770 }
1771 lock(&sched.lock)
1772
1773 procs := gomaxprocs
1774 if newprocs != 0 {
1775 procs = newprocs
1776 newprocs = 0
1777 }
1778 p1 := procresize(procs)
1779 sched.gcwaiting.Store(false)
1780 if sched.sysmonwait.Load() {
1781 sched.sysmonwait.Store(false)
1782 notewakeup(&sched.sysmonnote)
1783 }
1784 unlock(&sched.lock)
1785
1786 worldStarted()
1787
1788 for p1 != nil {
1789 p := p1
1790 p1 = p1.link.ptr()
1791 if p.m != 0 {
1792 mp := p.m.ptr()
1793 p.m = 0
1794 if mp.nextp != 0 {
1795 throw("startTheWorld: inconsistent mp->nextp")
1796 }
1797 mp.nextp.set(p)
1798 notewakeup(&mp.park)
1799 } else {
1800
1801 newm(nil, p, -1)
1802 }
1803 }
1804
1805
1806 if now == 0 {
1807 now = nanotime()
1808 }
1809 totalTime := now - w.startedStopping
1810 if w.reason.isGC() {
1811 sched.stwTotalTimeGC.record(totalTime)
1812 } else {
1813 sched.stwTotalTimeOther.record(totalTime)
1814 }
1815 trace := traceAcquire()
1816 if trace.ok() {
1817 trace.STWDone()
1818 traceRelease(trace)
1819 }
1820
1821
1822
1823
1824 wakep()
1825
1826 releasem(mp)
1827
1828 return now
1829 }
1830
1831
1832
1833 func usesLibcall() bool {
1834 switch GOOS {
1835 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1836 return true
1837 }
1838 return false
1839 }
1840
1841
1842
1843 func mStackIsSystemAllocated() bool {
1844 switch GOOS {
1845 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1846 return true
1847 }
1848 return false
1849 }
1850
1851
1852
1853 func mstart()
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
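// mstart0 is the Go entry point for new Ms. It establishes g0's stack bounds
// (for OS-allocated stacks) and stack guards, calls mstart1, and, if mstart1
// returns, tears the thread down via mexit.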
1864 func mstart0() {
1865 gp := getg()
1866
1867 osStack := gp.stack.lo == 0
1868 if osStack {
1869
1870
1871
1872
1873
1874
1875
1876
1877 size := gp.stack.hi
1878 if size == 0 {
1879 size = 16384 * sys.StackGuardMultiplier
1880 }
1881 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1882 gp.stack.lo = gp.stack.hi - size + 1024
1883 }
1884
1885
1886 gp.stackguard0 = gp.stack.lo + stackGuard
1887
1888
1889 gp.stackguard1 = gp.stackguard0
1890 mstart1()
1891
1892
1893 if mStackIsSystemAllocated() {
1894
1895
1896
1897 osStack = true
1898 }
1899 mexit(osStack)
1900 }
1901
1902
1903
1904
1905
1906 func mstart1() {
1907 gp := getg()
1908
1909 if gp != gp.m.g0 {
1910 throw("bad runtime·mstart")
1911 }
1912
1913
1914
1915
1916
1917
1918
1919 gp.sched.g = guintptr(unsafe.Pointer(gp))
1920 gp.sched.pc = sys.GetCallerPC()
1921 gp.sched.sp = sys.GetCallerSP()
1922
1923 asminit()
1924 minit()
1925
1926
1927
1928 if gp.m == &m0 {
1929 mstartm0()
1930 }
1931
1932 if debug.dataindependenttiming == 1 {
1933 sys.EnableDIT()
1934 }
1935
1936 if fn := gp.m.mstartfn; fn != nil {
1937 fn()
1938 }
1939
1940 if gp.m != &m0 {
1941 acquirep(gp.m.nextp.ptr())
1942 gp.m.nextp = 0
1943 }
1944 schedule()
1945 }
1946
1947
1948
1949
1950
1951
1952
1953 func mstartm0() {
1954
1955
1956
1957 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1958 cgoHasExtraM = true
1959 newextram()
1960 }
1961 initsig(false)
1962 }
1963
1964
1965
1966
1967 func mPark() {
1968 gp := getg()
1969 notesleep(&gp.m.park)
1970 noteclear(&gp.m.park)
1971 }
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
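// mexit tears down and exits the current thread, releasing its P and removing
// the m from allm. If osStack is true, the g0 stack is OS-allocated and is
// not freed by the runtime.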
1983 func mexit(osStack bool) {
1984 mp := getg().m
1985
1986 if mp == &m0 {
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998 handoffp(releasep())
1999 lock(&sched.lock)
2000 sched.nmfreed++
2001 checkdead()
2002 unlock(&sched.lock)
2003 mPark()
2004 throw("locked m0 woke up")
2005 }
2006
2007 sigblock(true)
2008 unminit()
2009
2010
2011 if mp.gsignal != nil {
2012 stackfree(mp.gsignal.stack)
2013 if valgrindenabled {
2014 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2015 mp.gsignal.valgrindStackID = 0
2016 }
2017
2018
2019
2020
2021 mp.gsignal = nil
2022 }
2023
2024
2025 vgetrandomDestroy(mp)
2026
2027
2028 lock(&sched.lock)
2029 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2030 if *pprev == mp {
2031 *pprev = mp.alllink
2032 goto found
2033 }
2034 }
2035 throw("m not found in allm")
2036 found:
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051 mp.freeWait.Store(freeMWait)
2052 mp.freelink = sched.freem
2053 sched.freem = mp
2054 unlock(&sched.lock)
2055
2056 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2057 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2058
2059
2060 handoffp(releasep())
2061
2062
2063
2064
2065
2066 lock(&sched.lock)
2067 sched.nmfreed++
2068 checkdead()
2069 unlock(&sched.lock)
2070
2071 if GOOS == "darwin" || GOOS == "ios" {
2072
2073
2074 if mp.signalPending.Load() != 0 {
2075 pendingPreemptSignals.Add(-1)
2076 }
2077 }
2078
2079
2080
2081 mdestroy(mp)
2082
2083 if osStack {
2084
2085 mp.freeWait.Store(freeMRef)
2086
2087
2088
2089 return
2090 }
2091
2092
2093
2094
2095
2096 exitThread(&mp.freeWait)
2097 }
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
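// forEachP calls fn(p) for every P, at a GC safe point for each P, and blocks
// until all Ps have run fn; the caller's own P runs fn directly.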
2109 func forEachP(reason waitReason, fn func(*p)) {
2110 systemstack(func() {
2111 gp := getg().m.curg
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123 casGToWaitingForSuspendG(gp, _Grunning, reason)
2124 forEachPInternal(fn)
2125 casgstatus(gp, _Gwaiting, _Grunning)
2126 })
2127 }
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138 func forEachPInternal(fn func(*p)) {
2139 mp := acquirem()
2140 pp := getg().m.p.ptr()
2141
2142 lock(&sched.lock)
2143 if sched.safePointWait != 0 {
2144 throw("forEachP: sched.safePointWait != 0")
2145 }
2146 sched.safePointWait = gomaxprocs - 1
2147 sched.safePointFn = fn
2148
2149
2150 for _, p2 := range allp {
2151 if p2 != pp {
2152 atomic.Store(&p2.runSafePointFn, 1)
2153 }
2154 }
2155 preemptall()
2156
2157
2158
2159
2160
2161
2162
2163 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2164 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2165 fn(p)
2166 sched.safePointWait--
2167 }
2168 }
2169
2170 wait := sched.safePointWait > 0
2171 unlock(&sched.lock)
2172
2173
2174 fn(pp)
2175
2176
2177
2178 for _, p2 := range allp {
2179 s := p2.status
2180
2181
2182
2183 trace := traceAcquire()
2184 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2185 if trace.ok() {
2186
2187 trace.ProcSteal(p2, false)
2188 traceRelease(trace)
2189 }
2190 sched.nGsyscallNoP.Add(1)
2191 p2.syscalltick++
2192 handoffp(p2)
2193 } else if trace.ok() {
2194 traceRelease(trace)
2195 }
2196 }
2197
2198
2199 if wait {
2200 for {
2201
2202
2203
2204
2205 if notetsleep(&sched.safePointNote, 100*1000) {
2206 noteclear(&sched.safePointNote)
2207 break
2208 }
2209 preemptall()
2210 }
2211 }
2212 if sched.safePointWait != 0 {
2213 throw("forEachP: not done")
2214 }
2215 for _, p2 := range allp {
2216 if p2.runSafePointFn != 0 {
2217 throw("forEachP: P did not run fn")
2218 }
2219 }
2220
2221 lock(&sched.lock)
2222 sched.safePointFn = nil
2223 unlock(&sched.lock)
2224 releasem(mp)
2225 }
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238 func runSafePointFn() {
2239 p := getg().m.p.ptr()
2240
2241
2242
2243 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2244 return
2245 }
2246 sched.safePointFn(p)
2247 lock(&sched.lock)
2248 sched.safePointWait--
2249 if sched.safePointWait == 0 {
2250 notewakeup(&sched.safePointNote)
2251 }
2252 unlock(&sched.lock)
2253 }
2254
2255
2256
2257
2258 var cgoThreadStart unsafe.Pointer
2259
2260 type cgothreadstart struct {
2261 g guintptr
2262 tls *uint64
2263 fn unsafe.Pointer
2264 }
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
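// allocm allocates a new m not yet associated with any OS thread. pp, if
// needed, supplies a P for allocation; fn becomes the new m's mstartfn; id is
// an optional pre-reserved m ID (pass -1 to have one reserved here).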
2275 func allocm(pp *p, fn func(), id int64) *m {
2276 allocmLock.rlock()
2277
2278
2279
2280
2281 acquirem()
2282
2283 gp := getg()
2284 if gp.m.p == 0 {
2285 acquirep(pp)
2286 }
2287
2288
2289
2290 if sched.freem != nil {
2291 lock(&sched.lock)
2292 var newList *m
2293 for freem := sched.freem; freem != nil; {
2294
2295 wait := freem.freeWait.Load()
2296 if wait == freeMWait {
2297 next := freem.freelink
2298 freem.freelink = newList
2299 newList = freem
2300 freem = next
2301 continue
2302 }
2303
2304
2305
2306 if traceEnabled() || traceShuttingDown() {
2307 traceThreadDestroy(freem)
2308 }
2309
2310
2311
2312 if wait == freeMStack {
2313
2314
2315
2316 systemstack(func() {
2317 stackfree(freem.g0.stack)
2318 if valgrindenabled {
2319 valgrindDeregisterStack(freem.g0.valgrindStackID)
2320 freem.g0.valgrindStackID = 0
2321 }
2322 })
2323 }
2324 freem = freem.freelink
2325 }
2326 sched.freem = newList
2327 unlock(&sched.lock)
2328 }
2329
2330 mp := &new(mPadded).m
2331 mp.mstartfn = fn
2332 mcommoninit(mp, id)
2333
2334
2335
2336 if iscgo || mStackIsSystemAllocated() {
2337 mp.g0 = malg(-1)
2338 } else {
2339 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2340 }
2341 mp.g0.m = mp
2342
2343 if pp == gp.m.p.ptr() {
2344 releasep()
2345 }
2346
2347 releasem(gp.m)
2348 allocmLock.runlock()
2349 return mp
2350 }
2351
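// needm is called when a cgo callback arrives on a thread that has no m (a
// thread not created by Go). It borrows an m from the extra-m list, installs
// it as the thread's g/m, and prepares it to run Go code; dropm undoes this.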
2391 func needm(signal bool) {
2392 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2393
2394
2395
2396
2397
2398
2399 writeErrStr("fatal error: cgo callback before cgo call\n")
2400 exit(1)
2401 }
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411 var sigmask sigset
2412 sigsave(&sigmask)
2413 sigblock(false)
2414
2415
2416
2417
2418 mp, last := getExtraM()
2419
2420
2421
2422
2423
2424
2425
2426
2427 mp.needextram = last
2428
2429
2430 mp.sigmask = sigmask
2431
2432
2433
2434 osSetupTLS(mp)
2435
2436
2437
2438 setg(mp.g0)
2439 sp := sys.GetCallerSP()
2440 callbackUpdateSystemStack(mp, sp, signal)
2441
2442
2443
2444
2445 mp.isExtraInC = false
2446
2447
2448 asminit()
2449 minit()
2450
2451
2452
2453
2454
2455
2456 var trace traceLocker
2457 if !signal {
2458 trace = traceAcquire()
2459 }
2460
2461
2462 casgstatus(mp.curg, _Gdead, _Gsyscall)
2463 sched.ngsys.Add(-1)
2464 sched.nGsyscallNoP.Add(1)
2465
2466 if !signal {
2467 if trace.ok() {
2468 trace.GoCreateSyscall(mp.curg)
2469 traceRelease(trace)
2470 }
2471 }
2472 mp.isExtraInSig = signal
2473 }
2474
2475
2476
2477
2478 func needAndBindM() {
2479 needm(false)
2480
2481 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2482 cgoBindM()
2483 }
2484 }
2485
2486
2487
2488
2489 func newextram() {
2490 c := extraMWaiters.Swap(0)
2491 if c > 0 {
2492 for i := uint32(0); i < c; i++ {
2493 oneNewExtraM()
2494 }
2495 } else if extraMLength.Load() == 0 {
2496
2497 oneNewExtraM()
2498 }
2499 }
2500
2501
2502 func oneNewExtraM() {
2503
2504
2505
2506
2507
2508 mp := allocm(nil, nil, -1)
2509 gp := malg(4096)
2510 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2511 gp.sched.sp = gp.stack.hi
2512 gp.sched.sp -= 4 * goarch.PtrSize
2513 gp.sched.lr = 0
2514 gp.sched.g = guintptr(unsafe.Pointer(gp))
2515 gp.syscallpc = gp.sched.pc
2516 gp.syscallsp = gp.sched.sp
2517 gp.stktopsp = gp.sched.sp
2518
2519
2520
2521
2522 casgstatus(gp, _Gidle, _Gdead)
2523 gp.m = mp
2524 mp.curg = gp
2525 mp.isextra = true
2526
2527 mp.isExtraInC = true
2528 mp.lockedInt++
2529 mp.lockedg.set(gp)
2530 gp.lockedm.set(mp)
2531 gp.goid = sched.goidgen.Add(1)
2532 if raceenabled {
2533 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2534 }
2535
2536 allgadd(gp)
2537
2538
2539
2540
2541
2542 sched.ngsys.Add(1)
2543
2544
2545 addExtraM(mp)
2546 }
2547
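// dropm is the inverse of needm: it detaches the borrowed extra m from the
// current thread, clears g, and puts the m back on the extra-m list so another
// callback thread can use it.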
2581 func dropm() {
2582
2583
2584
2585 mp := getg().m
2586
2587
2588
2589
2590
2591 var trace traceLocker
2592 if !mp.isExtraInSig {
2593 trace = traceAcquire()
2594 }
2595
2596
2597 casgstatus(mp.curg, _Gsyscall, _Gdead)
2598 mp.curg.preemptStop = false
2599 sched.ngsys.Add(1)
2600 sched.nGsyscallNoP.Add(-1)
2601
2602 if !mp.isExtraInSig {
2603 if trace.ok() {
2604 trace.GoDestroySyscall()
2605 traceRelease(trace)
2606 }
2607 }
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622 mp.syscalltick--
2623
2624
2625
2626 mp.curg.trace.reset()
2627
2628
2629
2630
2631 if traceEnabled() || traceShuttingDown() {
2632
2633
2634
2635
2636
2637
2638
2639 lock(&sched.lock)
2640 traceThreadDestroy(mp)
2641 unlock(&sched.lock)
2642 }
2643 mp.isExtraInSig = false
2644
2645
2646
2647
2648
2649 sigmask := mp.sigmask
2650 sigblock(false)
2651 unminit()
2652
2653 setg(nil)
2654
2655
2656
2657 g0 := mp.g0
2658 g0.stack.hi = 0
2659 g0.stack.lo = 0
2660 g0.stackguard0 = 0
2661 g0.stackguard1 = 0
2662 mp.g0StackAccurate = false
2663
2664 putExtraM(mp)
2665
2666 msigrestore(sigmask)
2667 }
2668
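// cgoBindM asks C code (via _cgo_bindm) to remember this m's g0 in a
// thread-local slot, so that later cgo callbacks on the same C thread can
// reuse this m instead of taking one from the extra list.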
2689 func cgoBindM() {
2690 if GOOS == "windows" || GOOS == "plan9" {
2691 fatal("bindm in unexpected GOOS")
2692 }
2693 g := getg()
2694 if g.m.g0 != g {
2695 fatal("the current g is not g0")
2696 }
2697 if _cgo_bindm != nil {
2698 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2699 }
2700 }
2701
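// getm returns an opaque handle (a uintptr) to the current g's m, for callers
// that must not retain a real pointer.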
2713 func getm() uintptr {
2714 return uintptr(unsafe.Pointer(getg().m))
2715 }
2716
2717 var (
2718
2719
2720
2721
2722
2723
2724 extraM atomic.Uintptr
2725
2726 extraMLength atomic.Uint32
2727
2728 extraMWaiters atomic.Uint32
2729
2730
2731 extraMInUse atomic.Uint32
2732 )
2733
2734
2735
2736
2737
2738
2739
2740
2741 func lockextra(nilokay bool) *m {
2742 const locked = 1
2743
2744 incr := false
2745 for {
2746 old := extraM.Load()
2747 if old == locked {
2748 osyield_no_g()
2749 continue
2750 }
2751 if old == 0 && !nilokay {
2752 if !incr {
2753
2754
2755
2756 extraMWaiters.Add(1)
2757 incr = true
2758 }
2759 usleep_no_g(1)
2760 continue
2761 }
2762 if extraM.CompareAndSwap(old, locked) {
2763 return (*m)(unsafe.Pointer(old))
2764 }
2765 osyield_no_g()
2766 continue
2767 }
2768 }
2769
2770
2771 func unlockextra(mp *m, delta int32) {
2772 extraMLength.Add(delta)
2773 extraM.Store(uintptr(unsafe.Pointer(mp)))
2774 }
2775
2776
2777
2778
2779
2780
2781
2782
2783 func getExtraM() (mp *m, last bool) {
2784 mp = lockextra(false)
2785 extraMInUse.Add(1)
2786 unlockextra(mp.schedlink.ptr(), -1)
2787 return mp, mp.schedlink.ptr() == nil
2788 }
2789
2790
2791
2792
2793
2794 func putExtraM(mp *m) {
2795 extraMInUse.Add(-1)
2796 addExtraM(mp)
2797 }
2798
2799
2800
2801
2802 func addExtraM(mp *m) {
2803 mnext := lockextra(true)
2804 mp.schedlink.set(mnext)
2805 unlockextra(mp, 1)
2806 }
2807
2808 var (
2809
2810
2811
2812 allocmLock rwmutex
2813
2814
2815
2816
2817 execLock rwmutex
2818 )
2819
2820
2821
2822 const (
2823 failthreadcreate = "runtime: failed to create new OS thread\n"
2824 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2825 )
2826
2827
2828
2829
2830 var newmHandoff struct {
2831 lock mutex
2832
2833
2834
2835 newm muintptr
2836
2837
2838
2839 waiting bool
2840 wake note
2841
2842
2843
2844
2845 haveTemplateThread uint32
2846 }
2847
2848
2849
2850
2851
2852
2853
2854
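// newm creates a new m and an associated OS thread. The new thread starts by
// running fn, or the scheduler if fn is nil; pp, if non-nil, becomes the new
// m's first P. On locked or cgo threads, creation is handed off to the
// template thread.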
2855 func newm(fn func(), pp *p, id int64) {
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866 acquirem()
2867
2868 mp := allocm(pp, fn, id)
2869 mp.nextp.set(pp)
2870 mp.sigmask = initSigmask
2871 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883 lock(&newmHandoff.lock)
2884 if newmHandoff.haveTemplateThread == 0 {
2885 throw("on a locked thread with no template thread")
2886 }
2887 mp.schedlink = newmHandoff.newm
2888 newmHandoff.newm.set(mp)
2889 if newmHandoff.waiting {
2890 newmHandoff.waiting = false
2891 notewakeup(&newmHandoff.wake)
2892 }
2893 unlock(&newmHandoff.lock)
2894
2895
2896
2897 releasem(getg().m)
2898 return
2899 }
2900 newm1(mp)
2901 releasem(getg().m)
2902 }
2903
2904 func newm1(mp *m) {
2905 if iscgo {
2906 var ts cgothreadstart
2907 if _cgo_thread_start == nil {
2908 throw("_cgo_thread_start missing")
2909 }
2910 ts.g.set(mp.g0)
2911 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2912 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2913 if msanenabled {
2914 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2915 }
2916 if asanenabled {
2917 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2918 }
2919 execLock.rlock()
2920 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2921 execLock.runlock()
2922 return
2923 }
2924 execLock.rlock()
2925 newosproc(mp)
2926 execLock.runlock()
2927 }
2928
2929
2930
2931
2932
2933 func startTemplateThread() {
2934 if GOARCH == "wasm" {
2935 return
2936 }
2937
2938
2939
2940 mp := acquirem()
2941 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2942 releasem(mp)
2943 return
2944 }
2945 newm(templateThread, nil, -1)
2946 releasem(mp)
2947 }
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
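// templateThread is a thread in a known-good state whose only job is to create
// new threads on behalf of Ms that cannot safely do so themselves (for example
// locked Ms); requests arrive through newmHandoff.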
2961 func templateThread() {
2962 lock(&sched.lock)
2963 sched.nmsys++
2964 checkdead()
2965 unlock(&sched.lock)
2966
2967 for {
2968 lock(&newmHandoff.lock)
2969 for newmHandoff.newm != 0 {
2970 newm := newmHandoff.newm.ptr()
2971 newmHandoff.newm = 0
2972 unlock(&newmHandoff.lock)
2973 for newm != nil {
2974 next := newm.schedlink.ptr()
2975 newm.schedlink = 0
2976 newm1(newm)
2977 newm = next
2978 }
2979 lock(&newmHandoff.lock)
2980 }
2981 newmHandoff.waiting = true
2982 noteclear(&newmHandoff.wake)
2983 unlock(&newmHandoff.lock)
2984 notesleep(&newmHandoff.wake)
2985 }
2986 }
2987
2988
2989
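// stopm parks the current m until new work is available, placing it on the
// idle m list; on wakeup it acquires the P handed to it in m.nextp.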
2990 func stopm() {
2991 gp := getg()
2992
2993 if gp.m.locks != 0 {
2994 throw("stopm holding locks")
2995 }
2996 if gp.m.p != 0 {
2997 throw("stopm holding p")
2998 }
2999 if gp.m.spinning {
3000 throw("stopm spinning")
3001 }
3002
3003 lock(&sched.lock)
3004 mput(gp.m)
3005 unlock(&sched.lock)
3006 mPark()
3007 acquirep(gp.m.nextp.ptr())
3008 gp.m.nextp = 0
3009 }
3010
3011 func mspinning() {
3012
3013 getg().m.spinning = true
3014 }
3015
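// startm schedules some m to run p (creating a new m if none is idle).
// If p == nil it tries to take an idle P and returns if there is none.
// When spinning is true the caller has already incremented nmspinning and must
// pass a non-nil p.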
3033 func startm(pp *p, spinning, lockheld bool) {
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050 mp := acquirem()
3051 if !lockheld {
3052 lock(&sched.lock)
3053 }
3054 if pp == nil {
3055 if spinning {
3056
3057
3058
3059 throw("startm: P required for spinning=true")
3060 }
3061 pp, _ = pidleget(0)
3062 if pp == nil {
3063 if !lockheld {
3064 unlock(&sched.lock)
3065 }
3066 releasem(mp)
3067 return
3068 }
3069 }
3070 nmp := mget()
3071 if nmp == nil {
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086 id := mReserveID()
3087 unlock(&sched.lock)
3088
3089 var fn func()
3090 if spinning {
3091
3092 fn = mspinning
3093 }
3094 newm(fn, pp, id)
3095
3096 if lockheld {
3097 lock(&sched.lock)
3098 }
3099
3100
3101 releasem(mp)
3102 return
3103 }
3104 if !lockheld {
3105 unlock(&sched.lock)
3106 }
3107 if nmp.spinning {
3108 throw("startm: m is spinning")
3109 }
3110 if nmp.nextp != 0 {
3111 throw("startm: m has p")
3112 }
3113 if spinning && !runqempty(pp) {
3114 throw("startm: p has runnable gs")
3115 }
3116
3117 nmp.spinning = spinning
3118 nmp.nextp.set(pp)
3119 notewakeup(&nmp.park)
3120
3121
3122 releasem(mp)
3123 }
3124
3125
3126
3127
3128
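// handoffp hands off the P from a syscall or locked m. It always runs without
// a P, so write barriers are not allowed; it either starts an m to run the P
// or puts the P on the idle list.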
3129 func handoffp(pp *p) {
3130
3131
3132
3133
3134 if !runqempty(pp) || !sched.runq.empty() {
3135 startm(pp, false, false)
3136 return
3137 }
3138
3139 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3140 startm(pp, false, false)
3141 return
3142 }
3143
3144 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3145 startm(pp, false, false)
3146 return
3147 }
3148
3149
3150 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3151 sched.needspinning.Store(0)
3152 startm(pp, true, false)
3153 return
3154 }
3155 lock(&sched.lock)
3156 if sched.gcwaiting.Load() {
3157 pp.status = _Pgcstop
3158 pp.gcStopTime = nanotime()
3159 sched.stopwait--
3160 if sched.stopwait == 0 {
3161 notewakeup(&sched.stopnote)
3162 }
3163 unlock(&sched.lock)
3164 return
3165 }
3166 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3167 sched.safePointFn(pp)
3168 sched.safePointWait--
3169 if sched.safePointWait == 0 {
3170 notewakeup(&sched.safePointNote)
3171 }
3172 }
3173 if !sched.runq.empty() {
3174 unlock(&sched.lock)
3175 startm(pp, false, false)
3176 return
3177 }
3178
3179
3180 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3181 unlock(&sched.lock)
3182 startm(pp, false, false)
3183 return
3184 }
3185
3186
3187
3188 when := pp.timers.wakeTime()
3189 pidleput(pp, 0)
3190 unlock(&sched.lock)
3191
3192 if when != 0 {
3193 wakeNetPoller(when)
3194 }
3195 }
3196
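// wakep tries to add one more spinning M to run pending work. It is called
// when a goroutine is made runnable and only proceeds if it can transition
// nmspinning from 0 to 1, so at most one M is woken at a time here.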
3210 func wakep() {
3211
3212
3213 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3214 return
3215 }
3216
3217
3218
3219
3220
3221
3222 mp := acquirem()
3223
3224 var pp *p
3225 lock(&sched.lock)
3226 pp, _ = pidlegetSpinning(0)
3227 if pp == nil {
3228 if sched.nmspinning.Add(-1) < 0 {
3229 throw("wakep: negative nmspinning")
3230 }
3231 unlock(&sched.lock)
3232 releasem(mp)
3233 return
3234 }
3235
3236
3237
3238
3239 unlock(&sched.lock)
3240
3241 startm(pp, true, false)
3242
3243 releasem(mp)
3244 }
3245
3246
3247
3248 func stoplockedm() {
3249 gp := getg()
3250
3251 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3252 throw("stoplockedm: inconsistent locking")
3253 }
3254 if gp.m.p != 0 {
3255
3256 pp := releasep()
3257 handoffp(pp)
3258 }
3259 incidlelocked(1)
3260
3261 mPark()
3262 status := readgstatus(gp.m.lockedg.ptr())
3263 if status&^_Gscan != _Grunnable {
3264 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3265 dumpgstatus(gp.m.lockedg.ptr())
3266 throw("stoplockedm: not runnable")
3267 }
3268 acquirep(gp.m.nextp.ptr())
3269 gp.m.nextp = 0
3270 }
3271
3272
3273
3274
3275
3276 func startlockedm(gp *g) {
3277 mp := gp.lockedm.ptr()
3278 if mp == getg().m {
3279 throw("startlockedm: locked to me")
3280 }
3281 if mp.nextp != 0 {
3282 throw("startlockedm: m has p")
3283 }
3284
3285 incidlelocked(-1)
3286 pp := releasep()
3287 mp.nextp.set(pp)
3288 notewakeup(&mp.park)
3289 stopm()
3290 }
3291
3292
3293
3294 func gcstopm() {
3295 gp := getg()
3296
3297 if !sched.gcwaiting.Load() {
3298 throw("gcstopm: not waiting for gc")
3299 }
3300 if gp.m.spinning {
3301 gp.m.spinning = false
3302
3303
3304 if sched.nmspinning.Add(-1) < 0 {
3305 throw("gcstopm: negative nmspinning")
3306 }
3307 }
3308 pp := releasep()
3309 lock(&sched.lock)
3310 pp.status = _Pgcstop
3311 pp.gcStopTime = nanotime()
3312 sched.stopwait--
3313 if sched.stopwait == 0 {
3314 notewakeup(&sched.stopnote)
3315 }
3316 unlock(&sched.lock)
3317 stopm()
3318 }
3319
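// execute schedules gp to run on the current m. It marks gp _Grunning, sets
// its stack guard, and jumps to gp via gogo, so it never returns. If
// inheritTime is true, gp inherits the remaining time slice (schedtick is not
// incremented).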
3329 func execute(gp *g, inheritTime bool) {
3330 mp := getg().m
3331
3332 if goroutineProfile.active {
3333
3334
3335
3336 tryRecordGoroutineProfile(gp, nil, osyield)
3337 }
3338
3339
3340 mp.curg = gp
3341 gp.m = mp
3342 gp.syncSafePoint = false
3343 casgstatus(gp, _Grunnable, _Grunning)
3344 gp.waitsince = 0
3345 gp.preempt = false
3346 gp.stackguard0 = gp.stack.lo + stackGuard
3347 if !inheritTime {
3348 mp.p.ptr().schedtick++
3349 }
3350
3351
3352 hz := sched.profilehz
3353 if mp.profilehz != hz {
3354 setThreadCPUProfiler(hz)
3355 }
3356
3357 trace := traceAcquire()
3358 if trace.ok() {
3359 trace.GoStart()
3360 traceRelease(trace)
3361 }
3362
3363 gogo(&gp.sched)
3364 }
3365
3366
3367
3368
3369
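// findRunnable finds a runnable goroutine to execute, checking in turn the
// trace reader, GC workers, the local and global run queues, netpoll, and work
// stealing from other Ps; it blocks until work is available. tryWakeP reports
// that the returned goroutine is special (GC worker or trace reader), so the
// caller should consider waking an additional P.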
3370 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3371 mp := getg().m
3372
3373
3374
3375
3376
3377 top:
3378
3379
3380
3381 mp.clearAllpSnapshot()
3382
3383 pp := mp.p.ptr()
3384 if sched.gcwaiting.Load() {
3385 gcstopm()
3386 goto top
3387 }
3388 if pp.runSafePointFn != 0 {
3389 runSafePointFn()
3390 }
3391
3392
3393
3394
3395
3396 now, pollUntil, _ := pp.timers.check(0, nil)
3397
3398
3399 if traceEnabled() || traceShuttingDown() {
3400 gp := traceReader()
3401 if gp != nil {
3402 trace := traceAcquire()
3403 casgstatus(gp, _Gwaiting, _Grunnable)
3404 if trace.ok() {
3405 trace.GoUnpark(gp, 0)
3406 traceRelease(trace)
3407 }
3408 return gp, false, true
3409 }
3410 }
3411
3412
3413 if gcBlackenEnabled != 0 {
3414 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3415 if gp != nil {
3416 return gp, false, true
3417 }
3418 now = tnow
3419 }
3420
3421
3422
3423
3424 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3425 lock(&sched.lock)
3426 gp := globrunqget()
3427 unlock(&sched.lock)
3428 if gp != nil {
3429 return gp, false, false
3430 }
3431 }
3432
3433
3434 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3435 if gp := wakefing(); gp != nil {
3436 ready(gp, 0, true)
3437 }
3438 }
3439
3440
3441 if gcCleanups.needsWake() {
3442 gcCleanups.wake()
3443 }
3444
3445 if *cgo_yield != nil {
3446 asmcgocall(*cgo_yield, nil)
3447 }
3448
3449
3450 if gp, inheritTime := runqget(pp); gp != nil {
3451 return gp, inheritTime, false
3452 }
3453
3454
3455 if !sched.runq.empty() {
3456 lock(&sched.lock)
3457 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3458 unlock(&sched.lock)
3459 if gp != nil {
3460 if runqputbatch(pp, &q); !q.empty() {
3461 throw("Couldn't put Gs into empty local runq")
3462 }
3463 return gp, false, false
3464 }
3465 }
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3477 list, delta := netpoll(0)
3478 sched.pollingNet.Store(0)
3479 if !list.empty() {
3480 gp := list.pop()
3481 injectglist(&list)
3482 netpollAdjustWaiters(delta)
3483 trace := traceAcquire()
3484 casgstatus(gp, _Gwaiting, _Grunnable)
3485 if trace.ok() {
3486 trace.GoUnpark(gp, 0)
3487 traceRelease(trace)
3488 }
3489 return gp, false, false
3490 }
3491 }
3492
3493
3494
3495
3496
3497
3498 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3499 if !mp.spinning {
3500 mp.becomeSpinning()
3501 }
3502
3503 gp, inheritTime, tnow, w, newWork := stealWork(now)
3504 if gp != nil {
3505
3506 return gp, inheritTime, false
3507 }
3508 if newWork {
3509
3510
3511 goto top
3512 }
3513
3514 now = tnow
3515 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3516
3517 pollUntil = w
3518 }
3519 }
3520
3521
3522
3523
3524
3525 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3526 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3527 if node != nil {
3528 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3529 gp := node.gp.ptr()
3530
3531 trace := traceAcquire()
3532 casgstatus(gp, _Gwaiting, _Grunnable)
3533 if trace.ok() {
3534 trace.GoUnpark(gp, 0)
3535 traceRelease(trace)
3536 }
3537 return gp, false, false
3538 }
3539 gcController.removeIdleMarkWorker()
3540 }
3541
3542
3543
3544
3545
3546 gp, otherReady := beforeIdle(now, pollUntil)
3547 if gp != nil {
3548 trace := traceAcquire()
3549 casgstatus(gp, _Gwaiting, _Grunnable)
3550 if trace.ok() {
3551 trace.GoUnpark(gp, 0)
3552 traceRelease(trace)
3553 }
3554 return gp, false, false
3555 }
3556 if otherReady {
3557 goto top
3558 }
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568 allpSnapshot := mp.snapshotAllp()
3569
3570
3571 idlepMaskSnapshot := idlepMask
3572 timerpMaskSnapshot := timerpMask
3573
3574
3575 lock(&sched.lock)
3576 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3577 unlock(&sched.lock)
3578 goto top
3579 }
3580 if !sched.runq.empty() {
3581 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3582 unlock(&sched.lock)
3583 if gp == nil {
3584 throw("global runq empty with non-zero runqsize")
3585 }
3586 if runqputbatch(pp, &q); !q.empty() {
3587 throw("Couldn't put Gs into empty local runq")
3588 }
3589 return gp, false, false
3590 }
3591 if !mp.spinning && sched.needspinning.Load() == 1 {
3592
3593 mp.becomeSpinning()
3594 unlock(&sched.lock)
3595 goto top
3596 }
3597 if releasep() != pp {
3598 throw("findrunnable: wrong p")
3599 }
3600 now = pidleput(pp, now)
3601 unlock(&sched.lock)
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
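// This M is about to stop. Work may have been submitted between the checks
// above and here, and its submitter may have observed us spinning and not
// woken another M. So drop the spinning count first, then re-check every
// source of work one more time: the global queue, per-P run queues,
// idle-priority GC work, and timers, before finally blocking in netpoll
// or stopm.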
3639 wasSpinning := mp.spinning
3640 if mp.spinning {
3641 mp.spinning = false
3642 if sched.nmspinning.Add(-1) < 0 {
3643 throw("findrunnable: negative nmspinning")
3644 }
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657 lock(&sched.lock)
3658 if !sched.runq.empty() {
3659 pp, _ := pidlegetSpinning(0)
3660 if pp != nil {
3661 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3662 unlock(&sched.lock)
3663 if gp == nil {
3664 throw("global runq empty with non-zero runqsize")
3665 }
3666 if runqputbatch(pp, &q); !q.empty() {
3667 throw("Couldn't put Gs into empty local runq")
3668 }
3669 acquirep(pp)
3670 mp.becomeSpinning()
3671 return gp, false, false
3672 }
3673 }
3674 unlock(&sched.lock)
3675
3676 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3677 if pp != nil {
3678 acquirep(pp)
3679 mp.becomeSpinning()
3680 goto top
3681 }
3682
3683
3684 pp, gp := checkIdleGCNoP()
3685 if pp != nil {
3686 acquirep(pp)
3687 mp.becomeSpinning()
3688
3689
3690 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3691 trace := traceAcquire()
3692 casgstatus(gp, _Gwaiting, _Grunnable)
3693 if trace.ok() {
3694 trace.GoUnpark(gp, 0)
3695 traceRelease(trace)
3696 }
3697 return gp, false, false
3698 }
3699
3700
3701
3702
3703
3704
3705
3706 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3707 }
3708
3709
3710
3711
3712
3713 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3714 sched.pollUntil.Store(pollUntil)
3715 if mp.p != 0 {
3716 throw("findrunnable: netpoll with p")
3717 }
3718 if mp.spinning {
3719 throw("findrunnable: netpoll with spinning")
3720 }
3721 delay := int64(-1)
3722 if pollUntil != 0 {
3723 if now == 0 {
3724 now = nanotime()
3725 }
3726 delay = pollUntil - now
3727 if delay < 0 {
3728 delay = 0
3729 }
3730 }
3731 if faketime != 0 {
3732
3733 delay = 0
3734 }
3735 list, delta := netpoll(delay)
3736
3737 now = nanotime()
3738 sched.pollUntil.Store(0)
3739 sched.lastpoll.Store(now)
3740 if faketime != 0 && list.empty() {
3741
3742
3743 stopm()
3744 goto top
3745 }
3746 lock(&sched.lock)
3747 pp, _ := pidleget(now)
3748 unlock(&sched.lock)
3749 if pp == nil {
3750 injectglist(&list)
3751 netpollAdjustWaiters(delta)
3752 } else {
3753 acquirep(pp)
3754 if !list.empty() {
3755 gp := list.pop()
3756 injectglist(&list)
3757 netpollAdjustWaiters(delta)
3758 trace := traceAcquire()
3759 casgstatus(gp, _Gwaiting, _Grunnable)
3760 if trace.ok() {
3761 trace.GoUnpark(gp, 0)
3762 traceRelease(trace)
3763 }
3764 return gp, false, false
3765 }
3766 if wasSpinning {
3767 mp.becomeSpinning()
3768 }
3769 goto top
3770 }
3771 } else if pollUntil != 0 && netpollinited() {
3772 pollerPollUntil := sched.pollUntil.Load()
3773 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3774 netpollBreak()
3775 }
3776 }
3777 stopm()
3778 goto top
3779 }
3780
3781
3782
3783
3784
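// pollWork reports whether there is non-background work this P could be
// doing: something on the global or local run queue, or ready network I/O.
// It is a much cheaper check than a full findRunnable pass.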
3785 func pollWork() bool {
3786 if !sched.runq.empty() {
3787 return true
3788 }
3789 p := getg().m.p.ptr()
3790 if !runqempty(p) {
3791 return true
3792 }
3793 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3794 if list, delta := netpoll(0); !list.empty() {
3795 injectglist(&list)
3796 netpollAdjustWaiters(delta)
3797 return true
3798 }
3799 }
3800 return false
3801 }
3802
3803
3804
3805
3806
3807
3808
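// stealWork makes up to stealTries passes over all Ps in random order,
// trying to steal runnable goroutines; on the final pass it also runs ready
// timers on other Ps and may steal their runnext. It returns any stolen G,
// updated now and pollUntil values, and newWork=true when running timers or
// a pending GC stop may have made new work available.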
3809 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3810 pp := getg().m.p.ptr()
3811
3812 ranTimer := false
3813
3814 const stealTries = 4
3815 for i := 0; i < stealTries; i++ {
3816 stealTimersOrRunNextG := i == stealTries-1
3817
3818 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3819 if sched.gcwaiting.Load() {
3820
3821 return nil, false, now, pollUntil, true
3822 }
3823 p2 := allp[enum.position()]
3824 if pp == p2 {
3825 continue
3826 }
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3842 tnow, w, ran := p2.timers.check(now, nil)
3843 now = tnow
3844 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3845 pollUntil = w
3846 }
3847 if ran {
3848
3849
3850
3851
3852
3853
3854
3855
3856 if gp, inheritTime := runqget(pp); gp != nil {
3857 return gp, inheritTime, now, pollUntil, ranTimer
3858 }
3859 ranTimer = true
3860 }
3861 }
3862
3863
3864 if !idlepMask.read(enum.position()) {
3865 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3866 return gp, false, now, pollUntil, ranTimer
3867 }
3868 }
3869 }
3870 }
3871
3872
3873
3874
3875 return nil, false, now, pollUntil, ranTimer
3876 }
3877
3878
3879
3880
3881
3882
3883 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3884 for id, p2 := range allpSnapshot {
3885 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3886 lock(&sched.lock)
3887 pp, _ := pidlegetSpinning(0)
3888 if pp == nil {
3889
3890 unlock(&sched.lock)
3891 return nil
3892 }
3893 unlock(&sched.lock)
3894 return pp
3895 }
3896 }
3897
3898
3899 return nil
3900 }
3901
3902
3903
3904
3905 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3906 for id, p2 := range allpSnapshot {
3907 if timerpMaskSnapshot.read(uint32(id)) {
3908 w := p2.timers.wakeTime()
3909 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3910 pollUntil = w
3911 }
3912 }
3913 }
3914
3915 return pollUntil
3916 }
3917
3918
3919
3920
3921
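// checkIdleGCNoP is the no-P variant of the idle GC mark worker check: it
// reserves an idle mark worker slot, takes an idle P, and returns that P
// together with the worker G to run, or nil, nil if there is nothing to do.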
3922 func checkIdleGCNoP() (*p, *g) {
3923
3924
3925
3926
3927
3928
3929 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3930 return nil, nil
3931 }
3932 if !gcShouldScheduleWorker(nil) {
3933 return nil, nil
3934 }
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953 lock(&sched.lock)
3954 pp, now := pidlegetSpinning(0)
3955 if pp == nil {
3956 unlock(&sched.lock)
3957 return nil, nil
3958 }
3959
3960
3961 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3962 pidleput(pp, now)
3963 unlock(&sched.lock)
3964 return nil, nil
3965 }
3966
3967 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3968 if node == nil {
3969 pidleput(pp, now)
3970 unlock(&sched.lock)
3971 gcController.removeIdleMarkWorker()
3972 return nil, nil
3973 }
3974
3975 unlock(&sched.lock)
3976
3977 return pp, node.gp.ptr()
3978 }
3979
3980
3981
3982
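// wakeNetPoller ensures some thread will notice a timer due at when: if a
// thread is currently blocked in netpoll it is interrupted via netpollBreak
// (unless it will wake earlier anyway); otherwise an idle P is woken
// (except on plan9) so the timer and the network get serviced.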
3983 func wakeNetPoller(when int64) {
3984 if sched.lastpoll.Load() == 0 {
3985
3986
3987
3988
3989 pollerPollUntil := sched.pollUntil.Load()
3990 if pollerPollUntil == 0 || pollerPollUntil > when {
3991 netpollBreak()
3992 }
3993 } else {
3994
3995
3996 if GOOS != "plan9" {
3997 wakep()
3998 }
3999 }
4000 }
4001
4002 func resetspinning() {
4003 gp := getg()
4004 if !gp.m.spinning {
4005 throw("resetspinning: not a spinning m")
4006 }
4007 gp.m.spinning = false
4008 nmspinning := sched.nmspinning.Add(-1)
4009 if nmspinning < 0 {
4010 throw("findrunnable: negative nmspinning")
4011 }
4012
4013
4014
4015 wakep()
4016 }
4017
4018
4019
4020
4021
4022
4023
4024
4025
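// injectglist moves every runnable G on glist to a run queue and empties
// the list. Without a current P, the whole batch goes on the global queue
// and idle Ms are started for it; with a P, one G per idle P is pushed to
// the global queue (starting Ms to match) and the remainder goes on the
// local run queue, overflowing back to the global queue if it fills.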
4026 func injectglist(glist *gList) {
4027 if glist.empty() {
4028 return
4029 }
4030
4031
4032
4033 var tail *g
4034 trace := traceAcquire()
4035 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4036 tail = gp
4037 casgstatus(gp, _Gwaiting, _Grunnable)
4038 if trace.ok() {
4039 trace.GoUnpark(gp, 0)
4040 }
4041 }
4042 if trace.ok() {
4043 traceRelease(trace)
4044 }
4045
4046
4047 q := gQueue{glist.head, tail.guintptr(), glist.size}
4048 *glist = gList{}
4049
4050 startIdle := func(n int32) {
4051 for ; n > 0; n-- {
4052 mp := acquirem()
4053 lock(&sched.lock)
4054
4055 pp, _ := pidlegetSpinning(0)
4056 if pp == nil {
4057 unlock(&sched.lock)
4058 releasem(mp)
4059 break
4060 }
4061
4062 startm(pp, false, true)
4063 unlock(&sched.lock)
4064 releasem(mp)
4065 }
4066 }
4067
4068 pp := getg().m.p.ptr()
4069 if pp == nil {
4070 n := q.size
4071 lock(&sched.lock)
4072 globrunqputbatch(&q)
4073 unlock(&sched.lock)
4074 startIdle(n)
4075 return
4076 }
4077
4078 var globq gQueue
4079 npidle := sched.npidle.Load()
4080 for ; npidle > 0 && !q.empty(); npidle-- {
4081 g := q.pop()
4082 globq.pushBack(g)
4083 }
4084 if !globq.empty() {
4085 n := globq.size
4086 lock(&sched.lock)
4087 globrunqputbatch(&globq)
4088 unlock(&sched.lock)
4089 startIdle(n)
4090 }
4091
4092 if runqputbatch(pp, &q); !q.empty() {
4093 lock(&sched.lock)
4094 globrunqputbatch(&q)
4095 unlock(&sched.lock)
4096 }
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111 wakep()
4112 }
4113
4114
4115
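// schedule performs one round of scheduling: find a runnable goroutine via
// findRunnable and execute it. It never returns; execute hands control to
// the chosen goroutine.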
4116 func schedule() {
4117 mp := getg().m
4118
4119 if mp.locks != 0 {
4120 throw("schedule: holding locks")
4121 }
4122
4123 if mp.lockedg != 0 {
4124 stoplockedm()
4125 execute(mp.lockedg.ptr(), false)
4126 }
4127
4128
4129
4130 if mp.incgo {
4131 throw("schedule: in cgo")
4132 }
4133
4134 top:
4135 pp := mp.p.ptr()
4136 pp.preempt = false
4137
4138
4139
4140
4141 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4142 throw("schedule: spinning with local work")
4143 }
4144
4145 gp, inheritTime, tryWakeP := findRunnable()
4146
4147
4148
4149
4150 mp.clearAllpSnapshot()
4151
4152 if debug.dontfreezetheworld > 0 && freezing.Load() {
4153
4154
4155
4156
4157
4158
4159
4160 lock(&deadlock)
4161 lock(&deadlock)
4162 }
4163
4164
4165
4166
4167 if mp.spinning {
4168 resetspinning()
4169 }
4170
4171 if sched.disable.user && !schedEnabled(gp) {
4172
4173
4174
4175 lock(&sched.lock)
4176 if schedEnabled(gp) {
4177
4178
4179 unlock(&sched.lock)
4180 } else {
4181 sched.disable.runnable.pushBack(gp)
4182 unlock(&sched.lock)
4183 goto top
4184 }
4185 }
4186
4187
4188
4189 if tryWakeP {
4190 wakep()
4191 }
4192 if gp.lockedm != 0 {
4193
4194
4195 startlockedm(gp)
4196 goto top
4197 }
4198
4199 execute(gp, inheritTime)
4200 }
4201
4202
4203
4204
4205
4206
4207
4208
4209 func dropg() {
4210 gp := getg()
4211
4212 setMNoWB(&gp.m.curg.m, nil)
4213 setGNoWB(&gp.m.curg, nil)
4214 }
4215
4216 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4217 unlock((*mutex)(lock))
4218 return true
4219 }
4220
4221
4222 func park_m(gp *g) {
4223 mp := getg().m
4224
4225 trace := traceAcquire()
4226
4227
4228
4229
4230
4231 bubble := gp.bubble
4232 if bubble != nil {
4233 bubble.incActive()
4234 }
4235
4236 if trace.ok() {
4237
4238
4239
4240 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4241 }
4242
4243
4244 casgstatus(gp, _Grunning, _Gwaiting)
4245 if trace.ok() {
4246 traceRelease(trace)
4247 }
4248
4249 dropg()
4250
4251 if fn := mp.waitunlockf; fn != nil {
4252 ok := fn(gp, mp.waitlock)
4253 mp.waitunlockf = nil
4254 mp.waitlock = nil
4255 if !ok {
4256 trace := traceAcquire()
4257 casgstatus(gp, _Gwaiting, _Grunnable)
4258 if bubble != nil {
4259 bubble.decActive()
4260 }
4261 if trace.ok() {
4262 trace.GoUnpark(gp, 2)
4263 traceRelease(trace)
4264 }
4265 execute(gp, true)
4266 }
4267 }
4268
4269 if bubble != nil {
4270 bubble.decActive()
4271 }
4272
4273 schedule()
4274 }
4275
4276 func goschedImpl(gp *g, preempted bool) {
4277 trace := traceAcquire()
4278 status := readgstatus(gp)
4279 if status&^_Gscan != _Grunning {
4280 dumpgstatus(gp)
4281 throw("bad g status")
4282 }
4283 if trace.ok() {
4284
4285
4286
4287 if preempted {
4288 trace.GoPreempt()
4289 } else {
4290 trace.GoSched()
4291 }
4292 }
4293 casgstatus(gp, _Grunning, _Grunnable)
4294 if trace.ok() {
4295 traceRelease(trace)
4296 }
4297
4298 dropg()
4299 lock(&sched.lock)
4300 globrunqput(gp)
4301 unlock(&sched.lock)
4302
4303 if mainStarted {
4304 wakep()
4305 }
4306
4307 schedule()
4308 }
4309
4310
4311 func gosched_m(gp *g) {
4312 goschedImpl(gp, false)
4313 }
4314
4315
4316 func goschedguarded_m(gp *g) {
4317 if !canPreemptM(gp.m) {
4318 gogo(&gp.sched)
4319 }
4320 goschedImpl(gp, false)
4321 }
4322
4323 func gopreempt_m(gp *g) {
4324 goschedImpl(gp, true)
4325 }
4326
4327
4328
4329
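// preemptPark parks gp in _Gpreempted in response to a suspend-style
// preemption request and enters the scheduler. The goroutine is not placed
// on any run queue; whoever requested the preemption is responsible for
// making it runnable again.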
4330 func preemptPark(gp *g) {
4331 status := readgstatus(gp)
4332 if status&^_Gscan != _Grunning {
4333 dumpgstatus(gp)
4334 throw("bad g status")
4335 }
4336
4337 if gp.asyncSafePoint {
4338
4339
4340
4341 f := findfunc(gp.sched.pc)
4342 if !f.valid() {
4343 throw("preempt at unknown pc")
4344 }
4345 if f.flag&abi.FuncFlagSPWrite != 0 {
4346 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4347 throw("preempt SPWRITE")
4348 }
4349 }
4350
4351
4352
4353
4354
4355
4356
4357 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4358 dropg()
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375 trace := traceAcquire()
4376 if trace.ok() {
4377 trace.GoPark(traceBlockPreempted, 0)
4378 }
4379 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4380 if trace.ok() {
4381 traceRelease(trace)
4382 }
4383 schedule()
4384 }
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400 func goyield() {
4401 checkTimeouts()
4402 mcall(goyield_m)
4403 }
4404
4405 func goyield_m(gp *g) {
4406 trace := traceAcquire()
4407 pp := gp.m.p.ptr()
4408 if trace.ok() {
4409
4410
4411
4412 trace.GoPreempt()
4413 }
4414 casgstatus(gp, _Grunning, _Grunnable)
4415 if trace.ok() {
4416 traceRelease(trace)
4417 }
4418 dropg()
4419 runqput(pp, gp, false)
4420 schedule()
4421 }
4422
4423
4424 func goexit1() {
4425 if raceenabled {
4426 if gp := getg(); gp.bubble != nil {
4427 racereleasemergeg(gp, gp.bubble.raceaddr())
4428 }
4429 racegoend()
4430 }
4431 trace := traceAcquire()
4432 if trace.ok() {
4433 trace.GoEnd()
4434 traceRelease(trace)
4435 }
4436 mcall(goexit0)
4437 }
4438
4439
4440 func goexit0(gp *g) {
4441 gdestroy(gp)
4442 schedule()
4443 }
4444
4445 func gdestroy(gp *g) {
4446 mp := getg().m
4447 pp := mp.p.ptr()
4448
4449 casgstatus(gp, _Grunning, _Gdead)
4450 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4451 if isSystemGoroutine(gp, false) {
4452 sched.ngsys.Add(-1)
4453 }
4454 gp.m = nil
4455 locked := gp.lockedm != 0
4456 gp.lockedm = 0
4457 mp.lockedg = 0
4458 gp.preemptStop = false
4459 gp.paniconfault = false
4460 gp._defer = nil
4461 gp._panic = nil
4462 gp.writebuf = nil
4463 gp.waitreason = waitReasonZero
4464 gp.param = nil
4465 gp.labels = nil
4466 gp.timer = nil
4467 gp.bubble = nil
4468
4469 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4470
4471
4472
4473 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4474 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4475 gcController.bgScanCredit.Add(scanCredit)
4476 gp.gcAssistBytes = 0
4477 }
4478
4479 dropg()
4480
4481 if GOARCH == "wasm" {
4482 gfput(pp, gp)
4483 return
4484 }
4485
4486 if locked && mp.lockedInt != 0 {
4487 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4488 if mp.isextra {
4489 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4490 }
4491 throw("exited a goroutine internally locked to the OS thread")
4492 }
4493 gfput(pp, gp)
4494 if locked {
4495
4496
4497
4498
4499
4500
4501 if GOOS != "plan9" {
4502 gogo(&mp.g0.sched)
4503 } else {
4504
4505
4506 mp.lockedExt = 0
4507 }
4508 }
4509 }
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519 func save(pc, sp, bp uintptr) {
4520 gp := getg()
4521
4522 if gp == gp.m.g0 || gp == gp.m.gsignal {
4523
4524
4525
4526
4527
4528 throw("save on system g not allowed")
4529 }
4530
4531 gp.sched.pc = pc
4532 gp.sched.sp = sp
4533 gp.sched.lr = 0
4534 gp.sched.bp = bp
4535
4536
4537
4538 if gp.sched.ctxt != nil {
4539 badctxt()
4540 }
4541 }
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
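// reentersyscall is the common entry for entersyscall: it saves the
// caller's PC/SP/BP in g.sched, moves g to _Gsyscall, and leaves its P in
// _Psyscall so sysmon can retake the P if the call blocks. The saved state
// is refreshed after each systemstack call below because systemstack
// clobbers g.sched.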
4567 func reentersyscall(pc, sp, bp uintptr) {
4568 trace := traceAcquire()
4569 gp := getg()
4570
4571
4572
4573 gp.m.locks++
4574
4575
4576
4577
4578
4579 gp.stackguard0 = stackPreempt
4580 gp.throwsplit = true
4581
4582
4583 save(pc, sp, bp)
4584 gp.syscallsp = sp
4585 gp.syscallpc = pc
4586 gp.syscallbp = bp
4587 casgstatus(gp, _Grunning, _Gsyscall)
4588 if staticLockRanking {
4589
4590
4591 save(pc, sp, bp)
4592 }
4593 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4594 systemstack(func() {
4595 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4596 throw("entersyscall")
4597 })
4598 }
4599 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4600 systemstack(func() {
4601 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4602 throw("entersyscall")
4603 })
4604 }
4605
4606 if trace.ok() {
4607 systemstack(func() {
4608 trace.GoSysCall()
4609 traceRelease(trace)
4610 })
4611
4612
4613
4614 save(pc, sp, bp)
4615 }
4616
4617 if sched.sysmonwait.Load() {
4618 systemstack(entersyscall_sysmon)
4619 save(pc, sp, bp)
4620 }
4621
4622 if gp.m.p.ptr().runSafePointFn != 0 {
4623
4624 systemstack(runSafePointFn)
4625 save(pc, sp, bp)
4626 }
4627
4628 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4629 pp := gp.m.p.ptr()
4630 pp.m = 0
4631 gp.m.oldp.set(pp)
4632 gp.m.p = 0
4633 atomic.Store(&pp.status, _Psyscall)
4634 if sched.gcwaiting.Load() {
4635 systemstack(entersyscall_gcwait)
4636 save(pc, sp, bp)
4637 }
4638
4639 gp.m.locks--
4640 }
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656 func entersyscall() {
4657
4658
4659
4660
4661 fp := getcallerfp()
4662 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4663 }
4664
4665 func entersyscall_sysmon() {
4666 lock(&sched.lock)
4667 if sched.sysmonwait.Load() {
4668 sched.sysmonwait.Store(false)
4669 notewakeup(&sched.sysmonnote)
4670 }
4671 unlock(&sched.lock)
4672 }
4673
4674 func entersyscall_gcwait() {
4675 gp := getg()
4676 pp := gp.m.oldp.ptr()
4677
4678 lock(&sched.lock)
4679 trace := traceAcquire()
4680 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4681 if trace.ok() {
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691 trace.ProcSteal(pp, true)
4692 traceRelease(trace)
4693 }
4694 sched.nGsyscallNoP.Add(1)
4695 pp.gcStopTime = nanotime()
4696 pp.syscalltick++
4697 if sched.stopwait--; sched.stopwait == 0 {
4698 notewakeup(&sched.stopnote)
4699 }
4700 } else if trace.ok() {
4701 traceRelease(trace)
4702 }
4703 unlock(&sched.lock)
4704 }
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718 func entersyscallblock() {
4719 gp := getg()
4720
4721 gp.m.locks++
4722 gp.throwsplit = true
4723 gp.stackguard0 = stackPreempt
4724 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4725 gp.m.p.ptr().syscalltick++
4726
4727 sched.nGsyscallNoP.Add(1)
4728
4729
4730 pc := sys.GetCallerPC()
4731 sp := sys.GetCallerSP()
4732 bp := getcallerfp()
4733 save(pc, sp, bp)
4734 gp.syscallsp = gp.sched.sp
4735 gp.syscallpc = gp.sched.pc
4736 gp.syscallbp = gp.sched.bp
4737 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4738 sp1 := sp
4739 sp2 := gp.sched.sp
4740 sp3 := gp.syscallsp
4741 systemstack(func() {
4742 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4743 throw("entersyscallblock")
4744 })
4745 }
4746 casgstatus(gp, _Grunning, _Gsyscall)
4747 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4748 systemstack(func() {
4749 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4750 throw("entersyscallblock")
4751 })
4752 }
4753 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4754 systemstack(func() {
4755 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4756 throw("entersyscallblock")
4757 })
4758 }
4759
4760 systemstack(entersyscallblock_handoff)
4761
4762
4763 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4764
4765 gp.m.locks--
4766 }
4767
4768 func entersyscallblock_handoff() {
4769 trace := traceAcquire()
4770 if trace.ok() {
4771 trace.GoSysCall()
4772 traceRelease(trace)
4773 }
4774 handoffp(releasep())
4775 }
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
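// exitsyscall is called when g has finished a system call and must run on
// a P again. The fast path (exitsyscallfast) reacquires the old P or grabs
// an idle one without entering the scheduler; otherwise the slow path
// mcalls exitsyscall0 to queue g and block this M.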
4797 func exitsyscall() {
4798 gp := getg()
4799
4800 gp.m.locks++
4801 if sys.GetCallerSP() > gp.syscallsp {
4802 throw("exitsyscall: syscall frame is no longer valid")
4803 }
4804
4805 gp.waitsince = 0
4806 oldp := gp.m.oldp.ptr()
4807 gp.m.oldp = 0
4808 if exitsyscallfast(oldp) {
4809
4810
4811 if goroutineProfile.active {
4812
4813
4814
4815 systemstack(func() {
4816 tryRecordGoroutineProfileWB(gp)
4817 })
4818 }
4819 trace := traceAcquire()
4820 if trace.ok() {
4821 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4822 systemstack(func() {
4823
4824
4825
4826
4827 trace.GoSysExit(lostP)
4828 if lostP {
4829
4830
4831
4832
4833 trace.GoStart()
4834 }
4835 })
4836 }
4837
4838 gp.m.p.ptr().syscalltick++
4839
4840 casgstatus(gp, _Gsyscall, _Grunning)
4841 if trace.ok() {
4842 traceRelease(trace)
4843 }
4844
4845
4846
4847 gp.syscallsp = 0
4848 gp.m.locks--
4849 if gp.preempt {
4850
4851 gp.stackguard0 = stackPreempt
4852 } else {
4853
4854 gp.stackguard0 = gp.stack.lo + stackGuard
4855 }
4856 gp.throwsplit = false
4857
4858 if sched.disable.user && !schedEnabled(gp) {
4859
4860 Gosched()
4861 }
4862
4863 return
4864 }
4865
4866 gp.m.locks--
4867
4868
4869 mcall(exitsyscall0)
4870
4871
4872
4873
4874
4875
4876
4877 gp.syscallsp = 0
4878 gp.m.p.ptr().syscalltick++
4879 gp.throwsplit = false
4880 }
4881
4882
4883 func exitsyscallfast(oldp *p) bool {
4884
4885 if sched.stopwait == freezeStopWait {
4886 return false
4887 }
4888
4889
4890 trace := traceAcquire()
4891 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4892
4893 wirep(oldp)
4894 exitsyscallfast_reacquired(trace)
4895 if trace.ok() {
4896 traceRelease(trace)
4897 }
4898 return true
4899 }
4900 if trace.ok() {
4901 traceRelease(trace)
4902 }
4903
4904
4905 if sched.pidle != 0 {
4906 var ok bool
4907 systemstack(func() {
4908 ok = exitsyscallfast_pidle()
4909 })
4910 if ok {
4911 return true
4912 }
4913 }
4914 return false
4915 }
4916
4917
4918
4919
4920
4921
4922 func exitsyscallfast_reacquired(trace traceLocker) {
4923 gp := getg()
4924 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4925 if trace.ok() {
4926
4927
4928
4929 systemstack(func() {
4930
4931
4932 trace.ProcSteal(gp.m.p.ptr(), true)
4933 trace.ProcStart()
4934 })
4935 }
4936 gp.m.p.ptr().syscalltick++
4937 }
4938 }
4939
4940 func exitsyscallfast_pidle() bool {
4941 lock(&sched.lock)
4942 pp, _ := pidleget(0)
4943 if pp != nil && sched.sysmonwait.Load() {
4944 sched.sysmonwait.Store(false)
4945 notewakeup(&sched.sysmonnote)
4946 }
4947 unlock(&sched.lock)
4948 if pp != nil {
4949 sched.nGsyscallNoP.Add(-1)
4950 acquirep(pp)
4951 return true
4952 }
4953 return false
4954 }
4955
4956
4957
4958
4959
4960
4961
4962 func exitsyscall0(gp *g) {
4963 var trace traceLocker
4964 traceExitingSyscall()
4965 trace = traceAcquire()
4966 casgstatus(gp, _Gsyscall, _Grunnable)
4967 traceExitedSyscall()
4968 if trace.ok() {
4969
4970
4971
4972
4973 trace.GoSysExit(true)
4974 traceRelease(trace)
4975 }
4976 sched.nGsyscallNoP.Add(-1)
4977 dropg()
4978 lock(&sched.lock)
4979 var pp *p
4980 if schedEnabled(gp) {
4981 pp, _ = pidleget(0)
4982 }
4983 var locked bool
4984 if pp == nil {
4985 globrunqput(gp)
4986
4987
4988
4989
4990
4991
4992 locked = gp.lockedm != 0
4993 } else if sched.sysmonwait.Load() {
4994 sched.sysmonwait.Store(false)
4995 notewakeup(&sched.sysmonnote)
4996 }
4997 unlock(&sched.lock)
4998 if pp != nil {
4999 acquirep(pp)
5000 execute(gp, false)
5001 }
5002 if locked {
5003
5004
5005
5006
5007 stoplockedm()
5008 execute(gp, false)
5009 }
5010 stopm()
5011 schedule()
5012 }
5013
5014
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026 func syscall_runtime_BeforeFork() {
5027 gp := getg().m.curg
5028
5029
5030
5031
5032 gp.m.locks++
5033 sigsave(&gp.m.sigmask)
5034 sigblock(false)
5035
5036
5037
5038
5039
5040 gp.stackguard0 = stackFork
5041 }
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055 func syscall_runtime_AfterFork() {
5056 gp := getg().m.curg
5057
5058
5059 gp.stackguard0 = gp.stack.lo + stackGuard
5060
5061 msigrestore(gp.m.sigmask)
5062
5063 gp.m.locks--
5064 }
5065
5066
5067
5068 var inForkedChild bool
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089 func syscall_runtime_AfterForkInChild() {
5090
5091
5092
5093
5094 inForkedChild = true
5095
5096 clearSignalHandlers()
5097
5098
5099
5100 msigrestore(getg().m.sigmask)
5101
5102 inForkedChild = false
5103 }
5104
5105
5106
5107
5108 var pendingPreemptSignals atomic.Int32
5109
5110
5111
5112
5113 func syscall_runtime_BeforeExec() {
5114
5115 execLock.lock()
5116
5117
5118
5119 if GOOS == "darwin" || GOOS == "ios" {
5120 for pendingPreemptSignals.Load() > 0 {
5121 osyield()
5122 }
5123 }
5124 }
5125
5126
5127
5128
5129 func syscall_runtime_AfterExec() {
5130 execLock.unlock()
5131 }
5132
5133
5134 func malg(stacksize int32) *g {
5135 newg := new(g)
5136 if stacksize >= 0 {
5137 stacksize = round2(stackSystem + stacksize)
5138 systemstack(func() {
5139 newg.stack = stackalloc(uint32(stacksize))
5140 if valgrindenabled {
5141 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5142 }
5143 })
5144 newg.stackguard0 = newg.stack.lo + stackGuard
5145 newg.stackguard1 = ^uintptr(0)
5146
5147
5148 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5149 }
5150 return newg
5151 }
5152
5153
5154
5155
5156 func newproc(fn *funcval) {
5157 gp := getg()
5158 pc := sys.GetCallerPC()
5159 systemstack(func() {
5160 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5161
5162 pp := getg().m.p.ptr()
5163 runqput(pp, newg, true)
5164
5165 if mainStarted {
5166 wakep()
5167 }
5168 })
5169 }
5170
5171
5172
5173
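// newproc1 creates a new goroutine in _Grunnable state (or _Gwaiting with
// the given waitreason if parked is true) that will start executing fn.
// callerpc is the address of the go statement that created it. The caller
// is responsible for putting the new G on a run queue.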
5174 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5175 if fn == nil {
5176 fatal("go of nil func value")
5177 }
5178
5179 mp := acquirem()
5180 pp := mp.p.ptr()
5181 newg := gfget(pp)
5182 if newg == nil {
5183 newg = malg(stackMin)
5184 casgstatus(newg, _Gidle, _Gdead)
5185 allgadd(newg)
5186 }
5187 if newg.stack.hi == 0 {
5188 throw("newproc1: newg missing stack")
5189 }
5190
5191 if readgstatus(newg) != _Gdead {
5192 throw("newproc1: new g is not Gdead")
5193 }
5194
5195 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5196 totalSize = alignUp(totalSize, sys.StackAlign)
5197 sp := newg.stack.hi - totalSize
5198 if usesLR {
5199
5200 *(*uintptr)(unsafe.Pointer(sp)) = 0
5201 prepGoExitFrame(sp)
5202 }
5203 if GOARCH == "arm64" {
5204
5205 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5206 }
5207
5208 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5209 newg.sched.sp = sp
5210 newg.stktopsp = sp
5211 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5212 newg.sched.g = guintptr(unsafe.Pointer(newg))
5213 gostartcallfn(&newg.sched, fn)
5214 newg.parentGoid = callergp.goid
5215 newg.gopc = callerpc
5216 newg.ancestors = saveAncestors(callergp)
5217 newg.startpc = fn.fn
5218 newg.runningCleanups.Store(false)
5219 if isSystemGoroutine(newg, false) {
5220 sched.ngsys.Add(1)
5221 } else {
5222
5223 newg.bubble = callergp.bubble
5224 if mp.curg != nil {
5225 newg.labels = mp.curg.labels
5226 }
5227 if goroutineProfile.active {
5228
5229
5230
5231
5232
5233 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5234 }
5235 }
5236
5237 newg.trackingSeq = uint8(cheaprand())
5238 if newg.trackingSeq%gTrackingPeriod == 0 {
5239 newg.tracking = true
5240 }
5241 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5242
5243
5244 trace := traceAcquire()
5245 var status uint32 = _Grunnable
5246 if parked {
5247 status = _Gwaiting
5248 newg.waitreason = waitreason
5249 }
5250 if pp.goidcache == pp.goidcacheend {
5251
5252
5253
5254 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5255 pp.goidcache -= _GoidCacheBatch - 1
5256 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5257 }
5258 newg.goid = pp.goidcache
5259 casgstatus(newg, _Gdead, status)
5260 pp.goidcache++
5261 newg.trace.reset()
5262 if trace.ok() {
5263 trace.GoCreate(newg, newg.startpc, parked)
5264 traceRelease(trace)
5265 }
5266
5267
5268 if raceenabled {
5269 newg.racectx = racegostart(callerpc)
5270 newg.raceignore = 0
5271 if newg.labels != nil {
5272
5273
5274 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5275 }
5276 }
5277 pp.goroutinesCreated++
5278 releasem(mp)
5279
5280 return newg
5281 }
5282
5283
5284
5285
5286 func saveAncestors(callergp *g) *[]ancestorInfo {
5287
5288 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5289 return nil
5290 }
5291 var callerAncestors []ancestorInfo
5292 if callergp.ancestors != nil {
5293 callerAncestors = *callergp.ancestors
5294 }
5295 n := int32(len(callerAncestors)) + 1
5296 if n > debug.tracebackancestors {
5297 n = debug.tracebackancestors
5298 }
5299 ancestors := make([]ancestorInfo, n)
5300 copy(ancestors[1:], callerAncestors)
5301
5302 var pcs [tracebackInnerFrames]uintptr
5303 npcs := gcallers(callergp, 0, pcs[:])
5304 ipcs := make([]uintptr, npcs)
5305 copy(ipcs, pcs[:])
5306 ancestors[0] = ancestorInfo{
5307 pcs: ipcs,
5308 goid: callergp.goid,
5309 gopc: callergp.gopc,
5310 }
5311
5312 ancestorsp := new([]ancestorInfo)
5313 *ancestorsp = ancestors
5314 return ancestorsp
5315 }
5316
5317
5318
5319 func gfput(pp *p, gp *g) {
5320 if readgstatus(gp) != _Gdead {
5321 throw("gfput: bad status (not Gdead)")
5322 }
5323
5324 stksize := gp.stack.hi - gp.stack.lo
5325
5326 if stksize != uintptr(startingStackSize) {
5327
5328 stackfree(gp.stack)
5329 gp.stack.lo = 0
5330 gp.stack.hi = 0
5331 gp.stackguard0 = 0
5332 if valgrindenabled {
5333 valgrindDeregisterStack(gp.valgrindStackID)
5334 gp.valgrindStackID = 0
5335 }
5336 }
5337
5338 pp.gFree.push(gp)
5339 if pp.gFree.size >= 64 {
5340 var (
5341 stackQ gQueue
5342 noStackQ gQueue
5343 )
5344 for pp.gFree.size >= 32 {
5345 gp := pp.gFree.pop()
5346 if gp.stack.lo == 0 {
5347 noStackQ.push(gp)
5348 } else {
5349 stackQ.push(gp)
5350 }
5351 }
5352 lock(&sched.gFree.lock)
5353 sched.gFree.noStack.pushAll(noStackQ)
5354 sched.gFree.stack.pushAll(stackQ)
5355 unlock(&sched.gFree.lock)
5356 }
5357 }
5358
5359
5360
5361 func gfget(pp *p) *g {
5362 retry:
5363 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5364 lock(&sched.gFree.lock)
5365
5366 for pp.gFree.size < 32 {
5367
5368 gp := sched.gFree.stack.pop()
5369 if gp == nil {
5370 gp = sched.gFree.noStack.pop()
5371 if gp == nil {
5372 break
5373 }
5374 }
5375 pp.gFree.push(gp)
5376 }
5377 unlock(&sched.gFree.lock)
5378 goto retry
5379 }
5380 gp := pp.gFree.pop()
5381 if gp == nil {
5382 return nil
5383 }
5384 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5385
5386
5387
5388 systemstack(func() {
5389 stackfree(gp.stack)
5390 gp.stack.lo = 0
5391 gp.stack.hi = 0
5392 gp.stackguard0 = 0
5393 if valgrindenabled {
5394 valgrindDeregisterStack(gp.valgrindStackID)
5395 gp.valgrindStackID = 0
5396 }
5397 })
5398 }
5399 if gp.stack.lo == 0 {
5400
5401 systemstack(func() {
5402 gp.stack = stackalloc(startingStackSize)
5403 if valgrindenabled {
5404 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5405 }
5406 })
5407 gp.stackguard0 = gp.stack.lo + stackGuard
5408 } else {
5409 if raceenabled {
5410 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5411 }
5412 if msanenabled {
5413 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5414 }
5415 if asanenabled {
5416 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5417 }
5418 }
5419 return gp
5420 }
5421
5422
5423 func gfpurge(pp *p) {
5424 var (
5425 stackQ gQueue
5426 noStackQ gQueue
5427 )
5428 for !pp.gFree.empty() {
5429 gp := pp.gFree.pop()
5430 if gp.stack.lo == 0 {
5431 noStackQ.push(gp)
5432 } else {
5433 stackQ.push(gp)
5434 }
5435 }
5436 lock(&sched.gFree.lock)
5437 sched.gFree.noStack.pushAll(noStackQ)
5438 sched.gFree.stack.pushAll(stackQ)
5439 unlock(&sched.gFree.lock)
5440 }
5441
5442
5443 func Breakpoint() {
5444 breakpoint()
5445 }
5446
5447
5448
5449
5450
5451
5452 func dolockOSThread() {
5453 if GOARCH == "wasm" {
5454 return
5455 }
5456 gp := getg()
5457 gp.m.lockedg.set(gp)
5458 gp.lockedm.set(gp.m)
5459 }
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477 func LockOSThread() {
5478 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5479
5480
5481
5482 startTemplateThread()
5483 }
5484 gp := getg()
5485 gp.m.lockedExt++
5486 if gp.m.lockedExt == 0 {
5487 gp.m.lockedExt--
5488 panic("LockOSThread nesting overflow")
5489 }
5490 dolockOSThread()
5491 }
5492
5493
5494 func lockOSThread() {
5495 getg().m.lockedInt++
5496 dolockOSThread()
5497 }
5498
5499
5500
5501
5502
5503
5504 func dounlockOSThread() {
5505 if GOARCH == "wasm" {
5506 return
5507 }
5508 gp := getg()
5509 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5510 return
5511 }
5512 gp.m.lockedg = 0
5513 gp.lockedm = 0
5514 }
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530 func UnlockOSThread() {
5531 gp := getg()
5532 if gp.m.lockedExt == 0 {
5533 return
5534 }
5535 gp.m.lockedExt--
5536 dounlockOSThread()
5537 }
5538
5539
5540 func unlockOSThread() {
5541 gp := getg()
5542 if gp.m.lockedInt == 0 {
5543 systemstack(badunlockosthread)
5544 }
5545 gp.m.lockedInt--
5546 dounlockOSThread()
5547 }
5548
5549 func badunlockosthread() {
5550 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5551 }
5552
5553 func gcount(includeSys bool) int32 {
5554 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5555 if !includeSys {
5556 n -= sched.ngsys.Load()
5557 }
5558 for _, pp := range allp {
5559 n -= pp.gFree.size
5560 }
5561
5562
5563
5564 if n < 1 {
5565 n = 1
5566 }
5567 return n
5568 }
5569
5570
5571
5572
5573
5574 func goroutineleakcount() int {
5575 return work.goroutineLeak.count
5576 }
5577
5578 func mcount() int32 {
5579 return int32(sched.mnext - sched.nmfreed)
5580 }
5581
5582 var prof struct {
5583 signalLock atomic.Uint32
5584
5585
5586
5587 hz atomic.Int32
5588 }
5589
5590 func _System() { _System() }
5591 func _ExternalCode() { _ExternalCode() }
5592 func _LostExternalCode() { _LostExternalCode() }
5593 func _GC() { _GC() }
5594 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5595 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5596 func _VDSO() { _VDSO() }
5597
5598
5599
5600
5601
5602 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5603 if prof.hz.Load() == 0 {
5604 return
5605 }
5606
5607
5608
5609
5610 if mp != nil && mp.profilehz == 0 {
5611 return
5612 }
5613
5614
5615
5616
5617
5618
5619
5620 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5621 if f := findfunc(pc); f.valid() {
5622 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5623 cpuprof.lostAtomic++
5624 return
5625 }
5626 }
5627 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5628
5629
5630
5631 cpuprof.lostAtomic++
5632 return
5633 }
5634 }
5635
5636
5637
5638
5639
5640
5641
5642 getg().m.mallocing++
5643
5644 var u unwinder
5645 var stk [maxCPUProfStack]uintptr
5646 n := 0
5647 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5648 cgoOff := 0
5649
5650
5651
5652
5653
5654 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5655 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5656 cgoOff++
5657 }
5658 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5659 mp.cgoCallers[0] = 0
5660 }
5661
5662
5663 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5664 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5665
5666
5667 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5668 } else if mp != nil && mp.vdsoSP != 0 {
5669
5670
5671 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5672 } else {
5673 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5674 }
5675 n += tracebackPCs(&u, 0, stk[n:])
5676
5677 if n <= 0 {
5678
5679
5680 n = 2
5681 if inVDSOPage(pc) {
5682 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5683 } else if pc > firstmoduledata.etext {
5684
5685 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5686 }
5687 stk[0] = pc
5688 if mp.preemptoff != "" {
5689 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5690 } else {
5691 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5692 }
5693 }
5694
5695 if prof.hz.Load() != 0 {
5696
5697
5698
5699 var tagPtr *unsafe.Pointer
5700 if gp != nil && gp.m != nil && gp.m.curg != nil {
5701 tagPtr = &gp.m.curg.labels
5702 }
5703 cpuprof.add(tagPtr, stk[:n])
5704
5705 gprof := gp
5706 var mp *m
5707 var pp *p
5708 if gp != nil && gp.m != nil {
5709 if gp.m.curg != nil {
5710 gprof = gp.m.curg
5711 }
5712 mp = gp.m
5713 pp = gp.m.p.ptr()
5714 }
5715 traceCPUSample(gprof, mp, pp, stk[:n])
5716 }
5717 getg().m.mallocing--
5718 }
5719
5720
5721
5722 func setcpuprofilerate(hz int32) {
5723
5724 if hz < 0 {
5725 hz = 0
5726 }
5727
5728
5729
5730 gp := getg()
5731 gp.m.locks++
5732
5733
5734
5735
5736 setThreadCPUProfiler(0)
5737
5738 for !prof.signalLock.CompareAndSwap(0, 1) {
5739 osyield()
5740 }
5741 if prof.hz.Load() != hz {
5742 setProcessCPUProfiler(hz)
5743 prof.hz.Store(hz)
5744 }
5745 prof.signalLock.Store(0)
5746
5747 lock(&sched.lock)
5748 sched.profilehz = hz
5749 unlock(&sched.lock)
5750
5751 if hz != 0 {
5752 setThreadCPUProfiler(hz)
5753 }
5754
5755 gp.m.locks--
5756 }
5757
5758
5759
5760 func (pp *p) init(id int32) {
5761 pp.id = id
5762 pp.gcw.id = id
5763 pp.status = _Pgcstop
5764 pp.sudogcache = pp.sudogbuf[:0]
5765 pp.deferpool = pp.deferpoolbuf[:0]
5766 pp.wbBuf.reset()
5767 if pp.mcache == nil {
5768 if id == 0 {
5769 if mcache0 == nil {
5770 throw("missing mcache?")
5771 }
5772
5773
5774 pp.mcache = mcache0
5775 } else {
5776 pp.mcache = allocmcache()
5777 }
5778 }
5779 if raceenabled && pp.raceprocctx == 0 {
5780 if id == 0 {
5781 pp.raceprocctx = raceprocctx0
5782 raceprocctx0 = 0
5783 } else {
5784 pp.raceprocctx = raceproccreate()
5785 }
5786 }
5787 lockInit(&pp.timers.mu, lockRankTimers)
5788
5789
5790
5791 timerpMask.set(id)
5792
5793
5794 idlepMask.clear(id)
5795 }
5796
5797
5798
5799
5800
5801 func (pp *p) destroy() {
5802 assertLockHeld(&sched.lock)
5803 assertWorldStopped()
5804
5805
5806 for pp.runqhead != pp.runqtail {
5807
5808 pp.runqtail--
5809 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5810
5811 globrunqputhead(gp)
5812 }
5813 if pp.runnext != 0 {
5814 globrunqputhead(pp.runnext.ptr())
5815 pp.runnext = 0
5816 }
5817
5818
5819 getg().m.p.ptr().timers.take(&pp.timers)
5820
5821
5822
5823 if phase := gcphase; phase != _GCoff {
5824 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5825 throw("P destroyed while GC is running")
5826 }
5827
5828 pp.gcw.spanq.destroy()
5829
5830 clear(pp.sudogbuf[:])
5831 pp.sudogcache = pp.sudogbuf[:0]
5832 pp.pinnerCache = nil
5833 clear(pp.deferpoolbuf[:])
5834 pp.deferpool = pp.deferpoolbuf[:0]
5835 systemstack(func() {
5836 for i := 0; i < pp.mspancache.len; i++ {
5837
5838 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5839 }
5840 pp.mspancache.len = 0
5841 lock(&mheap_.lock)
5842 pp.pcache.flush(&mheap_.pages)
5843 unlock(&mheap_.lock)
5844 })
5845 freemcache(pp.mcache)
5846 pp.mcache = nil
5847 gfpurge(pp)
5848 if raceenabled {
5849 if pp.timers.raceCtx != 0 {
5850
5851
5852
5853
5854
5855 mp := getg().m
5856 phold := mp.p.ptr()
5857 mp.p.set(pp)
5858
5859 racectxend(pp.timers.raceCtx)
5860 pp.timers.raceCtx = 0
5861
5862 mp.p.set(phold)
5863 }
5864 raceprocdestroy(pp.raceprocctx)
5865 pp.raceprocctx = 0
5866 }
5867 pp.gcAssistTime = 0
5868 gcCleanups.queued += pp.cleanupsQueued
5869 pp.cleanupsQueued = 0
5870 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5871 pp.goroutinesCreated = 0
5872 pp.xRegs.free()
5873 pp.status = _Pdead
5874 }
5875
5876
5877
5878
5879
5880
5881
5882
5883
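// procresize changes the number of Ps to nprocs. sched.lock must be held
// and the world must be stopped. It grows or shrinks allp and the P masks,
// initializes new Ps, destroys excess ones, and returns the list of Ps
// with local work that the caller must schedule.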
5884 func procresize(nprocs int32) *p {
5885 assertLockHeld(&sched.lock)
5886 assertWorldStopped()
5887
5888 old := gomaxprocs
5889 if old < 0 || nprocs <= 0 {
5890 throw("procresize: invalid arg")
5891 }
5892 trace := traceAcquire()
5893 if trace.ok() {
5894 trace.Gomaxprocs(nprocs)
5895 traceRelease(trace)
5896 }
5897
5898
5899 now := nanotime()
5900 if sched.procresizetime != 0 {
5901 sched.totaltime += int64(old) * (now - sched.procresizetime)
5902 }
5903 sched.procresizetime = now
5904
5905
5906 if nprocs > int32(len(allp)) {
5907
5908
5909 lock(&allpLock)
5910 if nprocs <= int32(cap(allp)) {
5911 allp = allp[:nprocs]
5912 } else {
5913 nallp := make([]*p, nprocs)
5914
5915
5916 copy(nallp, allp[:cap(allp)])
5917 allp = nallp
5918 }
5919
5920 idlepMask = idlepMask.resize(nprocs)
5921 timerpMask = timerpMask.resize(nprocs)
5922 work.spanqMask = work.spanqMask.resize(nprocs)
5923 unlock(&allpLock)
5924 }
5925
5926
5927 for i := old; i < nprocs; i++ {
5928 pp := allp[i]
5929 if pp == nil {
5930 pp = new(p)
5931 }
5932 pp.init(i)
5933 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5934 }
5935
5936 gp := getg()
5937 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5938
5939 gp.m.p.ptr().status = _Prunning
5940 gp.m.p.ptr().mcache.prepareForSweep()
5941 } else {
5942
5943
5944
5945
5946
5947 if gp.m.p != 0 {
5948 trace := traceAcquire()
5949 if trace.ok() {
5950
5951
5952
5953 trace.GoSched()
5954 trace.ProcStop(gp.m.p.ptr())
5955 traceRelease(trace)
5956 }
5957 gp.m.p.ptr().m = 0
5958 }
5959 gp.m.p = 0
5960 pp := allp[0]
5961 pp.m = 0
5962 pp.status = _Pidle
5963 acquirep(pp)
5964 trace := traceAcquire()
5965 if trace.ok() {
5966 trace.GoStart()
5967 traceRelease(trace)
5968 }
5969 }
5970
5971
5972 mcache0 = nil
5973
5974
5975 for i := nprocs; i < old; i++ {
5976 pp := allp[i]
5977 pp.destroy()
5978
5979 }
5980
5981
5982 if int32(len(allp)) != nprocs {
5983 lock(&allpLock)
5984 allp = allp[:nprocs]
5985 idlepMask = idlepMask.resize(nprocs)
5986 timerpMask = timerpMask.resize(nprocs)
5987 work.spanqMask = work.spanqMask.resize(nprocs)
5988 unlock(&allpLock)
5989 }
5990
5991 var runnablePs *p
5992 for i := nprocs - 1; i >= 0; i-- {
5993 pp := allp[i]
5994 if gp.m.p.ptr() == pp {
5995 continue
5996 }
5997 pp.status = _Pidle
5998 if runqempty(pp) {
5999 pidleput(pp, now)
6000 } else {
6001 pp.m.set(mget())
6002 pp.link.set(runnablePs)
6003 runnablePs = pp
6004 }
6005 }
6006 stealOrder.reset(uint32(nprocs))
6007 var int32p *int32 = &gomaxprocs
6008 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6009 if old != nprocs {
6010
6011 gcCPULimiter.resetCapacity(now, nprocs)
6012 }
6013 return runnablePs
6014 }
6015
6016
6017
6018
6019
6020
6021
6022 func acquirep(pp *p) {
6023
6024 wirep(pp)
6025
6026
6027
6028
6029
6030 pp.mcache.prepareForSweep()
6031
6032 trace := traceAcquire()
6033 if trace.ok() {
6034 trace.ProcStart()
6035 traceRelease(trace)
6036 }
6037 }
6038
6039
6040
6041
6042
6043
6044
6045 func wirep(pp *p) {
6046 gp := getg()
6047
6048 if gp.m.p != 0 {
6049
6050
6051 systemstack(func() {
6052 throw("wirep: already in go")
6053 })
6054 }
6055 if pp.m != 0 || pp.status != _Pidle {
6056
6057
6058 systemstack(func() {
6059 id := int64(0)
6060 if pp.m != 0 {
6061 id = pp.m.ptr().id
6062 }
6063 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6064 throw("wirep: invalid p state")
6065 })
6066 }
6067 gp.m.p.set(pp)
6068 pp.m.set(gp.m)
6069 pp.status = _Prunning
6070 }
6071
6072
6073 func releasep() *p {
6074 trace := traceAcquire()
6075 if trace.ok() {
6076 trace.ProcStop(getg().m.p.ptr())
6077 traceRelease(trace)
6078 }
6079 return releasepNoTrace()
6080 }
6081
6082
6083 func releasepNoTrace() *p {
6084 gp := getg()
6085
6086 if gp.m.p == 0 {
6087 throw("releasep: invalid arg")
6088 }
6089 pp := gp.m.p.ptr()
6090 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6091 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6092 throw("releasep: invalid p state")
6093 }
6094 gp.m.p = 0
6095 pp.m = 0
6096 pp.status = _Pidle
6097 return pp
6098 }
6099
6100 func incidlelocked(v int32) {
6101 lock(&sched.lock)
6102 sched.nmidlelocked += v
6103 if v > 0 {
6104 checkdead()
6105 }
6106 unlock(&sched.lock)
6107 }
6108
6109
6110
6111
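// checkdead checks for deadlock: no M running user code and no goroutine
// that could be started by a timer or the netpoller. It must be called
// with sched.lock held.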
6112 func checkdead() {
6113 assertLockHeld(&sched.lock)
6114
6115
6116
6117
6118
6119
6120 if (islibrary || isarchive) && GOARCH != "wasm" {
6121 return
6122 }
6123
6124
6125
6126
6127
6128 if panicking.Load() > 0 {
6129 return
6130 }
6131
6132
6133
6134
6135
6136 var run0 int32
6137 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6138 run0 = 1
6139 }
6140
6141 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6142 if run > run0 {
6143 return
6144 }
6145 if run < 0 {
6146 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6147 unlock(&sched.lock)
6148 throw("checkdead: inconsistent counts")
6149 }
6150
6151 grunning := 0
6152 forEachG(func(gp *g) {
6153 if isSystemGoroutine(gp, false) {
6154 return
6155 }
6156 s := readgstatus(gp)
6157 switch s &^ _Gscan {
6158 case _Gwaiting,
6159 _Gpreempted:
6160 grunning++
6161 case _Grunnable,
6162 _Grunning,
6163 _Gsyscall:
6164 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6165 unlock(&sched.lock)
6166 throw("checkdead: runnable g")
6167 }
6168 })
6169 if grunning == 0 {
6170 unlock(&sched.lock)
6171 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6172 }
6173
6174
6175 if faketime != 0 {
6176 if when := timeSleepUntil(); when < maxWhen {
6177 faketime = when
6178
6179
6180 pp, _ := pidleget(faketime)
6181 if pp == nil {
6182
6183
6184 unlock(&sched.lock)
6185 throw("checkdead: no p for timer")
6186 }
6187 mp := mget()
6188 if mp == nil {
6189
6190
6191 unlock(&sched.lock)
6192 throw("checkdead: no m for timer")
6193 }
6194
6195
6196
6197 sched.nmspinning.Add(1)
6198 mp.spinning = true
6199 mp.nextp.set(pp)
6200 notewakeup(&mp.park)
6201 return
6202 }
6203 }
6204
6205
6206 for _, pp := range allp {
6207 if len(pp.timers.heap) > 0 {
6208 return
6209 }
6210 }
6211
6212 unlock(&sched.lock)
6213 fatal("all goroutines are asleep - deadlock!")
6214 }
6215
6216
6217
6218
6219
6220
6221 var forcegcperiod int64 = 2 * 60 * 1e9
6222
6223
6224
6225
6226 const haveSysmon = GOARCH != "wasm"
6227
6228
6229
6230
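// sysmon runs on its own M without a P, so it keeps running even when all
// Ps are busy. It sleeps adaptively (20us up to 10ms), polls the network if
// nobody has polled recently, retakes Ps stuck in syscalls and preempts
// long-running goroutines via retake, and triggers GOMAXPROCS updates,
// scavenger wakeups, forced GCs, and scheduler traces as needed.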
6231 func sysmon() {
6232 lock(&sched.lock)
6233 sched.nmsys++
6234 checkdead()
6235 unlock(&sched.lock)
6236
6237 lastgomaxprocs := int64(0)
6238 lasttrace := int64(0)
6239 idle := 0
6240 delay := uint32(0)
6241
6242 for {
6243 if idle == 0 {
6244 delay = 20
6245 } else if idle > 50 {
6246 delay *= 2
6247 }
6248 if delay > 10*1000 {
6249 delay = 10 * 1000
6250 }
6251 usleep(delay)
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265
6266
6267
6268 now := nanotime()
6269 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6270 lock(&sched.lock)
6271 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6272 syscallWake := false
6273 next := timeSleepUntil()
6274 if next > now {
6275 sched.sysmonwait.Store(true)
6276 unlock(&sched.lock)
6277
6278
6279 sleep := forcegcperiod / 2
6280 if next-now < sleep {
6281 sleep = next - now
6282 }
6283 shouldRelax := sleep >= osRelaxMinNS
6284 if shouldRelax {
6285 osRelax(true)
6286 }
6287 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6288 if shouldRelax {
6289 osRelax(false)
6290 }
6291 lock(&sched.lock)
6292 sched.sysmonwait.Store(false)
6293 noteclear(&sched.sysmonnote)
6294 }
6295 if syscallWake {
6296 idle = 0
6297 delay = 20
6298 }
6299 }
6300 unlock(&sched.lock)
6301 }
6302
6303 lock(&sched.sysmonlock)
6304
6305
6306 now = nanotime()
6307
6308
6309 if *cgo_yield != nil {
6310 asmcgocall(*cgo_yield, nil)
6311 }
6312
6313 lastpoll := sched.lastpoll.Load()
6314 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6315 sched.lastpoll.CompareAndSwap(lastpoll, now)
6316 list, delta := netpoll(0)
6317 if !list.empty() {
6318
6319
6320
6321
6322
6323
6324
6325 incidlelocked(-1)
6326 injectglist(&list)
6327 incidlelocked(1)
6328 netpollAdjustWaiters(delta)
6329 }
6330 }
6331
6332 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6333 sysmonUpdateGOMAXPROCS()
6334 lastgomaxprocs = now
6335 }
6336 if scavenger.sysmonWake.Load() != 0 {
6337
6338 scavenger.wake()
6339 }
6340
6341
6342 if retake(now) != 0 {
6343 idle = 0
6344 } else {
6345 idle++
6346 }
6347
6348 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6349 lock(&forcegc.lock)
6350 forcegc.idle.Store(false)
6351 var list gList
6352 list.push(forcegc.g)
6353 injectglist(&list)
6354 unlock(&forcegc.lock)
6355 }
6356 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6357 lasttrace = now
6358 schedtrace(debug.scheddetail > 0)
6359 }
6360 unlock(&sched.sysmonlock)
6361 }
6362 }
6363
6364 type sysmontick struct {
6365 schedtick uint32
6366 syscalltick uint32
6367 schedwhen int64
6368 syscallwhen int64
6369 }
6370
6371
6372
6373 const forcePreemptNS = 10 * 1000 * 1000
6374
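// retake preempts Ps that have run the same goroutine for longer than
// forcePreemptNS and retakes Ps blocked in a syscall, handing them off so
// other work can run. A syscall P is retaken if it has local work, if
// there is no other spinning M or idle P, or if the syscall has lasted at
// least 10ms. It returns the number of Ps retaken from syscalls.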
6375 func retake(now int64) uint32 {
6376 n := 0
6377
6378
6379 lock(&allpLock)
6380
6381
6382
6383 for i := 0; i < len(allp); i++ {
6384 pp := allp[i]
6385 if pp == nil {
6386
6387
6388 continue
6389 }
6390 pd := &pp.sysmontick
6391 s := pp.status
6392 sysretake := false
6393 if s == _Prunning || s == _Psyscall {
6394
6395
6396
6397
6398 t := int64(pp.schedtick)
6399 if int64(pd.schedtick) != t {
6400 pd.schedtick = uint32(t)
6401 pd.schedwhen = now
6402 } else if pd.schedwhen+forcePreemptNS <= now {
6403 preemptone(pp)
6404
6405
6406 sysretake = true
6407 }
6408 }
6409 if s == _Psyscall {
6410
6411 t := int64(pp.syscalltick)
6412 if !sysretake && int64(pd.syscalltick) != t {
6413 pd.syscalltick = uint32(t)
6414 pd.syscallwhen = now
6415 continue
6416 }
6417
6418
6419
6420 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6421 continue
6422 }
6423
6424 unlock(&allpLock)
6425
6426
6427
6428
6429 incidlelocked(-1)
6430 trace := traceAcquire()
6431 if atomic.Cas(&pp.status, s, _Pidle) {
6432 if trace.ok() {
6433 trace.ProcSteal(pp, false)
6434 traceRelease(trace)
6435 }
6436 sched.nGsyscallNoP.Add(1)
6437 n++
6438 pp.syscalltick++
6439 handoffp(pp)
6440 } else if trace.ok() {
6441 traceRelease(trace)
6442 }
6443 incidlelocked(1)
6444 lock(&allpLock)
6445 }
6446 }
6447 unlock(&allpLock)
6448 return uint32(n)
6449 }
6450
6451
6452
6453
6454
6455
6456 func preemptall() bool {
6457 res := false
6458 for _, pp := range allp {
6459 if pp.status != _Prunning {
6460 continue
6461 }
6462 if preemptone(pp) {
6463 res = true
6464 }
6465 }
6466 return res
6467 }
6468
6469
6470
6471
6472
6473
6474
6475
6476
6477
6478
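// preemptone requests preemption of the goroutine running on pp: it sets
// the goroutine's preempt flag and poisons its stack guard so the next
// stack check enters the scheduler, and sends an async preemption request
// when supported. It is best effort and may fail to inform the goroutine,
// or inform the wrong one if the P's goroutine changes concurrently.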
6479 func preemptone(pp *p) bool {
6480 mp := pp.m.ptr()
6481 if mp == nil || mp == getg().m {
6482 return false
6483 }
6484 gp := mp.curg
6485 if gp == nil || gp == mp.g0 {
6486 return false
6487 }
6488
6489 gp.preempt = true
6490
6491
6492
6493
6494
6495 gp.stackguard0 = stackPreempt
6496
6497
6498 if preemptMSupported && debug.asyncpreemptoff == 0 {
6499 pp.preempt = true
6500 preemptM(mp)
6501 }
6502
6503 return true
6504 }
6505
6506 var starttime int64
6507
6508 func schedtrace(detailed bool) {
6509 now := nanotime()
6510 if starttime == 0 {
6511 starttime = now
6512 }
6513
6514 lock(&sched.lock)
6515 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6516 if detailed {
6517 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6518 }
6519
6520
6521
6522 for i, pp := range allp {
6523 h := atomic.Load(&pp.runqhead)
6524 t := atomic.Load(&pp.runqtail)
6525 if detailed {
6526 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6527 mp := pp.m.ptr()
6528 if mp != nil {
6529 print(mp.id)
6530 } else {
6531 print("nil")
6532 }
6533 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6534 } else {
6535
6536
6537 print(" ")
6538 if i == 0 {
6539 print("[ ")
6540 }
6541 print(t - h)
6542 if i == len(allp)-1 {
6543 print(" ]")
6544 }
6545 }
6546 }
6547
6548 if !detailed {
6549
6550 print(" schedticks=[ ")
6551 for _, pp := range allp {
6552 print(pp.schedtick)
6553 print(" ")
6554 }
6555 print("]\n")
6556 }
6557
6558 if !detailed {
6559 unlock(&sched.lock)
6560 return
6561 }
6562
6563 for mp := allm; mp != nil; mp = mp.alllink {
6564 pp := mp.p.ptr()
6565 print(" M", mp.id, ": p=")
6566 if pp != nil {
6567 print(pp.id)
6568 } else {
6569 print("nil")
6570 }
6571 print(" curg=")
6572 if mp.curg != nil {
6573 print(mp.curg.goid)
6574 } else {
6575 print("nil")
6576 }
6577 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6578 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6579 print(lockedg.goid)
6580 } else {
6581 print("nil")
6582 }
6583 print("\n")
6584 }
6585
6586 forEachG(func(gp *g) {
6587 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6588 if gp.m != nil {
6589 print(gp.m.id)
6590 } else {
6591 print("nil")
6592 }
6593 print(" lockedm=")
6594 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6595 print(lockedm.id)
6596 } else {
6597 print("nil")
6598 }
6599 print("\n")
6600 })
6601 unlock(&sched.lock)
6602 }
6603
6604 type updateMaxProcsGState struct {
6605 lock mutex
6606 g *g
6607 idle atomic.Bool
6608
6609
6610 procs int32
6611 }
6612
6613 var (
6614
6615
6616 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6617
6618
6619
6620 updateMaxProcsG updateMaxProcsGState
6621
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662
6663
6664
6665
6666
6667
6668
6669 computeMaxProcsLock mutex
6670 )
6671
6672
6673
6674
6675 func defaultGOMAXPROCSUpdateEnable() {
6676 if debug.updatemaxprocs == 0 {
6677
6678
6679
6680
6681
6682
6683
6684
6685
6686
6687
6688 updatemaxprocs.IncNonDefault()
6689 return
6690 }
6691
6692 go updateMaxProcsGoroutine()
6693 }
6694
6695 func updateMaxProcsGoroutine() {
6696 updateMaxProcsG.g = getg()
6697 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
6698 for {
6699 lock(&updateMaxProcsG.lock)
6700 if updateMaxProcsG.idle.Load() {
6701 throw("updateMaxProcsGoroutine: phase error")
6702 }
6703 updateMaxProcsG.idle.Store(true)
6704 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
6705
6706
6707 stw := stopTheWorldGC(stwGOMAXPROCS)
6708
6709
6710 lock(&sched.lock)
6711 custom := sched.customGOMAXPROCS
6712 unlock(&sched.lock)
6713 if custom {
6714 startTheWorldGC(stw)
6715 return
6716 }
6717
6718
6719
6720
6721
6722 newprocs = updateMaxProcsG.procs
6723 lock(&sched.lock)
6724 sched.customGOMAXPROCS = false
6725 unlock(&sched.lock)
6726
6727 startTheWorldGC(stw)
6728 }
6729 }
6730
6731 func sysmonUpdateGOMAXPROCS() {
6732
6733 lock(&computeMaxProcsLock)
6734
6735
6736 lock(&sched.lock)
6737 custom := sched.customGOMAXPROCS
6738 curr := gomaxprocs
6739 unlock(&sched.lock)
6740 if custom {
6741 unlock(&computeMaxProcsLock)
6742 return
6743 }
6744
6745
6746 procs := defaultGOMAXPROCS(0)
6747 unlock(&computeMaxProcsLock)
6748 if procs == curr {
6749
6750 return
6751 }
6752
6753
6754
6755
6756 if updateMaxProcsG.idle.Load() {
6757 lock(&updateMaxProcsG.lock)
6758 updateMaxProcsG.procs = procs
6759 updateMaxProcsG.idle.Store(false)
6760 var list gList
6761 list.push(updateMaxProcsG.g)
6762 injectglist(&list)
6763 unlock(&updateMaxProcsG.lock)
6764 }
6765 }
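// Overview of the GOMAXPROCS update handoff implemented by the two functions
// above (informal summary; the code is authoritative):
//
//  1. updateMaxProcsGoroutine parks with updateMaxProcsG.idle set.
//  2. sysmonUpdateGOMAXPROCS periodically recomputes the default GOMAXPROCS;
//     if the value changed and GOMAXPROCS was not set manually, it records
//     the new value in updateMaxProcsG.procs, clears idle, and readies the
//     goroutine via injectglist.
//  3. The woken goroutine stops the world, re-checks sched.customGOMAXPROCS,
//     publishes the new value through newprocs, and restarts the world.
//
// updateMaxProcsG.lock orders the park/wake transitions, and the idle flag
// keeps sysmon from trying to wake a goroutine that is not parked.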
6766
6767 // schedEnableUser enables or disables scheduling of user goroutines.
6768 //
6769 // This does not stop already running user goroutines, so the caller
6770 // should first stop the world when disabling user goroutine
6771 // scheduling.
6772 func schedEnableUser(enable bool) {
6773 lock(&sched.lock)
6774 if sched.disable.user == !enable {
6775 unlock(&sched.lock)
6776 return
6777 }
6778 sched.disable.user = !enable
6779 if enable {
6780 n := sched.disable.runnable.size
6781 globrunqputbatch(&sched.disable.runnable)
6782 unlock(&sched.lock)
6783 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6784 startm(nil, false, false)
6785 }
6786 } else {
6787 unlock(&sched.lock)
6788 }
6789 }
6790
6791 // schedEnabled reports whether gp should be scheduled; it returns false
6792 // if scheduling of gp is disabled.
6793 //
6794 // sched.lock must be held.
6795 func schedEnabled(gp *g) bool {
6796 assertLockHeld(&sched.lock)
6797
6798 if sched.disable.user {
6799 return isSystemGoroutine(gp, true)
6800 }
6801 return true
6802 }
6803
6804
6805
6806 // mput puts mp on the midle (idle M) list.
6807 // sched.lock must be held.
6808 // May run during STW, so write barriers are not allowed.
6809 func mput(mp *m) {
6810 assertLockHeld(&sched.lock)
6811
6812 mp.schedlink = sched.midle
6813 sched.midle.set(mp)
6814 sched.nmidle++
6815 checkdead()
6816 }
6817
6818
6819
6820 // mget tries to get an M from the midle list.
6821 // sched.lock must be held.
6822 // May run during STW, so write barriers are not allowed.
6823 func mget() *m {
6824 assertLockHeld(&sched.lock)
6825
6826 mp := sched.midle.ptr()
6827 if mp != nil {
6828 sched.midle = mp.schedlink
6829 sched.nmidle--
6830 }
6831 return mp
6832 }
6833
6834
6835
6836 // globrunqput puts gp on the global runnable queue.
6837 // sched.lock must be held.
6838 // May run during STW, so write barriers are not allowed.
6839 func globrunqput(gp *g) {
6840 assertLockHeld(&sched.lock)
6841
6842 sched.runq.pushBack(gp)
6843 }
6844
6845
6846
6847 // globrunqputhead puts gp at the head of the global runnable queue.
6848 // sched.lock must be held.
6849 // May run during STW, so write barriers are not allowed.
6850 func globrunqputhead(gp *g) {
6851 assertLockHeld(&sched.lock)
6852
6853 sched.runq.push(gp)
6854 }
6855
6856
6857
6858 // globrunqputbatch puts a batch of runnable goroutines on the global
6859 // runnable queue and clears *batch.
6860 // sched.lock must be held.
6861 // May run during STW, so write barriers are not allowed.
6862 func globrunqputbatch(batch *gQueue) {
6863 assertLockHeld(&sched.lock)
6864
6865 sched.runq.pushBackAll(*batch)
6866 *batch = gQueue{}
6867 }
6868
6869 // globrunqget tries to dequeue one G from the global runnable queue.
6870 // sched.lock must be held.
6871 func globrunqget() *g {
6872 assertLockHeld(&sched.lock)
6873
6874 if sched.runq.size == 0 {
6875 return nil
6876 }
6877
6878 return sched.runq.pop()
6879 }
6880
6881 // globrunqgetbatch dequeues up to n Gs from the global runnable queue,
6882 // returning one directly and the rest in q. sched.lock must be held.
6883 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
6884 assertLockHeld(&sched.lock)
6885
6886 if sched.runq.size == 0 {
6887 return
6888 }
6889
6890 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
6891
6892 gp = sched.runq.pop()
6893 n--
6894
6895 for ; n > 0; n-- {
6896 gp1 := sched.runq.pop()
6897 q.pushBack(gp1)
6898 }
6899 return
6900 }
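// Worked example for the batch size computed above (illustrative): with
// sched.runq.size == 100, gomaxprocs == 4, and a request of n == 64, the
// clamp min(64, 100, 100/4+1) yields 26, so one G is returned directly and
// the remaining 25 are handed back in q. The size/gomaxprocs+1 term keeps a
// single P from draining the entire global queue in one call.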
6901
6902 // pMask is an atomic bitmask of Ps, one bit per P, indexed by P id.
6903 type pMask []uint32
6904
6905
6906 func (p pMask) read(id uint32) bool {
6907 word := id / 32
6908 mask := uint32(1) << (id % 32)
6909 return (atomic.Load(&p[word]) & mask) != 0
6910 }
6911
6912
6913 func (p pMask) set(id int32) {
6914 word := id / 32
6915 mask := uint32(1) << (id % 32)
6916 atomic.Or(&p[word], mask)
6917 }
6918
6919
6920 func (p pMask) clear(id int32) {
6921 word := id / 32
6922 mask := uint32(1) << (id % 32)
6923 atomic.And(&p[word], ^mask)
6924 }
6925
6926
6927 func (p pMask) any() bool {
6928 for i := range p {
6929 if atomic.Load(&p[i]) != 0 {
6930 return true
6931 }
6932 }
6933 return false
6934 }
6935
6936
6937 // resize returns a pMask with room for at least nprocs Ps, reusing the
6938 // existing backing array when it is large enough and otherwise copying
6939 // the current bits into a newly allocated one.
6940 func (p pMask) resize(nprocs int32) pMask {
6941 maskWords := (nprocs + 31) / 32
6942
6943 if maskWords <= int32(cap(p)) {
6944 return p[:maskWords]
6945 }
6946 newMask := make([]uint32, maskWords)
6947
6948 copy(newMask, p)
6949 return newMask
6950 }
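// Illustrative note on the pMask layout used by read/set/clear above (not
// part of the original source): P ids map to bits within 32-bit words, so
// P id 37 lives in word 37/32 == 1 at bit 37%32 == 5, i.e. mask 1<<5, and
// resize provides (nprocs+31)/32 words, one bit per P.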
6951
6952
6953
6954 // pidleput puts pp on the _Pidle list. now must be a relatively recent call
6955 // to nanotime, or zero. It returns now, or the current time if now was zero.
6956 //
6957 // This releases ownership of pp. Once sched.lock is released it is no longer
6958 // safe to use pp.
6959 //
6960 // sched.lock must be held.
6961 //
6962 // May run during STW, so write barriers are not allowed.
6963 func pidleput(pp *p, now int64) int64 {
6964 assertLockHeld(&sched.lock)
6965
6966 if !runqempty(pp) {
6967 throw("pidleput: P has non-empty run queue")
6968 }
6969 if now == 0 {
6970 now = nanotime()
6971 }
6972 if pp.timers.len.Load() == 0 {
6973 timerpMask.clear(pp.id)
6974 }
6975 idlepMask.set(pp.id)
6976 pp.link = sched.pidle
6977 sched.pidle.set(pp)
6978 sched.npidle.Add(1)
6979 if !pp.limiterEvent.start(limiterEventIdle, now) {
6980 throw("must be able to track idle limiter event")
6981 }
6982 return now
6983 }
6984
6985
6986
6987 // pidleget tries to get a P from the _Pidle list, acquiring ownership of it.
6988 //
6989 // sched.lock must be held.
6990 //
6991 // May run during STW, so write barriers are not allowed.
6992 func pidleget(now int64) (*p, int64) {
6993 assertLockHeld(&sched.lock)
6994
6995 pp := sched.pidle.ptr()
6996 if pp != nil {
6997
6998 if now == 0 {
6999 now = nanotime()
7000 }
7001 timerpMask.set(pp.id)
7002 idlepMask.clear(pp.id)
7003 sched.pidle = pp.link
7004 sched.npidle.Add(-1)
7005 pp.limiterEvent.stop(limiterEventIdle, now)
7006 }
7007 return pp, now
7008 }
7009
7010
7011
7012 // pidlegetSpinning tries to get a P from the _Pidle list, acquiring ownership
7013 // of it. It is called by spinning Ms (or callers that need a spinning M) that
7014 // have found work. If there is no P available it sets sched.needspinning so
7015 // that non-spinning Ms about to drop their P will re-check for this work.
7016 //
7017 // sched.lock must be held.
7018 //
7019 // May run during STW, so write barriers are not allowed.
7020 func pidlegetSpinning(now int64) (*p, int64) {
7021 assertLockHeld(&sched.lock)
7022
7023 pp, now := pidleget(now)
7024 if pp == nil {
7025
7026
7027
7028 sched.needspinning.Store(1)
7029 return nil, now
7030 }
7031
7032 return pp, now
7033 }
7034
7035 // runqempty reports whether pp has no Gs on its local run queue.
7036 // It never returns true spuriously.
7037 func runqempty(pp *p) bool {
7038 // Defend against a race where 1) pp has G1 in runnext but runqhead == runqtail,
7039 // 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runnext.
7040 // Simply observing that runqhead == runqtail and then observing that runnext == nil
7041 // does not mean the queue is empty.
7042 for {
7043 head := atomic.Load(&pp.runqhead)
7044 tail := atomic.Load(&pp.runqtail)
7045 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7046 if tail == atomic.Load(&pp.runqtail) {
7047 return head == tail && runnext == 0
7048 }
7049 }
7050 }
7051
7052 // To shake out latent assumptions about scheduling order,
7053 // we introduce some randomness into scheduling decisions
7054 // when running with the race detector.
7055 // The need for this was made obvious by changing the
7056 // (deterministic) scheduling order in Go 1.5 and breaking
7057 // many poorly-written tests.
7058 // With the randomness here, as long as the tests pass
7059 // consistently with -race, they shouldn't have latent scheduling
7060 // assumptions.
7061 const randomizeScheduler = raceenabled
7062
7063 // runqput tries to put gp on the local runnable queue.
7064 // If next is false, runqput adds gp to the tail of the runnable queue.
7065 // If next is true, runqput puts gp in the pp.runnext slot.
7066 // If the run queue is full, gp is put on the global queue.
7067 // Executed only by the owner P.
7068 func runqput(pp *p, gp *g, next bool) {
7069 if !haveSysmon && next {
7070 // A runnext goroutine shares the same time slice as the
7071 // current goroutine (inheritTime from runqget). To prevent a
7072 // ping-pong pair of goroutines from starving all others, we
7073 // depend on sysmon to preempt "long-running goroutines". That
7074 // is, any set of goroutines sharing the same time slice.
7075 //
7076 // If there is no sysmon, we must avoid runnext entirely or
7077 // risk starvation.
7078 next = false
7079 }
7080 if randomizeScheduler && next && randn(2) == 0 {
7081 next = false
7082 }
7083
7084 if next {
7085 retryNext:
7086 oldnext := pp.runnext
7087 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7088 goto retryNext
7089 }
7090 if oldnext == 0 {
7091 return
7092 }
7093
7094 gp = oldnext.ptr()
7095 }
7096
7097 retry:
7098 h := atomic.LoadAcq(&pp.runqhead)
7099 t := pp.runqtail
7100 if t-h < uint32(len(pp.runq)) {
7101 pp.runq[t%uint32(len(pp.runq))].set(gp)
7102 atomic.StoreRel(&pp.runqtail, t+1)
7103 return
7104 }
7105 if runqputslow(pp, gp, h, t) {
7106 return
7107 }
7108
7109 goto retry
7110 }
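// Sketch of the ring-buffer indexing used by runqput and runqget
// (illustrative; the invariants are maintained by the code itself): runqhead
// and runqtail are free-running uint32 counters and a slot is addressed as
//
//	pp.runq[t%uint32(len(pp.runq))]
//
// so the local queue holds at most len(pp.runq) goroutines, and t-h is the
// current length even after the counters wrap, because the subtraction is
// modulo 2^32.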
7111
7112 // runqputslow puts gp and a batch of work from the local runnable queue
7113 // on the global queue. Executed only by the owner P.
7114 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7115 var batch [len(pp.runq)/2 + 1]*g
7116
7117
7118 n := t - h
7119 n = n / 2
7120 if n != uint32(len(pp.runq)/2) {
7121 throw("runqputslow: queue is not full")
7122 }
7123 for i := uint32(0); i < n; i++ {
7124 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7125 }
7126 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7127 return false
7128 }
7129 batch[n] = gp
7130
7131 if randomizeScheduler {
7132 for i := uint32(1); i <= n; i++ {
7133 j := cheaprandn(i + 1)
7134 batch[i], batch[j] = batch[j], batch[i]
7135 }
7136 }
7137
7138
7139 for i := uint32(0); i < n; i++ {
7140 batch[i].schedlink.set(batch[i+1])
7141 }
7142
7143 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7144
7145
7146 lock(&sched.lock)
7147 globrunqputbatch(&q)
7148 unlock(&sched.lock)
7149 return true
7150 }
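// Worked example (illustrative): when the local queue is full with
// len(pp.runq) entries, runqputslow grabs n == len(pp.runq)/2 of them plus
// the incoming gp, optionally shuffles the batch under the race detector,
// links the n+1 goroutines through schedlink, and moves them to the global
// queue as a single gQueue.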
7151
7152 // runqputbatch tries to put all the Gs on q on the local runnable queue.
7153 // If the local run queue fills up, the remaining Gs stay on q.
7154 // Executed only by the owner P.
7155 func runqputbatch(pp *p, q *gQueue) {
7156 if q.empty() {
7157 return
7158 }
7159 h := atomic.LoadAcq(&pp.runqhead)
7160 t := pp.runqtail
7161 n := uint32(0)
7162 for !q.empty() && t-h < uint32(len(pp.runq)) {
7163 gp := q.pop()
7164 pp.runq[t%uint32(len(pp.runq))].set(gp)
7165 t++
7166 n++
7167 }
7168
7169 if randomizeScheduler {
7170 off := func(o uint32) uint32 {
7171 return (pp.runqtail + o) % uint32(len(pp.runq))
7172 }
7173 for i := uint32(1); i < n; i++ {
7174 j := cheaprandn(i + 1)
7175 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7176 }
7177 }
7178
7179 atomic.StoreRel(&pp.runqtail, t)
7180
7181 return
7182 }
7183
7184 // runqget gets a G from the local runnable queue.
7185 // If inheritTime is true, gp should inherit the remaining time in the
7186 // current time slice. Otherwise, it should start a new time slice.
7187 // Executed only by the owner P.
7188 func runqget(pp *p) (gp *g, inheritTime bool) {
7189
7190 next := pp.runnext
7191
7192
7193
7194 if next != 0 && pp.runnext.cas(next, 0) {
7195 return next.ptr(), true
7196 }
7197
7198 for {
7199 h := atomic.LoadAcq(&pp.runqhead)
7200 t := pp.runqtail
7201 if t == h {
7202 return nil, false
7203 }
7204 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7205 if atomic.CasRel(&pp.runqhead, h, h+1) {
7206 return gp, false
7207 }
7208 }
7209 }
7210
7211 // runqdrain drains the local runnable queue of pp and returns all
7212 // goroutines in it. Executed only by the owner P.
7213 func runqdrain(pp *p) (drainQ gQueue) {
7214 oldNext := pp.runnext
7215 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7216 drainQ.pushBack(oldNext.ptr())
7217 }
7218
7219 retry:
7220 h := atomic.LoadAcq(&pp.runqhead)
7221 t := pp.runqtail
7222 qn := t - h
7223 if qn == 0 {
7224 return
7225 }
7226 if qn > uint32(len(pp.runq)) {
7227 goto retry
7228 }
7229
7230 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7231 goto retry
7232 }
7233
7234
7235
7236
7237
7238
7239
7240
7241 for i := uint32(0); i < qn; i++ {
7242 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7243 drainQ.pushBack(gp)
7244 }
7245 return
7246 }
7247
7248 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
7249 // batch is a ring buffer starting at batchHead.
7250 // It returns the number of grabbed goroutines.
7251 // Can be executed by any P.
7252 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7253 for {
7254 h := atomic.LoadAcq(&pp.runqhead)
7255 t := atomic.LoadAcq(&pp.runqtail)
7256 n := t - h
7257 n = n - n/2
7258 if n == 0 {
7259 if stealRunNextG {
7260
7261 if next := pp.runnext; next != 0 {
7262 if pp.status == _Prunning {
7263
7264
7265
7266
7267
7268
7269
7270
7271
7272
7273 if !osHasLowResTimer {
7274 usleep(3)
7275 } else {
7276
7277
7278
7279 osyield()
7280 }
7281 }
7282 if !pp.runnext.cas(next, 0) {
7283 continue
7284 }
7285 batch[batchHead%uint32(len(batch))] = next
7286 return 1
7287 }
7288 }
7289 return 0
7290 }
7291 if n > uint32(len(pp.runq)/2) {
7292 continue
7293 }
7294 for i := uint32(0); i < n; i++ {
7295 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7296 batch[(batchHead+i)%uint32(len(batch))] = g
7297 }
7298 if atomic.CasRel(&pp.runqhead, h, h+n) {
7299 return n
7300 }
7301 }
7302 }
7303
7304 // runqsteal steals half of the elements from the local runnable queue of p2
7305 // and puts them onto the local runnable queue of pp.
7306 // Returns one of the stolen elements (or nil if the steal failed).
7307 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7308 t := pp.runqtail
7309 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7310 if n == 0 {
7311 return nil
7312 }
7313 n--
7314 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7315 if n == 0 {
7316 return gp
7317 }
7318 h := atomic.LoadAcq(&pp.runqhead)
7319 if t-h+n >= uint32(len(pp.runq)) {
7320 throw("runqsteal: runq overflow")
7321 }
7322 atomic.StoreRel(&pp.runqtail, t+n)
7323 return gp
7324 }
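// Worked example of the steal-half policy (illustrative): if the victim p2
// has 9 runnable Gs, runqgrab computes n = 9 - 9/2 = 5, copies those five
// into pp's ring starting at pp.runqtail, and runqsteal returns the last of
// them for immediate execution while publishing the other four with the
// runqtail store above.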
7325
7326 // A gQueue is a dequeue of Gs linked through g.schedlink.
7327 // A G can only be on one gQueue or gList at a time.
7328 type gQueue struct {
7329 head guintptr
7330 tail guintptr
7331 size int32
7332 }
7333
7334
7335 func (q *gQueue) empty() bool {
7336 return q.head == 0
7337 }
7338
7339
7340 func (q *gQueue) push(gp *g) {
7341 gp.schedlink = q.head
7342 q.head.set(gp)
7343 if q.tail == 0 {
7344 q.tail.set(gp)
7345 }
7346 q.size++
7347 }
7348
7349
7350 func (q *gQueue) pushBack(gp *g) {
7351 gp.schedlink = 0
7352 if q.tail != 0 {
7353 q.tail.ptr().schedlink.set(gp)
7354 } else {
7355 q.head.set(gp)
7356 }
7357 q.tail.set(gp)
7358 q.size++
7359 }
7360
7361
7362
7363 func (q *gQueue) pushBackAll(q2 gQueue) {
7364 if q2.tail == 0 {
7365 return
7366 }
7367 q2.tail.ptr().schedlink = 0
7368 if q.tail != 0 {
7369 q.tail.ptr().schedlink = q2.head
7370 } else {
7371 q.head = q2.head
7372 }
7373 q.tail = q2.tail
7374 q.size += q2.size
7375 }
7376
7377
7378
7379 func (q *gQueue) pop() *g {
7380 gp := q.head.ptr()
7381 if gp != nil {
7382 q.head = gp.schedlink
7383 if q.head == 0 {
7384 q.tail = 0
7385 }
7386 q.size--
7387 }
7388 return gp
7389 }
7390
7391
7392 func (q *gQueue) popList() gList {
7393 stack := gList{q.head, q.size}
7394 *q = gQueue{}
7395 return stack
7396 }
7397
7398 // A gList is a list of Gs linked through g.schedlink.
7399 // A G can only be on one gQueue or gList at a time.
7400 type gList struct {
7401 head guintptr
7402 size int32
7403 }
7404
7405
7406 func (l *gList) empty() bool {
7407 return l.head == 0
7408 }
7409
7410
7411 func (l *gList) push(gp *g) {
7412 gp.schedlink = l.head
7413 l.head.set(gp)
7414 l.size++
7415 }
7416
7417
7418 func (l *gList) pushAll(q gQueue) {
7419 if !q.empty() {
7420 q.tail.ptr().schedlink = l.head
7421 l.head = q.head
7422 l.size += q.size
7423 }
7424 }
7425
7426
7427 func (l *gList) pop() *g {
7428 gp := l.head.ptr()
7429 if gp != nil {
7430 l.head = gp.schedlink
7431 l.size--
7432 }
7433 return gp
7434 }
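// Illustrative usage sketch for gQueue and gList (a sketch, not code from
// this file; gp1 and gp2 stand for arbitrary *g values): both containers are
// intrusive lists linked through g.schedlink, so a G may be on at most one
// of them at a time.
//
//	var q gQueue
//	q.pushBack(gp1) // FIFO order: gp1, then gp2
//	q.pushBack(gp2)
//	var l gList
//	l.pushAll(q)    // l now holds gp1, gp2; q must not be reused
//	gp := l.pop()   // returns gp1
//
// The size fields let callers such as globrunqputbatch track queue lengths
// without walking the lists.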
7435
7436 // setMaxThreads implements runtime/debug.SetMaxThreads, returning the previous limit.
7437 func setMaxThreads(in int) (out int) {
7438 lock(&sched.lock)
7439 out = int(sched.maxmcount)
7440 if in > 0x7fffffff {
7441 sched.maxmcount = 0x7fffffff
7442 } else {
7443 sched.maxmcount = int32(in)
7444 }
7445 checkmcount()
7446 unlock(&sched.lock)
7447 return
7448 }
7449
7450
7451
7452
7453
7454
7455
7456
7457
7458
7459
7460
7461
7462 func procPin() int {
7463 gp := getg()
7464 mp := gp.m
7465
7466 mp.locks++
7467 return int(mp.p.ptr().id)
7468 }
7469
7470
7471
7472
7473
7474
7475
7476
7477
7478
7479
7480
7481
7482 func procUnpin() {
7483 gp := getg()
7484 gp.m.locks--
7485 }
7486
7487
7488
7489 func sync_runtime_procPin() int {
7490 return procPin()
7491 }
7492
7493
7494
7495 func sync_runtime_procUnpin() {
7496 procUnpin()
7497 }
7498
7499
7500
7501 func sync_atomic_runtime_procPin() int {
7502 return procPin()
7503 }
7504
7505
7506
7507 func sync_atomic_runtime_procUnpin() {
7508 procUnpin()
7509 }
7510
7511
7512
7513
7514
7515 func internal_sync_runtime_canSpin(i int) bool {
7516 // sync.Mutex is cooperative, so we are conservative with spinning.
7517 // Spin only a few times and only if running on a multicore machine and
7518 // GOMAXPROCS > 1 and there is at least one other running P and the local
7519 // runq is empty. As opposed to runtime mutexes we don't do passive
7520 // spinning here, since there can be work on the global runq or on other Ps.
7521 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7522 return false
7523 }
7524 if p := getg().m.p.ptr(); !runqempty(p) {
7525 return false
7526 }
7527 return true
7528 }
7529
7530
7531
7532 func internal_sync_runtime_doSpin() {
7533 procyield(active_spin_cnt)
7534 }
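// Illustrative pattern for how a lock implementation is expected to use the
// two helpers above (a sketch under assumed names; tryAcquire is a
// placeholder, not a real function):
//
//	iter := 0
//	for !tryAcquire() {
//		if internal_sync_runtime_canSpin(iter) {
//			internal_sync_runtime_doSpin() // procyield(active_spin_cnt)
//			iter++
//			continue
//		}
//		break // fall back to blocking instead of burning CPU
//	}
//
// canSpin gives up once i >= active_spin, on single-CPU machines, when all
// other Ps are idle or spinning, or when the local run queue has work that
// spinning would only delay.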
7535
7536
7537
7538
7539
7540
7541
7542
7543
7544
7545
7546
7547
7548
7549
7550 func sync_runtime_canSpin(i int) bool {
7551 return internal_sync_runtime_canSpin(i)
7552 }
7553
7554
7555
7556
7557
7558
7559
7560
7561
7562
7563
7564
7565
7566 func sync_runtime_doSpin() {
7567 internal_sync_runtime_doSpin()
7568 }
7569
7570 var stealOrder randomOrder
7571
7572 // randomOrder/randomEnum are helper types for randomized work stealing.
7573 // They allow enumerating all Ps in different pseudo-random orders without
7574 // repetitions: if X and GOMAXPROCS are coprime, then the sequence
7575 // (i + X) % GOMAXPROCS enumerates every P exactly once.
7576 type randomOrder struct {
7577 count uint32
7578 coprimes []uint32
7579 }
7580
7581 type randomEnum struct {
7582 i uint32
7583 count uint32
7584 pos uint32
7585 inc uint32
7586 }
7587
7588 func (ord *randomOrder) reset(count uint32) {
7589 ord.count = count
7590 ord.coprimes = ord.coprimes[:0]
7591 for i := uint32(1); i <= count; i++ {
7592 if gcd(i, count) == 1 {
7593 ord.coprimes = append(ord.coprimes, i)
7594 }
7595 }
7596 }
7597
7598 func (ord *randomOrder) start(i uint32) randomEnum {
7599 return randomEnum{
7600 count: ord.count,
7601 pos: i % ord.count,
7602 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7603 }
7604 }
7605
7606 func (enum *randomEnum) done() bool {
7607 return enum.i == enum.count
7608 }
7609
7610 func (enum *randomEnum) next() {
7611 enum.i++
7612 enum.pos = (enum.pos + enum.inc) % enum.count
7613 }
7614
7615 func (enum *randomEnum) position() uint32 {
7616 return enum.pos
7617 }
7618
7619 func gcd(a, b uint32) uint32 {
7620 for b != 0 {
7621 a, b = b, a%b
7622 }
7623 return a
7624 }
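// Worked example of the randomized enumeration above (illustrative): with
// count == 6 the coprimes are {1, 5}. Starting at pos == 2 with inc == 5,
// successive next() calls visit positions 2, 1, 0, 5, 4, 3, covering every P
// exactly once, because stepping by a value coprime to count cycles through
// all residues modulo count before repeating. stealOrder uses this to probe
// victim Ps in a pseudo-random but complete order.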
7625
7626 // An initTask represents the set of initializations that need to be done
7627 // for a package.
7628 type initTask struct {
7629 state uint32
7630 nfns uint32
7631 // followed by nfns pcs, uintptr sized, one per init function to run
7632 }
7633
7634 // inittrace stores statistics for init functions which are updated by
7635 // malloc and newproc when active is true.
7636 var inittrace tracestat
7637
7638 type tracestat struct {
7639 active bool
7640 id uint64
7641 allocs uint64
7642 bytes uint64
7643 }
7644
7645 func doInit(ts []*initTask) {
7646 for _, t := range ts {
7647 doInit1(t)
7648 }
7649 }
7650
7651 func doInit1(t *initTask) {
7652 switch t.state {
7653 case 2:
7654 return
7655 case 1:
7656 throw("recursive call during initialization - linker skew")
7657 default:
7658 t.state = 1
7659
7660 var (
7661 start int64
7662 before tracestat
7663 )
7664
7665 if inittrace.active {
7666 start = nanotime()
7667
7668 before = inittrace
7669 }
7670
7671 if t.nfns == 0 {
7672
7673 throw("inittask with no functions")
7674 }
7675
7676 firstFunc := add(unsafe.Pointer(t), 8)
7677 for i := uint32(0); i < t.nfns; i++ {
7678 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7679 f := *(*func())(unsafe.Pointer(&p))
7680 f()
7681 }
7682
7683 if inittrace.active {
7684 end := nanotime()
7685
7686 after := inittrace
7687
7688 f := *(*func())(unsafe.Pointer(&firstFunc))
7689 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7690
7691 var sbuf [24]byte
7692 print("init ", pkg, " @")
7693 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7694 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7695 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7696 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7697 print("\n")
7698 }
7699
7700 t.state = 2
7701 }
7702 }
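// Layout assumed by the pointer arithmetic in doInit1 (illustrative summary):
// an initTask is
//
//	state uint32 | nfns uint32 | nfns function PCs, each goarch.PtrSize bytes
//
// so add(unsafe.Pointer(t), 8) skips the two uint32 fields and lands on the
// first function PC. Each slot is reinterpreted as a func() value and called
// in order, and state moves 0 -> 1 (running) -> 2 (done) so recursive
// initialization can be detected.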
7703