Source file: src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/strconv"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
21 var modinfo string
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
127 var runtime_inittasks []*initTask
128
129
130
131
132
133 var main_init_done chan bool
134
135
136 func main_main()
137
138
139 var mainStarted bool
140
141
142 var runtimeInitTime int64
143
144
145 var initSigmask sigset
146
147
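// main is the entry function for the main goroutine. It starts sysmon, runs
// the runtime and package init tasks, calls the program's main.main, and then
// exits the process.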
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
154
155
156
157
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
164
165
166
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
178
179
180
181
182
183
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247
248
249
250
251
252
253
254
255 last := lastmoduledatap
256 for m := &firstmoduledata; true; m = m.next {
257 doInit(m.inittasks)
258 if m == last {
259 break
260 }
261 }
262
263
264
265 inittrace.active = false
266
267 close(main_init_done)
268
269 needUnlock = false
270 unlockOSThread()
271
272 if isarchive || islibrary {
273
274
275 if GOARCH == "wasm" {
276
277
278
279
280
281
282
283 pause(sys.GetCallerSP() - 16)
284 panic("unreachable")
285 }
286 return
287 }
288 fn := main_main
289 fn()
290
291
292
293
294
295
296
297
298 exitHooksRun := false
299 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
300 runExitHooks(0)
301 exitHooksRun = true
302 lsandoleakcheck()
303 }
304
305
306
307
308
309 if runningPanicDefers.Load() != 0 {
310
311 for c := 0; c < 1000; c++ {
312 if runningPanicDefers.Load() == 0 {
313 break
314 }
315 Gosched()
316 }
317 }
318 if panicking.Load() != 0 {
319 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
320 }
321 if !exitHooksRun {
322 runExitHooks(0)
323 }
324 if raceenabled {
325 racefini()
326 }
327
328 exit(0)
329 for {
330 var x *int32
331 *x = 0
332 }
333 }
334
335
336
337
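// os_beforeExit is called from os.Exit. It runs the registered exit hooks and,
// for a zero exit code, finalizes the race detector and runs the leak checker
// when those are enabled.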
338 func os_beforeExit(exitCode int) {
339 runExitHooks(exitCode)
340 if exitCode == 0 && raceenabled {
341 racefini()
342 }
343
344
345 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
346 lsandoleakcheck()
347 }
348 }
349
350 func init() {
351 exithook.Gosched = Gosched
352 exithook.Goid = func() uint64 { return getg().goid }
353 exithook.Throw = throw
354 }
355
356 func runExitHooks(code int) {
357 exithook.Run(code)
358 }
359
360
361 func init() {
362 go forcegchelper()
363 }
364
365 func forcegchelper() {
366 forcegc.g = getg()
367 lockInit(&forcegc.lock, lockRankForcegc)
368 for {
369 lock(&forcegc.lock)
370 if forcegc.idle.Load() {
371 throw("forcegc: phase error")
372 }
373 forcegc.idle.Store(true)
374 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
375
376 if debug.gctrace > 0 {
377 println("GC forced")
378 }
379
380 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
381 }
382 }
383
384
385
386
387
388 func Gosched() {
389 checkTimeouts()
390 mcall(gosched_m)
391 }
392
393
394
395
396
397 func goschedguarded() {
398 mcall(goschedguarded_m)
399 }
400
401
402
403
404
405
406 func goschedIfBusy() {
407 gp := getg()
408
409
410 if !gp.preempt && sched.npidle.Load() > 0 {
411 return
412 }
413 mcall(gosched_m)
414 }
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
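// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack. If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between the call
// to gopark and the call to unlockf. The wait reason explains why the
// goroutine is parked and is shown in stack traces and heap dumps.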
444 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
445 if reason != waitReasonSleep {
446 checkTimeouts()
447 }
448 mp := acquirem()
449 gp := mp.curg
450 status := readgstatus(gp)
451 if status != _Grunning && status != _Gscanrunning {
452 throw("gopark: bad g status")
453 }
454 mp.waitlock = lock
455 mp.waitunlockf = unlockf
456 gp.waitreason = reason
457 mp.waitTraceBlockReason = traceReason
458 mp.waitTraceSkip = traceskip
459 releasem(mp)
460
461 mcall(park_m)
462 }
463
464
465
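// goparkunlock puts the current goroutine into a waiting state and unlocks
// lock. The goroutine can be made runnable again by calling goready(gp).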
466 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
467 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
468 }
469
470
471
472
473
474
475
476
477
478
479
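// goready marks gp runnable and queues it on the current P, switching to the
// system stack to do so.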
480 func goready(gp *g, traceskip int) {
481 systemstack(func() {
482 ready(gp, traceskip, true)
483 })
484 }
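// Illustrative sketch (not part of the original source): a typical pairing of
// goparkunlock and goready as used elsewhere in the runtime. The names
// somelock, condition, and waiter below are hypothetical placeholders.
//
//	// waiting side
//	lock(&somelock)
//	for !condition {
//		waiter = getg()
//		goparkunlock(&somelock, waitReasonZero, traceBlockGeneric, 1)
//		lock(&somelock)
//	}
//	unlock(&somelock)
//
//	// waking side
//	lock(&somelock)
//	condition = true
//	gp := waiter
//	unlock(&somelock)
//	if gp != nil {
//		goready(gp, 1)
//	}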
485
486
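// acquireSudog returns a sudog from the per-P cache, refilling the cache from
// the central list (or allocating a new sudog) when it is empty. It holds the
// M via acquirem so the P cannot change underneath it while the cache is being
// manipulated.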
487 func acquireSudog() *sudog {
488
489
490
491
492
493
494
495
496 mp := acquirem()
497 pp := mp.p.ptr()
498 if len(pp.sudogcache) == 0 {
499 lock(&sched.sudoglock)
500
501 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
502 s := sched.sudogcache
503 sched.sudogcache = s.next
504 s.next = nil
505 pp.sudogcache = append(pp.sudogcache, s)
506 }
507 unlock(&sched.sudoglock)
508
509 if len(pp.sudogcache) == 0 {
510 pp.sudogcache = append(pp.sudogcache, new(sudog))
511 }
512 }
513 n := len(pp.sudogcache)
514 s := pp.sudogcache[n-1]
515 pp.sudogcache[n-1] = nil
516 pp.sudogcache = pp.sudogcache[:n-1]
517 if s.elem.get() != nil {
518 throw("acquireSudog: found s.elem != nil in cache")
519 }
520 releasem(mp)
521 return s
522 }
523
524
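// releaseSudog returns s to the per-P sudog cache, first moving half of a full
// cache onto the central list. It throws if s still references a channel,
// element, or list links.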
525 func releaseSudog(s *sudog) {
526 if s.elem.get() != nil {
527 throw("runtime: sudog with non-nil elem")
528 }
529 if s.isSelect {
530 throw("runtime: sudog with non-false isSelect")
531 }
532 if s.next != nil {
533 throw("runtime: sudog with non-nil next")
534 }
535 if s.prev != nil {
536 throw("runtime: sudog with non-nil prev")
537 }
538 if s.waitlink != nil {
539 throw("runtime: sudog with non-nil waitlink")
540 }
541 if s.c.get() != nil {
542 throw("runtime: sudog with non-nil c")
543 }
544 gp := getg()
545 if gp.param != nil {
546 throw("runtime: releaseSudog with non-nil gp.param")
547 }
548 mp := acquirem()
549 pp := mp.p.ptr()
550 if len(pp.sudogcache) == cap(pp.sudogcache) {
551
552 var first, last *sudog
553 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
554 n := len(pp.sudogcache)
555 p := pp.sudogcache[n-1]
556 pp.sudogcache[n-1] = nil
557 pp.sudogcache = pp.sudogcache[:n-1]
558 if first == nil {
559 first = p
560 } else {
561 last.next = p
562 }
563 last = p
564 }
565 lock(&sched.sudoglock)
566 last.next = sched.sudogcache
567 sched.sudogcache = first
568 unlock(&sched.sudoglock)
569 }
570 pp.sudogcache = append(pp.sudogcache, s)
571 releasem(mp)
572 }
573
574
575 func badmcall(fn func(*g)) {
576 throw("runtime: mcall called on m->g0 stack")
577 }
578
579 func badmcall2(fn func(*g)) {
580 throw("runtime: mcall function returned")
581 }
582
583 func badreflectcall() {
584 panic(plainError("arg size to reflect.call more than 1GB"))
585 }
586
587
588
589 func badmorestackg0() {
590 if !crashStackImplemented {
591 writeErrStr("fatal: morestack on g0\n")
592 return
593 }
594
595 g := getg()
596 switchToCrashStack(func() {
597 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
598 g.m.traceback = 2
599 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
600 print("\n")
601
602 throw("morestack on g0")
603 })
604 }
605
606
607
608 func badmorestackgsignal() {
609 writeErrStr("fatal: morestack on gsignal\n")
610 }
611
612
613 func badctxt() {
614 throw("ctxt != 0")
615 }
616
617
618
619 var gcrash g
620
621 var crashingG atomic.Pointer[g]
622
623
624
625
626
627
628
629
630
631 func switchToCrashStack(fn func()) {
632 me := getg()
633 if crashingG.CompareAndSwapNoWB(nil, me) {
634 switchToCrashStack0(fn)
635 abort()
636 }
637 if crashingG.Load() == me {
638
639 writeErrStr("fatal: recursive switchToCrashStack\n")
640 abort()
641 }
642
643 usleep_no_g(100)
644 writeErrStr("fatal: concurrent switchToCrashStack\n")
645 abort()
646 }
647
648
649
650
651 const crashStackImplemented = GOOS != "windows"
652
653
654 func switchToCrashStack0(fn func())
655
656 func lockedOSThread() bool {
657 gp := getg()
658 return gp.lockedm != 0 && gp.m.lockedg != 0
659 }
660
661 var (
662
663
664
665
666
667
668 allglock mutex
669 allgs []*g
670
671
672
673
674
675
676
677
678
679
680
681
682
683 allglen uintptr
684 allgptr **g
685 )
686
687 func allgadd(gp *g) {
688 if readgstatus(gp) == _Gidle {
689 throw("allgadd: bad status Gidle")
690 }
691
692 lock(&allglock)
693 allgs = append(allgs, gp)
694 if &allgs[0] != allgptr {
695 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
696 }
697 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
698 unlock(&allglock)
699 }
700
701
702
703
704 func allGsSnapshot() []*g {
705 assertWorldStoppedOrLockHeld(&allglock)
706
707
708
709
710
711
712 return allgs[:len(allgs):len(allgs)]
713 }
714
715
716 func atomicAllG() (**g, uintptr) {
717 length := atomic.Loaduintptr(&allglen)
718 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
719 return ptr, length
720 }
721
722
723 func atomicAllGIndex(ptr **g, i uintptr) *g {
724 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
725 }
726
727
728
729
730 func forEachG(fn func(gp *g)) {
731 lock(&allglock)
732 for _, gp := range allgs {
733 fn(gp)
734 }
735 unlock(&allglock)
736 }
737
738
739
740
741
742 func forEachGRace(fn func(gp *g)) {
743 ptr, length := atomicAllG()
744 for i := uintptr(0); i < length; i++ {
745 gp := atomicAllGIndex(ptr, i)
746 fn(gp)
747 }
748 return
749 }
750
751 const (
752
753
754 _GoidCacheBatch = 16
755 )
756
757
758
759 func cpuinit(env string) {
760 cpu.Initialize(env)
761
762
763
764 switch GOARCH {
765 case "386", "amd64":
766 x86HasPOPCNT = cpu.X86.HasPOPCNT
767 x86HasSSE41 = cpu.X86.HasSSE41
768 x86HasFMA = cpu.X86.HasFMA
769
770 case "arm":
771 armHasVFPv4 = cpu.ARM.HasVFPv4
772
773 case "arm64":
774 arm64HasATOMICS = cpu.ARM64.HasATOMICS
775
776 case "loong64":
777 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
778 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
779 loong64HasLSX = cpu.Loong64.HasLSX
780
781 case "riscv64":
782 riscv64HasZbb = cpu.RISCV64.HasZbb
783 }
784 }
785
786
787
788
789
790
791 func getGodebugEarly() (string, bool) {
792 const prefix = "GODEBUG="
793 var env string
794 switch GOOS {
795 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
796
797
798
799 n := int32(0)
800 for argv_index(argv, argc+1+n) != nil {
801 n++
802 }
803
804 for i := int32(0); i < n; i++ {
805 p := argv_index(argv, argc+1+i)
806 s := unsafe.String(p, findnull(p))
807
808 if stringslite.HasPrefix(s, prefix) {
809 env = gostringnocopy(p)[len(prefix):]
810 break
811 }
812 }
813 break
814
815 default:
816 return "", false
817 }
818 return env, true
819 }
820
821
822
823
824
825
826
827
828
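// schedinit initializes the scheduler during bootstrap.
//
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.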
829 func schedinit() {
830 lockInit(&sched.lock, lockRankSched)
831 lockInit(&sched.sysmonlock, lockRankSysmon)
832 lockInit(&sched.deferlock, lockRankDefer)
833 lockInit(&sched.sudoglock, lockRankSudog)
834 lockInit(&deadlock, lockRankDeadlock)
835 lockInit(&paniclk, lockRankPanic)
836 lockInit(&allglock, lockRankAllg)
837 lockInit(&allpLock, lockRankAllp)
838 lockInit(&reflectOffs.lock, lockRankReflectOffs)
839 lockInit(&finlock, lockRankFin)
840 lockInit(&cpuprof.lock, lockRankCpuprof)
841 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
842 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
843 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
844 traceLockInit()
845
846
847
848 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
849
850 lockVerifyMSize()
851
852 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
853
854
855
856 gp := getg()
857 if raceenabled {
858 gp.racectx, raceprocctx0 = raceinit()
859 }
860
861 sched.maxmcount = 10000
862 crashFD.Store(^uintptr(0))
863
864
865 worldStopped()
866
867 godebug, parsedGodebug := getGodebugEarly()
868 if parsedGodebug {
869 parseRuntimeDebugVars(godebug)
870 }
871 ticks.init()
872 moduledataverify()
873 stackinit()
874 randinit()
875 mallocinit()
876 cpuinit(godebug)
877 alginit()
878 mcommoninit(gp.m, -1)
879 modulesinit()
880 typelinksinit()
881 itabsinit()
882 stkobjinit()
883
884 sigsave(&gp.m.sigmask)
885 initSigmask = gp.m.sigmask
886
887 goargs()
888 goenvs()
889 secure()
890 checkfds()
891 if !parsedGodebug {
892
893
894 parseRuntimeDebugVars(gogetenv("GODEBUG"))
895 }
896 finishDebugVarsSetup()
897 gcinit()
898
899
900
901 gcrash.stack = stackalloc(16384)
902 gcrash.stackguard0 = gcrash.stack.lo + 1000
903 gcrash.stackguard1 = gcrash.stack.lo + 1000
904
905
906
907
908
909 if disableMemoryProfiling {
910 MemProfileRate = 0
911 }
912
913
914 mProfStackInit(gp.m)
915 defaultGOMAXPROCSInit()
916
917 lock(&sched.lock)
918 sched.lastpoll.Store(nanotime())
919 var procs int32
920 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
921 procs = int32(n)
922 sched.customGOMAXPROCS = true
923 } else {
924
925
926
927
928
929
930
931
932 procs = defaultGOMAXPROCS(numCPUStartup)
933 }
934 if procresize(procs) != nil {
935 throw("unknown runnable goroutine during bootstrap")
936 }
937 unlock(&sched.lock)
938
939
940 worldStarted()
941
942 if buildVersion == "" {
943
944
945 buildVersion = "unknown"
946 }
947 if len(modinfo) == 1 {
948
949
950 modinfo = ""
951 }
952 }
953
954 func dumpgstatus(gp *g) {
955 thisg := getg()
956 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
957 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
958 }
959
960
961 func checkmcount() {
962 assertLockHeld(&sched.lock)
963
964
965
966
967
968
969
970
971
972 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
973 if count > sched.maxmcount {
974 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
975 throw("thread exhaustion")
976 }
977 }
978
979
980
981
982
983 func mReserveID() int64 {
984 assertLockHeld(&sched.lock)
985
986 if sched.mnext+1 < sched.mnext {
987 throw("runtime: thread ID overflow")
988 }
989 id := sched.mnext
990 sched.mnext++
991 checkmcount()
992 return id
993 }
994
995
996 func mcommoninit(mp *m, id int64) {
997 gp := getg()
998
999
1000 if gp != gp.m.g0 {
1001 callers(1, mp.createstack[:])
1002 }
1003
1004 lock(&sched.lock)
1005
1006 if id >= 0 {
1007 mp.id = id
1008 } else {
1009 mp.id = mReserveID()
1010 }
1011
1012 mp.self = newMWeakPointer(mp)
1013
1014 mrandinit(mp)
1015
1016 mpreinit(mp)
1017 if mp.gsignal != nil {
1018 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1019 }
1020
1021
1022
1023 mp.alllink = allm
1024
1025
1026
1027 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1028 unlock(&sched.lock)
1029
1030
1031 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1032 mp.cgoCallers = new(cgoCallers)
1033 }
1034 mProfStackInit(mp)
1035 }
1036
1037
1038
1039
1040
1041 func mProfStackInit(mp *m) {
1042 if debug.profstackdepth == 0 {
1043
1044
1045 return
1046 }
1047 mp.profStack = makeProfStackFP()
1048 mp.mLockProfile.stack = makeProfStackFP()
1049 }
1050
1051
1052
1053
1054 func makeProfStackFP() []uintptr {
1055
1056
1057
1058
1059
1060
1061 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1062 }
1063
1064
1065
1066 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1067
1068
1069 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1070
1071 func (mp *m) becomeSpinning() {
1072 mp.spinning = true
1073 sched.nmspinning.Add(1)
1074 sched.needspinning.Store(0)
1075 }
1076
1077
1078
1079
1080
1081
1082
1083
1084 func (mp *m) snapshotAllp() []*p {
1085 mp.allpSnapshot = allp
1086 return mp.allpSnapshot
1087 }
1088
1089
1090
1091
1092
1093
1094
1095 func (mp *m) clearAllpSnapshot() {
1096 mp.allpSnapshot = nil
1097 }
1098
1099 func (mp *m) hasCgoOnStack() bool {
1100 return mp.ncgo > 0 || mp.isextra
1101 }
1102
1103 const (
1104
1105
1106 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1107
1108
1109
1110 osHasLowResClockInt = goos.IsWindows
1111
1112
1113
1114 osHasLowResClock = osHasLowResClockInt > 0
1115 )
1116
1117
1118 func ready(gp *g, traceskip int, next bool) {
1119 status := readgstatus(gp)
1120
1121
1122 mp := acquirem()
1123 if status&^_Gscan != _Gwaiting {
1124 dumpgstatus(gp)
1125 throw("bad g->status in ready")
1126 }
1127
1128
1129 trace := traceAcquire()
1130 casgstatus(gp, _Gwaiting, _Grunnable)
1131 if trace.ok() {
1132 trace.GoUnpark(gp, traceskip)
1133 traceRelease(trace)
1134 }
1135 runqput(mp.p.ptr(), gp, next)
1136 wakep()
1137 releasem(mp)
1138 }
1139
1140
1141
1142 const freezeStopWait = 0x7fffffff
1143
1144
1145
1146 var freezing atomic.Bool
1147
1148
1149
1150
1151 func freezetheworld() {
1152 freezing.Store(true)
1153 if debug.dontfreezetheworld > 0 {
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178 usleep(1000)
1179 return
1180 }
1181
1182
1183
1184
1185 for i := 0; i < 5; i++ {
1186
1187 sched.stopwait = freezeStopWait
1188 sched.gcwaiting.Store(true)
1189
1190 if !preemptall() {
1191 break
1192 }
1193 usleep(1000)
1194 }
1195
1196 usleep(1000)
1197 preemptall()
1198 usleep(1000)
1199 }
1200
1201
1202
1203
1204
1205 func readgstatus(gp *g) uint32 {
1206 return gp.atomicstatus.Load()
1207 }
1208
1209
1210
1211
1212
1213 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1214 success := false
1215
1216
1217 switch oldval {
1218 default:
1219 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1220 dumpgstatus(gp)
1221 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1222 case _Gscanrunnable,
1223 _Gscanwaiting,
1224 _Gscanrunning,
1225 _Gscansyscall,
1226 _Gscanleaked,
1227 _Gscanpreempted,
1228 _Gscandeadextra:
1229 if newval == oldval&^_Gscan {
1230 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1231 }
1232 }
1233 if !success {
1234 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1235 dumpgstatus(gp)
1236 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1237 }
1238 releaseLockRankAndM(lockRankGscan)
1239 }
1240
1241
1242
1243 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1244 switch oldval {
1245 case _Grunnable,
1246 _Grunning,
1247 _Gwaiting,
1248 _Gleaked,
1249 _Gsyscall,
1250 _Gdeadextra:
1251 if newval == oldval|_Gscan {
1252 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1253 if r {
1254 acquireLockRankAndM(lockRankGscan)
1255 }
1256 return r
1257
1258 }
1259 }
1260 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1261 throw("bad oldval passed to castogscanstatus")
1262 return false
1263 }
1264
1265
1266
1267 var casgstatusAlwaysTrack = false
1268
1269
1270
1271
1272
1273
1274
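// casgstatus changes gp's status from oldval to newval, spinning until the
// transition succeeds. If asked to move to or from a Gscan status it throws;
// use castogscanstatus and casfrom_Gscanstatus instead. It also updates the
// scheduler's latency-tracking state for the goroutine.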
1275 func casgstatus(gp *g, oldval, newval uint32) {
1276 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1277 systemstack(func() {
1278
1279
1280 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1281 throw("casgstatus: bad incoming values")
1282 })
1283 }
1284
1285 lockWithRankMayAcquire(nil, lockRankGscan)
1286
1287
1288 const yieldDelay = 5 * 1000
1289 var nextYield int64
1290
1291
1292
1293 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1294 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1295 systemstack(func() {
1296
1297
1298 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1299 })
1300 }
1301 if i == 0 {
1302 nextYield = nanotime() + yieldDelay
1303 }
1304 if nanotime() < nextYield {
1305 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1306 procyield(1)
1307 }
1308 } else {
1309 osyield()
1310 nextYield = nanotime() + yieldDelay/2
1311 }
1312 }
1313
1314 if gp.bubble != nil {
1315 systemstack(func() {
1316 gp.bubble.changegstatus(gp, oldval, newval)
1317 })
1318 }
1319
1320 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1321
1322
1323 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1324 gp.tracking = true
1325 }
1326 gp.trackingSeq++
1327 }
1328 if !gp.tracking {
1329 return
1330 }
1331
1332
1333
1334
1335
1336
1337 switch oldval {
1338 case _Grunnable:
1339
1340
1341
1342 now := nanotime()
1343 gp.runnableTime += now - gp.trackingStamp
1344 gp.trackingStamp = 0
1345 case _Gwaiting:
1346 if !gp.waitreason.isMutexWait() {
1347
1348 break
1349 }
1350
1351
1352
1353
1354
1355 now := nanotime()
1356 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1357 gp.trackingStamp = 0
1358 }
1359 switch newval {
1360 case _Gwaiting:
1361 if !gp.waitreason.isMutexWait() {
1362
1363 break
1364 }
1365
1366 now := nanotime()
1367 gp.trackingStamp = now
1368 case _Grunnable:
1369
1370
1371 now := nanotime()
1372 gp.trackingStamp = now
1373 case _Grunning:
1374
1375
1376
1377 gp.tracking = false
1378 sched.timeToRun.record(gp.runnableTime)
1379 gp.runnableTime = 0
1380 }
1381 }
1382
1383
1384
1385
1386 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1387
1388 gp.waitreason = reason
1389 casgstatus(gp, old, _Gwaiting)
1390 }
1391
1392
1393
1394
1395
1396
1397
1398
1399 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1400 if !reason.isWaitingForSuspendG() {
1401 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1402 }
1403 casGToWaiting(gp, old, reason)
1404 }
1405
1406
1407
1408
1409
1410 func casGToPreemptScan(gp *g, old, new uint32) {
1411 if old != _Grunning || new != _Gscan|_Gpreempted {
1412 throw("bad g transition")
1413 }
1414 acquireLockRankAndM(lockRankGscan)
1415 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1416 }
1417
1418
1419
1420
1421
1422
1423 }
1424
1425
1426
1427
1428 func casGFromPreempted(gp *g, old, new uint32) bool {
1429 if old != _Gpreempted || new != _Gwaiting {
1430 throw("bad g transition")
1431 }
1432 gp.waitreason = waitReasonPreempted
1433 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1434 return false
1435 }
1436 if bubble := gp.bubble; bubble != nil {
1437 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1438 }
1439 return true
1440 }
1441
1442
1443 type stwReason uint8
1444
1445
1446
1447
1448 const (
1449 stwUnknown stwReason = iota
1450 stwGCMarkTerm
1451 stwGCSweepTerm
1452 stwWriteHeapDump
1453 stwGoroutineProfile
1454 stwGoroutineProfileCleanup
1455 stwAllGoroutinesStack
1456 stwReadMemStats
1457 stwAllThreadsSyscall
1458 stwGOMAXPROCS
1459 stwStartTrace
1460 stwStopTrace
1461 stwForTestCountPagesInUse
1462 stwForTestReadMetricsSlow
1463 stwForTestReadMemStatsSlow
1464 stwForTestPageCachePagesLeaked
1465 stwForTestResetDebugLog
1466 )
1467
1468 func (r stwReason) String() string {
1469 return stwReasonStrings[r]
1470 }
1471
1472 func (r stwReason) isGC() bool {
1473 return r == stwGCMarkTerm || r == stwGCSweepTerm
1474 }
1475
1476
1477
1478
1479 var stwReasonStrings = [...]string{
1480 stwUnknown: "unknown",
1481 stwGCMarkTerm: "GC mark termination",
1482 stwGCSweepTerm: "GC sweep termination",
1483 stwWriteHeapDump: "write heap dump",
1484 stwGoroutineProfile: "goroutine profile",
1485 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1486 stwAllGoroutinesStack: "all goroutines stack trace",
1487 stwReadMemStats: "read mem stats",
1488 stwAllThreadsSyscall: "AllThreadsSyscall",
1489 stwGOMAXPROCS: "GOMAXPROCS",
1490 stwStartTrace: "start trace",
1491 stwStopTrace: "stop trace",
1492 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1493 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1494 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1495 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1496 stwForTestResetDebugLog: "ResetDebugLog (test)",
1497 }
1498
1499
1500
1501 type worldStop struct {
1502 reason stwReason
1503 startedStopping int64
1504 finishedStopping int64
1505 stoppingCPUTime int64
1506 }
1507
1508
1509
1510
1511 var stopTheWorldContext worldStop
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
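// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points, and records reason as the reason for the stop.
// On return, only the current goroutine's P is running. The caller must not
// hold worldsema on entry and must call startTheWorld when other P's should
// resume execution.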
1530 func stopTheWorld(reason stwReason) worldStop {
1531 semacquire(&worldsema)
1532 gp := getg()
1533 gp.m.preemptoff = reason.String()
1534 systemstack(func() {
1535 stopTheWorldContext = stopTheWorldWithSema(reason)
1536 })
1537 return stopTheWorldContext
1538 }
1539
1540
1541
1542
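// startTheWorld undoes the effects of stopTheWorld.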
1543 func startTheWorld(w worldStop) {
1544 systemstack(func() { startTheWorldWithSema(0, w) })
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 mp := acquirem()
1562 mp.preemptoff = ""
1563 semrelease1(&worldsema, true, 0)
1564 releasem(mp)
1565 }
1566
1567
1568
1569
1570 func stopTheWorldGC(reason stwReason) worldStop {
1571 semacquire(&gcsema)
1572 return stopTheWorld(reason)
1573 }
1574
1575
1576
1577
1578 func startTheWorldGC(w worldStop) {
1579 startTheWorld(w)
1580 semrelease(&gcsema)
1581 }
1582
1583
1584 var worldsema uint32 = 1
1585
1586
1587
1588
1589
1590
1591
1592 var gcsema uint32 = 1
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626 func stopTheWorldWithSema(reason stwReason) worldStop {
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1640
1641 trace := traceAcquire()
1642 if trace.ok() {
1643 trace.STWStart(reason)
1644 traceRelease(trace)
1645 }
1646 gp := getg()
1647
1648
1649
1650 if gp.m.locks > 0 {
1651 throw("stopTheWorld: holding locks")
1652 }
1653
1654 lock(&sched.lock)
1655 start := nanotime()
1656 sched.stopwait = gomaxprocs
1657 sched.gcwaiting.Store(true)
1658 preemptall()
1659
1660
1661 gp.m.p.ptr().status = _Pgcstop
1662 gp.m.p.ptr().gcStopTime = start
1663 sched.stopwait--
1664
1665
1666 for _, pp := range allp {
1667 if thread, ok := setBlockOnExitSyscall(pp); ok {
1668 thread.gcstopP()
1669 thread.resume()
1670 }
1671 }
1672
1673
1674 now := nanotime()
1675 for {
1676 pp, _ := pidleget(now)
1677 if pp == nil {
1678 break
1679 }
1680 pp.status = _Pgcstop
1681 pp.gcStopTime = nanotime()
1682 sched.stopwait--
1683 }
1684 wait := sched.stopwait > 0
1685 unlock(&sched.lock)
1686
1687
1688 if wait {
1689 for {
1690
1691 if notetsleep(&sched.stopnote, 100*1000) {
1692 noteclear(&sched.stopnote)
1693 break
1694 }
1695 preemptall()
1696 }
1697 }
1698
1699 finish := nanotime()
1700 startTime := finish - start
1701 if reason.isGC() {
1702 sched.stwStoppingTimeGC.record(startTime)
1703 } else {
1704 sched.stwStoppingTimeOther.record(startTime)
1705 }
1706
1707
1708
1709
1710
1711 stoppingCPUTime := int64(0)
1712 bad := ""
1713 if sched.stopwait != 0 {
1714 bad = "stopTheWorld: not stopped (stopwait != 0)"
1715 } else {
1716 for _, pp := range allp {
1717 if pp.status != _Pgcstop {
1718 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1719 }
1720 if pp.gcStopTime == 0 && bad == "" {
1721 bad = "stopTheWorld: broken CPU time accounting"
1722 }
1723 stoppingCPUTime += finish - pp.gcStopTime
1724 pp.gcStopTime = 0
1725 }
1726 }
1727 if freezing.Load() {
1728
1729
1730
1731
1732 lock(&deadlock)
1733 lock(&deadlock)
1734 }
1735 if bad != "" {
1736 throw(bad)
1737 }
1738
1739 worldStopped()
1740
1741
1742 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1743
1744 return worldStop{
1745 reason: reason,
1746 startedStopping: start,
1747 finishedStopping: finish,
1748 stoppingCPUTime: stoppingCPUTime,
1749 }
1750 }
1751
1752
1753
1754
1755
1756
1757
1758 func startTheWorldWithSema(now int64, w worldStop) int64 {
1759 assertWorldStopped()
1760
1761 mp := acquirem()
1762 if netpollinited() {
1763 list, delta := netpoll(0)
1764 injectglist(&list)
1765 netpollAdjustWaiters(delta)
1766 }
1767 lock(&sched.lock)
1768
1769 procs := gomaxprocs
1770 if newprocs != 0 {
1771 procs = newprocs
1772 newprocs = 0
1773 }
1774 p1 := procresize(procs)
1775 sched.gcwaiting.Store(false)
1776 if sched.sysmonwait.Load() {
1777 sched.sysmonwait.Store(false)
1778 notewakeup(&sched.sysmonnote)
1779 }
1780 unlock(&sched.lock)
1781
1782 worldStarted()
1783
1784 for p1 != nil {
1785 p := p1
1786 p1 = p1.link.ptr()
1787 if p.m != 0 {
1788 mp := p.m.ptr()
1789 p.m = 0
1790 if mp.nextp != 0 {
1791 throw("startTheWorld: inconsistent mp->nextp")
1792 }
1793 mp.nextp.set(p)
1794 notewakeup(&mp.park)
1795 } else {
1796
1797 newm(nil, p, -1)
1798 }
1799 }
1800
1801
1802 if now == 0 {
1803 now = nanotime()
1804 }
1805 totalTime := now - w.startedStopping
1806 if w.reason.isGC() {
1807 sched.stwTotalTimeGC.record(totalTime)
1808 } else {
1809 sched.stwTotalTimeOther.record(totalTime)
1810 }
1811 trace := traceAcquire()
1812 if trace.ok() {
1813 trace.STWDone()
1814 traceRelease(trace)
1815 }
1816
1817
1818
1819
1820 wakep()
1821
1822 releasem(mp)
1823
1824 return now
1825 }
1826
1827
1828
1829 func usesLibcall() bool {
1830 switch GOOS {
1831 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1832 return true
1833 }
1834 return false
1835 }
1836
1837
1838
1839 func mStackIsSystemAllocated() bool {
1840 switch GOOS {
1841 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1842 return true
1843 }
1844 return false
1845 }
1846
1847
1848
1849 func mstart()
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
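// mstart0 is the Go entry-point for new Ms. It may not split the stack because
// the stack bounds may not be set up yet, and it may run without a P, so write
// barriers are not allowed. For OS-allocated stacks it estimates the stack
// bounds from the current stack pointer before calling mstart1.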
1860 func mstart0() {
1861 gp := getg()
1862
1863 osStack := gp.stack.lo == 0
1864 if osStack {
1865
1866
1867
1868
1869
1870
1871
1872
1873 size := gp.stack.hi
1874 if size == 0 {
1875 size = 16384 * sys.StackGuardMultiplier
1876 }
1877 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1878 gp.stack.lo = gp.stack.hi - size + 1024
1879 }
1880
1881
1882 gp.stackguard0 = gp.stack.lo + stackGuard
1883
1884
1885 gp.stackguard1 = gp.stackguard0
1886 mstart1()
1887
1888
1889 if mStackIsSystemAllocated() {
1890
1891
1892
1893 osStack = true
1894 }
1895 mexit(osStack)
1896 }
1897
1898
1899
1900
1901
1902 func mstart1() {
1903 gp := getg()
1904
1905 if gp != gp.m.g0 {
1906 throw("bad runtime·mstart")
1907 }
1908
1909
1910
1911
1912
1913
1914
1915 gp.sched.g = guintptr(unsafe.Pointer(gp))
1916 gp.sched.pc = sys.GetCallerPC()
1917 gp.sched.sp = sys.GetCallerSP()
1918
1919 asminit()
1920 minit()
1921
1922
1923
1924 if gp.m == &m0 {
1925 mstartm0()
1926 }
1927
1928 if debug.dataindependenttiming == 1 {
1929 sys.EnableDIT()
1930 }
1931
1932 if fn := gp.m.mstartfn; fn != nil {
1933 fn()
1934 }
1935
1936 if gp.m != &m0 {
1937 acquirep(gp.m.nextp.ptr())
1938 gp.m.nextp = 0
1939 }
1940 schedule()
1941 }
1942
1943
1944
1945
1946
1947
1948
1949 func mstartm0() {
1950
1951
1952
1953 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1954 cgoHasExtraM = true
1955 newextram()
1956 }
1957 initsig(false)
1958 }
1959
1960
1961
1962
1963 func mPark() {
1964 gp := getg()
1965 notesleep(&gp.m.park)
1966 noteclear(&gp.m.park)
1967 }
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
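// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at the top of
// the thread stack. Instead, use gogo(&gp.m.g0.sched) to unwind the stack to
// the point that exits the thread. It is entered with m.p != nil and releases
// the P before exiting.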
1979 func mexit(osStack bool) {
1980 mp := getg().m
1981
1982 if mp == &m0 {
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994 handoffp(releasep())
1995 lock(&sched.lock)
1996 sched.nmfreed++
1997 checkdead()
1998 unlock(&sched.lock)
1999 mPark()
2000 throw("locked m0 woke up")
2001 }
2002
2003 sigblock(true)
2004 unminit()
2005
2006
2007 if mp.gsignal != nil {
2008 stackfree(mp.gsignal.stack)
2009 if valgrindenabled {
2010 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2011 mp.gsignal.valgrindStackID = 0
2012 }
2013
2014
2015
2016
2017 mp.gsignal = nil
2018 }
2019
2020
2021 vgetrandomDestroy(mp)
2022
2023
2024
2025 mp.self.clear()
2026
2027
2028 lock(&sched.lock)
2029 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2030 if *pprev == mp {
2031 *pprev = mp.alllink
2032 goto found
2033 }
2034 }
2035 throw("m not found in allm")
2036 found:
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051 mp.freeWait.Store(freeMWait)
2052 mp.freelink = sched.freem
2053 sched.freem = mp
2054 unlock(&sched.lock)
2055
2056 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2057 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2058
2059
2060 handoffp(releasep())
2061
2062
2063
2064
2065
2066 lock(&sched.lock)
2067 sched.nmfreed++
2068 checkdead()
2069 unlock(&sched.lock)
2070
2071 if GOOS == "darwin" || GOOS == "ios" {
2072
2073
2074 if mp.signalPending.Load() != 0 {
2075 pendingPreemptSignals.Add(-1)
2076 }
2077 }
2078
2079
2080
2081 mdestroy(mp)
2082
2083 if osStack {
2084
2085 mp.freeWait.Store(freeMRef)
2086
2087
2088
2089 return
2090 }
2091
2092
2093
2094
2095
2096 exitThread(&mp.freeWait)
2097 }
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
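// forEachP calls fn(p) for every P p when p reaches a GC safe point. If a P is
// currently executing code, this brings the P to a safe point and executes fn
// on that P. If the P is not executing code (it is idle or in a syscall), fn
// is run on behalf of that P before continuing.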
2109 func forEachP(reason waitReason, fn func(*p)) {
2110 systemstack(func() {
2111 gp := getg().m.curg
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123 casGToWaitingForSuspendG(gp, _Grunning, reason)
2124 forEachPInternal(fn)
2125 casgstatus(gp, _Gwaiting, _Grunning)
2126 })
2127 }
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138 func forEachPInternal(fn func(*p)) {
2139 mp := acquirem()
2140 pp := getg().m.p.ptr()
2141
2142 lock(&sched.lock)
2143 if sched.safePointWait != 0 {
2144 throw("forEachP: sched.safePointWait != 0")
2145 }
2146 sched.safePointWait = gomaxprocs - 1
2147 sched.safePointFn = fn
2148
2149
2150 for _, p2 := range allp {
2151 if p2 != pp {
2152 atomic.Store(&p2.runSafePointFn, 1)
2153 }
2154 }
2155 preemptall()
2156
2157
2158
2159
2160
2161
2162
2163 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2164 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2165 fn(p)
2166 sched.safePointWait--
2167 }
2168 }
2169
2170 wait := sched.safePointWait > 0
2171 unlock(&sched.lock)
2172
2173
2174 fn(pp)
2175
2176
2177
2178 for _, p2 := range allp {
2179 if atomic.Load(&p2.runSafePointFn) != 1 {
2180
2181 continue
2182 }
2183 if thread, ok := setBlockOnExitSyscall(p2); ok {
2184 thread.takeP()
2185 thread.resume()
2186 handoffp(p2)
2187 }
2188 }
2189
2190
2191 if wait {
2192 for {
2193
2194
2195
2196
2197 if notetsleep(&sched.safePointNote, 100*1000) {
2198 noteclear(&sched.safePointNote)
2199 break
2200 }
2201 preemptall()
2202 }
2203 }
2204 if sched.safePointWait != 0 {
2205 throw("forEachP: not done")
2206 }
2207 for _, p2 := range allp {
2208 if p2.runSafePointFn != 0 {
2209 throw("forEachP: P did not run fn")
2210 }
2211 }
2212
2213 lock(&sched.lock)
2214 sched.safePointFn = nil
2215 unlock(&sched.lock)
2216 releasem(mp)
2217 }
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230 func runSafePointFn() {
2231 p := getg().m.p.ptr()
2232
2233
2234
2235 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2236 return
2237 }
2238 sched.safePointFn(p)
2239 lock(&sched.lock)
2240 sched.safePointWait--
2241 if sched.safePointWait == 0 {
2242 notewakeup(&sched.safePointNote)
2243 }
2244 unlock(&sched.lock)
2245 }
2246
2247
2248
2249
2250 var cgoThreadStart unsafe.Pointer
2251
2252 type cgothreadstart struct {
2253 g guintptr
2254 tls *uint64
2255 fn unsafe.Pointer
2256 }
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
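// allocm allocates a new m unassociated with any thread. It can use pp for
// allocation context if needed (it is acquired temporarily when the caller has
// no P of its own). fn is recorded as the new m's m.mstartfn, and id is an
// optional pre-allocated m ID; omit it by passing -1. It also frees any m's on
// sched.freem whose g0 stacks can now be released.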
2267 func allocm(pp *p, fn func(), id int64) *m {
2268 allocmLock.rlock()
2269
2270
2271
2272
2273 acquirem()
2274
2275 gp := getg()
2276 if gp.m.p == 0 {
2277 acquirep(pp)
2278 }
2279
2280
2281
2282 if sched.freem != nil {
2283 lock(&sched.lock)
2284 var newList *m
2285 for freem := sched.freem; freem != nil; {
2286
2287 wait := freem.freeWait.Load()
2288 if wait == freeMWait {
2289 next := freem.freelink
2290 freem.freelink = newList
2291 newList = freem
2292 freem = next
2293 continue
2294 }
2295
2296
2297
2298 if traceEnabled() || traceShuttingDown() {
2299 traceThreadDestroy(freem)
2300 }
2301
2302
2303
2304 if wait == freeMStack {
2305
2306
2307
2308 systemstack(func() {
2309 stackfree(freem.g0.stack)
2310 if valgrindenabled {
2311 valgrindDeregisterStack(freem.g0.valgrindStackID)
2312 freem.g0.valgrindStackID = 0
2313 }
2314 })
2315 }
2316 freem = freem.freelink
2317 }
2318 sched.freem = newList
2319 unlock(&sched.lock)
2320 }
2321
2322 mp := &new(mPadded).m
2323 mp.mstartfn = fn
2324 mcommoninit(mp, id)
2325
2326
2327
2328 if iscgo || mStackIsSystemAllocated() {
2329 mp.g0 = malg(-1)
2330 } else {
2331 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2332 }
2333 mp.g0.m = mp
2334
2335 if pp == gp.m.p.ptr() {
2336 releasep()
2337 }
2338
2339 releasem(gp.m)
2340 allocmLock.runlock()
2341 return mp
2342 }
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
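// needm is called when a cgo callback happens on a thread not created by Go
// (a thread without an m). It grabs an m from the extra list, installs it as
// the current thread's m, and prepares it to run Go code. The signal argument
// indicates whether the callback is running on a signal handling stack.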
2383 func needm(signal bool) {
2384 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2385
2386
2387
2388
2389
2390
2391 writeErrStr("fatal error: cgo callback before cgo call\n")
2392 exit(1)
2393 }
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 var sigmask sigset
2404 sigsave(&sigmask)
2405 sigblock(false)
2406
2407
2408
2409
2410 mp, last := getExtraM()
2411
2412
2413
2414
2415
2416
2417
2418
2419 mp.needextram = last
2420
2421
2422 mp.sigmask = sigmask
2423
2424
2425
2426 osSetupTLS(mp)
2427
2428
2429
2430 setg(mp.g0)
2431 sp := sys.GetCallerSP()
2432 callbackUpdateSystemStack(mp, sp, signal)
2433
2434
2435
2436
2437 mp.isExtraInC = false
2438
2439
2440 asminit()
2441 minit()
2442
2443
2444
2445
2446
2447
2448 var trace traceLocker
2449 if !signal {
2450 trace = traceAcquire()
2451 }
2452
2453
2454 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2455 sched.ngsys.Add(-1)
2456 sched.nGsyscallNoP.Add(1)
2457
2458 if !signal {
2459 if trace.ok() {
2460 trace.GoCreateSyscall(mp.curg)
2461 traceRelease(trace)
2462 }
2463 }
2464 mp.isExtraInSig = signal
2465 }
2466
2467
2468
2469
2470 func needAndBindM() {
2471 needm(false)
2472
2473 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2474 cgoBindM()
2475 }
2476 }
2477
2478
2479
2480
2481 func newextram() {
2482 c := extraMWaiters.Swap(0)
2483 if c > 0 {
2484 for i := uint32(0); i < c; i++ {
2485 oneNewExtraM()
2486 }
2487 } else if extraMLength.Load() == 0 {
2488
2489 oneNewExtraM()
2490 }
2491 }
2492
2493
2494 func oneNewExtraM() {
2495
2496
2497
2498
2499
2500 mp := allocm(nil, nil, -1)
2501 gp := malg(4096)
2502 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2503 gp.sched.sp = gp.stack.hi
2504 gp.sched.sp -= 4 * goarch.PtrSize
2505 gp.sched.lr = 0
2506 gp.sched.g = guintptr(unsafe.Pointer(gp))
2507 gp.syscallpc = gp.sched.pc
2508 gp.syscallsp = gp.sched.sp
2509 gp.stktopsp = gp.sched.sp
2510
2511
2512
2513 casgstatus(gp, _Gidle, _Gdeadextra)
2514 gp.m = mp
2515 mp.curg = gp
2516 mp.isextra = true
2517
2518 mp.isExtraInC = true
2519 mp.lockedInt++
2520 mp.lockedg.set(gp)
2521 gp.lockedm.set(mp)
2522 gp.goid = sched.goidgen.Add(1)
2523 if raceenabled {
2524 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2525 }
2526
2527 allgadd(gp)
2528
2529
2530
2531
2532
2533 sched.ngsys.Add(1)
2534
2535
2536 addExtraM(mp)
2537 }
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
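// dropm is called when a cgo callback is done with the m it obtained from
// needm and the thread is returning to non-Go code. It undoes the effects of
// needm: the goroutine is moved back to _Gdeadextra, the thread state set up
// by minit is torn down, and the m is returned to the extra list.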
2572 func dropm() {
2573
2574
2575
2576 mp := getg().m
2577
2578
2579
2580
2581
2582 var trace traceLocker
2583 if !mp.isExtraInSig {
2584 trace = traceAcquire()
2585 }
2586
2587
2588 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2589 mp.curg.preemptStop = false
2590 sched.ngsys.Add(1)
2591 sched.nGsyscallNoP.Add(-1)
2592
2593 if !mp.isExtraInSig {
2594 if trace.ok() {
2595 trace.GoDestroySyscall()
2596 traceRelease(trace)
2597 }
2598 }
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613 mp.syscalltick--
2614
2615
2616
2617 mp.curg.trace.reset()
2618
2619
2620
2621
2622 if traceEnabled() || traceShuttingDown() {
2623
2624
2625
2626
2627
2628
2629
2630 lock(&sched.lock)
2631 traceThreadDestroy(mp)
2632 unlock(&sched.lock)
2633 }
2634 mp.isExtraInSig = false
2635
2636
2637
2638
2639
2640 sigmask := mp.sigmask
2641 sigblock(false)
2642 unminit()
2643
2644 setg(nil)
2645
2646
2647
2648 g0 := mp.g0
2649 g0.stack.hi = 0
2650 g0.stack.lo = 0
2651 g0.stackguard0 = 0
2652 g0.stackguard1 = 0
2653 mp.g0StackAccurate = false
2654
2655 putExtraM(mp)
2656
2657 msigrestore(sigmask)
2658 }
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680 func cgoBindM() {
2681 if GOOS == "windows" || GOOS == "plan9" {
2682 fatal("bindm in unexpected GOOS")
2683 }
2684 g := getg()
2685 if g.m.g0 != g {
2686 fatal("the current g is not g0")
2687 }
2688 if _cgo_bindm != nil {
2689 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2690 }
2691 }
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704 func getm() uintptr {
2705 return uintptr(unsafe.Pointer(getg().m))
2706 }
2707
2708 var (
2709
2710
2711
2712
2713
2714
2715 extraM atomic.Uintptr
2716
2717 extraMLength atomic.Uint32
2718
2719 extraMWaiters atomic.Uint32
2720
2721
2722 extraMInUse atomic.Uint32
2723 )
2724
2725
2726
2727
2728
2729
2730
2731
2732 func lockextra(nilokay bool) *m {
2733 const locked = 1
2734
2735 incr := false
2736 for {
2737 old := extraM.Load()
2738 if old == locked {
2739 osyield_no_g()
2740 continue
2741 }
2742 if old == 0 && !nilokay {
2743 if !incr {
2744
2745
2746
2747 extraMWaiters.Add(1)
2748 incr = true
2749 }
2750 usleep_no_g(1)
2751 continue
2752 }
2753 if extraM.CompareAndSwap(old, locked) {
2754 return (*m)(unsafe.Pointer(old))
2755 }
2756 osyield_no_g()
2757 continue
2758 }
2759 }
2760
2761
2762 func unlockextra(mp *m, delta int32) {
2763 extraMLength.Add(delta)
2764 extraM.Store(uintptr(unsafe.Pointer(mp)))
2765 }
2766
2767
2768
2769
2770
2771
2772
2773
2774 func getExtraM() (mp *m, last bool) {
2775 mp = lockextra(false)
2776 extraMInUse.Add(1)
2777 unlockextra(mp.schedlink.ptr(), -1)
2778 return mp, mp.schedlink.ptr() == nil
2779 }
2780
2781
2782
2783
2784
2785 func putExtraM(mp *m) {
2786 extraMInUse.Add(-1)
2787 addExtraM(mp)
2788 }
2789
2790
2791
2792
2793 func addExtraM(mp *m) {
2794 mnext := lockextra(true)
2795 mp.schedlink.set(mnext)
2796 unlockextra(mp, 1)
2797 }
2798
2799 var (
2800
2801
2802
2803 allocmLock rwmutex
2804
2805
2806
2807
2808 execLock rwmutex
2809 )
2810
2811
2812
2813 const (
2814 failthreadcreate = "runtime: failed to create new OS thread\n"
2815 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2816 )
2817
2818
2819
2820
2821 var newmHandoff struct {
2822 lock mutex
2823
2824
2825
2826 newm muintptr
2827
2828
2829
2830 waiting bool
2831 wake note
2832
2833
2834
2835
2836 haveTemplateThread uint32
2837 }
2838
2839
2840
2841
2842
2843
2844
2845
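// newm creates a new m. It will start off with a call to fn, or the scheduler
// if fn is nil. id is an optional pre-allocated m ID; omit it by passing -1.
// If the calling thread is locked to a goroutine or was started by C, the
// actual thread creation is handed off to the template thread so that the new
// thread starts from a known-good state.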
2846 func newm(fn func(), pp *p, id int64) {
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857 acquirem()
2858
2859 mp := allocm(pp, fn, id)
2860 mp.nextp.set(pp)
2861 mp.sigmask = initSigmask
2862 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874 lock(&newmHandoff.lock)
2875 if newmHandoff.haveTemplateThread == 0 {
2876 throw("on a locked thread with no template thread")
2877 }
2878 mp.schedlink = newmHandoff.newm
2879 newmHandoff.newm.set(mp)
2880 if newmHandoff.waiting {
2881 newmHandoff.waiting = false
2882 notewakeup(&newmHandoff.wake)
2883 }
2884 unlock(&newmHandoff.lock)
2885
2886
2887
2888 releasem(getg().m)
2889 return
2890 }
2891 newm1(mp)
2892 releasem(getg().m)
2893 }
2894
2895 func newm1(mp *m) {
2896 if iscgo {
2897 var ts cgothreadstart
2898 if _cgo_thread_start == nil {
2899 throw("_cgo_thread_start missing")
2900 }
2901 ts.g.set(mp.g0)
2902 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2903 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2904 if msanenabled {
2905 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2906 }
2907 if asanenabled {
2908 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2909 }
2910 execLock.rlock()
2911 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2912 execLock.runlock()
2913 return
2914 }
2915 execLock.rlock()
2916 newosproc(mp)
2917 execLock.runlock()
2918 }
2919
2920
2921
2922
2923
2924 func startTemplateThread() {
2925 if GOARCH == "wasm" {
2926 return
2927 }
2928
2929
2930
2931 mp := acquirem()
2932 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2933 releasem(mp)
2934 return
2935 }
2936 newm(templateThread, nil, -1)
2937 releasem(mp)
2938 }
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
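// templateThread is a thread in a known-good state that exists solely to
// launch new threads in known-good states when the calling thread may not be
// in one (for example, locked to a goroutine). It parks on newmHandoff.wake
// and creates each m queued on newmHandoff.newm.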
2952 func templateThread() {
2953 lock(&sched.lock)
2954 sched.nmsys++
2955 checkdead()
2956 unlock(&sched.lock)
2957
2958 for {
2959 lock(&newmHandoff.lock)
2960 for newmHandoff.newm != 0 {
2961 newm := newmHandoff.newm.ptr()
2962 newmHandoff.newm = 0
2963 unlock(&newmHandoff.lock)
2964 for newm != nil {
2965 next := newm.schedlink.ptr()
2966 newm.schedlink = 0
2967 newm1(newm)
2968 newm = next
2969 }
2970 lock(&newmHandoff.lock)
2971 }
2972 newmHandoff.waiting = true
2973 noteclear(&newmHandoff.wake)
2974 unlock(&newmHandoff.lock)
2975 notesleep(&newmHandoff.wake)
2976 }
2977 }
2978
2979
2980
2981 func stopm() {
2982 gp := getg()
2983
2984 if gp.m.locks != 0 {
2985 throw("stopm holding locks")
2986 }
2987 if gp.m.p != 0 {
2988 throw("stopm holding p")
2989 }
2990 if gp.m.spinning {
2991 throw("stopm spinning")
2992 }
2993
2994 lock(&sched.lock)
2995 mput(gp.m)
2996 unlock(&sched.lock)
2997 mPark()
2998 acquirep(gp.m.nextp.ptr())
2999 gp.m.nextp = 0
3000 }
3001
3002 func mspinning() {
3003
3004 getg().m.spinning = true
3005 }
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
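// startm schedules some M to run the P (creating an M if necessary). If pp is
// nil, it tries to get an idle P; if there is none it does nothing. If
// spinning is true, the caller has incremented nmspinning and must pass a
// non-nil P; the chosen M starts out spinning. lockheld indicates whether the
// caller already holds sched.lock.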
3024 func startm(pp *p, spinning, lockheld bool) {
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041 mp := acquirem()
3042 if !lockheld {
3043 lock(&sched.lock)
3044 }
3045 if pp == nil {
3046 if spinning {
3047
3048
3049
3050 throw("startm: P required for spinning=true")
3051 }
3052 pp, _ = pidleget(0)
3053 if pp == nil {
3054 if !lockheld {
3055 unlock(&sched.lock)
3056 }
3057 releasem(mp)
3058 return
3059 }
3060 }
3061 nmp := mget()
3062 if nmp == nil {
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077 id := mReserveID()
3078 unlock(&sched.lock)
3079
3080 var fn func()
3081 if spinning {
3082
3083 fn = mspinning
3084 }
3085 newm(fn, pp, id)
3086
3087 if lockheld {
3088 lock(&sched.lock)
3089 }
3090
3091
3092 releasem(mp)
3093 return
3094 }
3095 if !lockheld {
3096 unlock(&sched.lock)
3097 }
3098 if nmp.spinning {
3099 throw("startm: m is spinning")
3100 }
3101 if nmp.nextp != 0 {
3102 throw("startm: m has p")
3103 }
3104 if spinning && !runqempty(pp) {
3105 throw("startm: p has runnable gs")
3106 }
3107
3108 nmp.spinning = spinning
3109 nmp.nextp.set(pp)
3110 notewakeup(&nmp.park)
3111
3112
3113 releasem(mp)
3114 }
3115
3116
3117
3118
3119
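// handoffp hands off P from a syscall or locked M. It either starts an M to
// run the P if there may be work to do, or puts the P on the idle list.
// Always runs without a P, so write barriers are not allowed.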
3120 func handoffp(pp *p) {
3121
3122
3123
3124
3125 if !runqempty(pp) || !sched.runq.empty() {
3126 startm(pp, false, false)
3127 return
3128 }
3129
3130 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3131 startm(pp, false, false)
3132 return
3133 }
3134
3135 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3136 startm(pp, false, false)
3137 return
3138 }
3139
3140
3141 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3142 sched.needspinning.Store(0)
3143 startm(pp, true, false)
3144 return
3145 }
3146 lock(&sched.lock)
3147 if sched.gcwaiting.Load() {
3148 pp.status = _Pgcstop
3149 pp.gcStopTime = nanotime()
3150 sched.stopwait--
3151 if sched.stopwait == 0 {
3152 notewakeup(&sched.stopnote)
3153 }
3154 unlock(&sched.lock)
3155 return
3156 }
3157 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3158 sched.safePointFn(pp)
3159 sched.safePointWait--
3160 if sched.safePointWait == 0 {
3161 notewakeup(&sched.safePointNote)
3162 }
3163 }
3164 if !sched.runq.empty() {
3165 unlock(&sched.lock)
3166 startm(pp, false, false)
3167 return
3168 }
3169
3170
3171 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3172 unlock(&sched.lock)
3173 startm(pp, false, false)
3174 return
3175 }
3176
3177
3178
3179 when := pp.timers.wakeTime()
3180 pidleput(pp, 0)
3181 unlock(&sched.lock)
3182
3183 if when != 0 {
3184 wakeNetPoller(when)
3185 }
3186 }
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
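// wakep tries to add one more P to execute G's. It is called when a G is made
// runnable. It only starts an M if it can be the one to transition nmspinning
// from 0 to 1, which keeps wakeups conservative.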
3201 func wakep() {
3202
3203
3204 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3205 return
3206 }
3207
3208
3209
3210
3211
3212
3213 mp := acquirem()
3214
3215 var pp *p
3216 lock(&sched.lock)
3217 pp, _ = pidlegetSpinning(0)
3218 if pp == nil {
3219 if sched.nmspinning.Add(-1) < 0 {
3220 throw("wakep: negative nmspinning")
3221 }
3222 unlock(&sched.lock)
3223 releasem(mp)
3224 return
3225 }
3226
3227
3228
3229
3230 unlock(&sched.lock)
3231
3232 startm(pp, true, false)
3233
3234 releasem(mp)
3235 }
3236
3237
3238
3239 func stoplockedm() {
3240 gp := getg()
3241
3242 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3243 throw("stoplockedm: inconsistent locking")
3244 }
3245 if gp.m.p != 0 {
3246
3247 pp := releasep()
3248 handoffp(pp)
3249 }
3250 incidlelocked(1)
3251
3252 mPark()
3253 status := readgstatus(gp.m.lockedg.ptr())
3254 if status&^_Gscan != _Grunnable {
3255 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3256 dumpgstatus(gp.m.lockedg.ptr())
3257 throw("stoplockedm: not runnable")
3258 }
3259 acquirep(gp.m.nextp.ptr())
3260 gp.m.nextp = 0
3261 }
3262
3263
3264
3265
3266
3267 func startlockedm(gp *g) {
3268 mp := gp.lockedm.ptr()
3269 if mp == getg().m {
3270 throw("startlockedm: locked to me")
3271 }
3272 if mp.nextp != 0 {
3273 throw("startlockedm: m has p")
3274 }
3275
3276 incidlelocked(-1)
3277 pp := releasep()
3278 mp.nextp.set(pp)
3279 notewakeup(&mp.park)
3280 stopm()
3281 }
3282
3283
3284
3285 func gcstopm() {
3286 gp := getg()
3287
3288 if !sched.gcwaiting.Load() {
3289 throw("gcstopm: not waiting for gc")
3290 }
3291 if gp.m.spinning {
3292 gp.m.spinning = false
3293
3294
3295 if sched.nmspinning.Add(-1) < 0 {
3296 throw("gcstopm: negative nmspinning")
3297 }
3298 }
3299 pp := releasep()
3300 lock(&sched.lock)
3301 pp.status = _Pgcstop
3302 pp.gcStopTime = nanotime()
3303 sched.stopwait--
3304 if sched.stopwait == 0 {
3305 notewakeup(&sched.stopnote)
3306 }
3307 unlock(&sched.lock)
3308 stopm()
3309 }
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
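// execute schedules gp to run on the current M. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts a
// new time slice. It never returns: it jumps into gp via gogo.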
3320 func execute(gp *g, inheritTime bool) {
3321 mp := getg().m
3322
3323 if goroutineProfile.active {
3324
3325
3326
3327 tryRecordGoroutineProfile(gp, nil, osyield)
3328 }
3329
3330
3331 mp.curg = gp
3332 gp.m = mp
3333 gp.syncSafePoint = false
3334 casgstatus(gp, _Grunnable, _Grunning)
3335 gp.waitsince = 0
3336 gp.preempt = false
3337 gp.stackguard0 = gp.stack.lo + stackGuard
3338 if !inheritTime {
3339 mp.p.ptr().schedtick++
3340 }
3341
3342
3343 hz := sched.profilehz
3344 if mp.profilehz != hz {
3345 setThreadCPUProfiler(hz)
3346 }
3347
3348 trace := traceAcquire()
3349 if trace.ok() {
3350 trace.GoStart()
3351 traceRelease(trace)
3352 }
3353
3354 gogo(&gp.sched)
3355 }
3356
3357
3358
3359
3360
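// findRunnable finds a runnable goroutine to execute. It tries, in rough
// order: the trace reader, a GC mark worker, the local run queue, the global
// run queue, the netpoller, and work stealing from other P's, before finally
// parking the M. tryWakeP indicates that the returned goroutine is not a
// normal one (a GC worker or the trace reader), so the caller should try to
// wake a P.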
3361 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3362 mp := getg().m
3363
3364
3365
3366
3367
3368 top:
3369
3370
3371
3372 mp.clearAllpSnapshot()
3373
3374 pp := mp.p.ptr()
3375 if sched.gcwaiting.Load() {
3376 gcstopm()
3377 goto top
3378 }
3379 if pp.runSafePointFn != 0 {
3380 runSafePointFn()
3381 }
3382
3383
3384
3385
3386
3387 now, pollUntil, _ := pp.timers.check(0, nil)
3388
3389
3390 if traceEnabled() || traceShuttingDown() {
3391 gp := traceReader()
3392 if gp != nil {
3393 trace := traceAcquire()
3394 casgstatus(gp, _Gwaiting, _Grunnable)
3395 if trace.ok() {
3396 trace.GoUnpark(gp, 0)
3397 traceRelease(trace)
3398 }
3399 return gp, false, true
3400 }
3401 }
3402
3403
3404 if gcBlackenEnabled != 0 {
3405 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3406 if gp != nil {
3407 return gp, false, true
3408 }
3409 now = tnow
3410 }
3411
3412
3413
3414
3415 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3416 lock(&sched.lock)
3417 gp := globrunqget()
3418 unlock(&sched.lock)
3419 if gp != nil {
3420 return gp, false, false
3421 }
3422 }
3423
3424
3425 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3426 if gp := wakefing(); gp != nil {
3427 ready(gp, 0, true)
3428 }
3429 }
3430
3431
3432 if gcCleanups.needsWake() {
3433 gcCleanups.wake()
3434 }
3435
3436 if *cgo_yield != nil {
3437 asmcgocall(*cgo_yield, nil)
3438 }
3439
3440
3441 if gp, inheritTime := runqget(pp); gp != nil {
3442 return gp, inheritTime, false
3443 }
3444
3445
3446 if !sched.runq.empty() {
3447 lock(&sched.lock)
3448 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3449 unlock(&sched.lock)
3450 if gp != nil {
3451 if runqputbatch(pp, &q); !q.empty() {
3452 throw("Couldn't put Gs into empty local runq")
3453 }
3454 return gp, false, false
3455 }
3456 }
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3468 list, delta := netpoll(0)
3469 sched.pollingNet.Store(0)
3470 if !list.empty() {
3471 gp := list.pop()
3472 injectglist(&list)
3473 netpollAdjustWaiters(delta)
3474 trace := traceAcquire()
3475 casgstatus(gp, _Gwaiting, _Grunnable)
3476 if trace.ok() {
3477 trace.GoUnpark(gp, 0)
3478 traceRelease(trace)
3479 }
3480 return gp, false, false
3481 }
3482 }
3483
3484
3485
3486
3487
3488
3489 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3490 if !mp.spinning {
3491 mp.becomeSpinning()
3492 }
3493
3494 gp, inheritTime, tnow, w, newWork := stealWork(now)
3495 if gp != nil {
3496
3497 return gp, inheritTime, false
3498 }
3499 if newWork {
3500
3501
3502 goto top
3503 }
3504
3505 now = tnow
3506 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3507
3508 pollUntil = w
3509 }
3510 }
3511
3512
3513
3514
3515
3516 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3517 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3518 if node != nil {
3519 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3520 gp := node.gp.ptr()
3521
3522 trace := traceAcquire()
3523 casgstatus(gp, _Gwaiting, _Grunnable)
3524 if trace.ok() {
3525 trace.GoUnpark(gp, 0)
3526 traceRelease(trace)
3527 }
3528 return gp, false, false
3529 }
3530 gcController.removeIdleMarkWorker()
3531 }
3532
3533
3534
3535
3536
3537 gp, otherReady := beforeIdle(now, pollUntil)
3538 if gp != nil {
3539 trace := traceAcquire()
3540 casgstatus(gp, _Gwaiting, _Grunnable)
3541 if trace.ok() {
3542 trace.GoUnpark(gp, 0)
3543 traceRelease(trace)
3544 }
3545 return gp, false, false
3546 }
3547 if otherReady {
3548 goto top
3549 }
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559 allpSnapshot := mp.snapshotAllp()
3560
3561
3562 idlepMaskSnapshot := idlepMask
3563 timerpMaskSnapshot := timerpMask
3564
3565
3566 lock(&sched.lock)
3567 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3568 unlock(&sched.lock)
3569 goto top
3570 }
3571 if !sched.runq.empty() {
3572 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3573 unlock(&sched.lock)
3574 if gp == nil {
3575 throw("global runq empty with non-zero runqsize")
3576 }
3577 if runqputbatch(pp, &q); !q.empty() {
3578 throw("Couldn't put Gs into empty local runq")
3579 }
3580 return gp, false, false
3581 }
3582 if !mp.spinning && sched.needspinning.Load() == 1 {
3583
3584 mp.becomeSpinning()
3585 unlock(&sched.lock)
3586 goto top
3587 }
3588 if releasep() != pp {
3589 throw("findrunnable: wrong p")
3590 }
3591 now = pidleput(pp, now)
3592 unlock(&sched.lock)
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630 wasSpinning := mp.spinning
3631 if mp.spinning {
3632 mp.spinning = false
3633 if sched.nmspinning.Add(-1) < 0 {
3634 throw("findrunnable: negative nmspinning")
3635 }
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648 lock(&sched.lock)
3649 if !sched.runq.empty() {
3650 pp, _ := pidlegetSpinning(0)
3651 if pp != nil {
3652 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3653 unlock(&sched.lock)
3654 if gp == nil {
3655 throw("global runq empty with non-zero runqsize")
3656 }
3657 if runqputbatch(pp, &q); !q.empty() {
3658 throw("Couldn't put Gs into empty local runq")
3659 }
3660 acquirep(pp)
3661 mp.becomeSpinning()
3662 return gp, false, false
3663 }
3664 }
3665 unlock(&sched.lock)
3666
3667 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3668 if pp != nil {
3669 acquirep(pp)
3670 mp.becomeSpinning()
3671 goto top
3672 }
3673
3674
3675 pp, gp := checkIdleGCNoP()
3676 if pp != nil {
3677 acquirep(pp)
3678 mp.becomeSpinning()
3679
3680
3681 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3682 trace := traceAcquire()
3683 casgstatus(gp, _Gwaiting, _Grunnable)
3684 if trace.ok() {
3685 trace.GoUnpark(gp, 0)
3686 traceRelease(trace)
3687 }
3688 return gp, false, false
3689 }
3690
3691
3692
3693
3694
3695
3696
3697 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3698 }
3699
3700
3701
3702
3703
3704 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3705 sched.pollUntil.Store(pollUntil)
3706 if mp.p != 0 {
3707 throw("findrunnable: netpoll with p")
3708 }
3709 if mp.spinning {
3710 throw("findrunnable: netpoll with spinning")
3711 }
3712 delay := int64(-1)
3713 if pollUntil != 0 {
3714 if now == 0 {
3715 now = nanotime()
3716 }
3717 delay = pollUntil - now
3718 if delay < 0 {
3719 delay = 0
3720 }
3721 }
3722 if faketime != 0 {
3723
3724 delay = 0
3725 }
3726 list, delta := netpoll(delay)
3727
3728 now = nanotime()
3729 sched.pollUntil.Store(0)
3730 sched.lastpoll.Store(now)
3731 if faketime != 0 && list.empty() {
3732
3733
3734 stopm()
3735 goto top
3736 }
3737 lock(&sched.lock)
3738 pp, _ := pidleget(now)
3739 unlock(&sched.lock)
3740 if pp == nil {
3741 injectglist(&list)
3742 netpollAdjustWaiters(delta)
3743 } else {
3744 acquirep(pp)
3745 if !list.empty() {
3746 gp := list.pop()
3747 injectglist(&list)
3748 netpollAdjustWaiters(delta)
3749 trace := traceAcquire()
3750 casgstatus(gp, _Gwaiting, _Grunnable)
3751 if trace.ok() {
3752 trace.GoUnpark(gp, 0)
3753 traceRelease(trace)
3754 }
3755 return gp, false, false
3756 }
3757 if wasSpinning {
3758 mp.becomeSpinning()
3759 }
3760 goto top
3761 }
3762 } else if pollUntil != 0 && netpollinited() {
3763 pollerPollUntil := sched.pollUntil.Load()
3764 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3765 netpollBreak()
3766 }
3767 }
3768 stopm()
3769 goto top
3770 }
3771
3772
3773
3774
3775
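// pollWork reports whether there is non-background work this P could be doing.
// It is a lightweight check of a subset of the scheduler's conditions: the global
// run queue, the local run queue, and goroutines ready in the network poller.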
3776 func pollWork() bool {
3777 if !sched.runq.empty() {
3778 return true
3779 }
3780 p := getg().m.p.ptr()
3781 if !runqempty(p) {
3782 return true
3783 }
3784 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3785 if list, delta := netpoll(0); !list.empty() {
3786 injectglist(&list)
3787 netpollAdjustWaiters(delta)
3788 return true
3789 }
3790 }
3791 return false
3792 }
3793
3794
3795
3796
3797
3798
3799
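// stealWork attempts to steal a runnable goroutine or timer from any P.
// It makes several passes over all Ps; only on the final pass does it also
// run ready timers on the victim and steal its runnext slot. It returns the
// updated time, the earliest timer wake time seen, and newWork=true if new
// work may have been readied (for example by running a stolen timer), in
// which case the caller should restart its search.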
3800 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3801 pp := getg().m.p.ptr()
3802
3803 ranTimer := false
3804
3805 const stealTries = 4
3806 for i := 0; i < stealTries; i++ {
3807 stealTimersOrRunNextG := i == stealTries-1
3808
3809 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3810 if sched.gcwaiting.Load() {
3811
3812 return nil, false, now, pollUntil, true
3813 }
3814 p2 := allp[enum.position()]
3815 if pp == p2 {
3816 continue
3817 }
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3833 tnow, w, ran := p2.timers.check(now, nil)
3834 now = tnow
3835 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3836 pollUntil = w
3837 }
3838 if ran {
3839
3840
3841
3842
3843
3844
3845
3846
3847 if gp, inheritTime := runqget(pp); gp != nil {
3848 return gp, inheritTime, now, pollUntil, ranTimer
3849 }
3850 ranTimer = true
3851 }
3852 }
3853
3854
3855 if !idlepMask.read(enum.position()) {
3856 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3857 return gp, false, now, pollUntil, ranTimer
3858 }
3859 }
3860 }
3861 }
3862
3863
3864
3865
3866 return nil, false, now, pollUntil, ranTimer
3867 }
3868
3869
3870
3871
3872
3873
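// checkRunqsNoP re-checks the per-P run queues using the snapshots taken while
// this M still had a P. If any queue is non-empty it tries to grab an idle P to
// run that work on and returns it; the caller must wire the returned P and retry
// the full search.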
3874 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3875 for id, p2 := range allpSnapshot {
3876 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3877 lock(&sched.lock)
3878 pp, _ := pidlegetSpinning(0)
3879 if pp == nil {
3880
3881 unlock(&sched.lock)
3882 return nil
3883 }
3884 unlock(&sched.lock)
3885 return pp
3886 }
3887 }
3888
3889
3890 return nil
3891 }
3892
3893
3894
3895
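// checkTimersNoP scans the timer heaps of all Ps recorded in the snapshot and
// returns the earliest timer wake time, folded into pollUntil.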
3896 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3897 for id, p2 := range allpSnapshot {
3898 if timerpMaskSnapshot.read(uint32(id)) {
3899 w := p2.timers.wakeTime()
3900 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3901 pollUntil = w
3902 }
3903 }
3904 }
3905
3906 return pollUntil
3907 }
3908
3909
3910
3911
3912
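// checkIdleGCNoP checks whether an idle-priority GC mark worker should run and,
// if so, grabs an idle P and a worker G for it. It returns nil, nil if idle
// marking is not needed or no P or worker is available. The returned P has not
// been wired yet.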
3913 func checkIdleGCNoP() (*p, *g) {
3914
3915
3916
3917
3918
3919
3920 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3921 return nil, nil
3922 }
3923 if !gcShouldScheduleWorker(nil) {
3924 return nil, nil
3925 }
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944 lock(&sched.lock)
3945 pp, now := pidlegetSpinning(0)
3946 if pp == nil {
3947 unlock(&sched.lock)
3948 return nil, nil
3949 }
3950
3951
3952 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3953 pidleput(pp, now)
3954 unlock(&sched.lock)
3955 return nil, nil
3956 }
3957
3958 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3959 if node == nil {
3960 pidleput(pp, now)
3961 unlock(&sched.lock)
3962 gcController.removeIdleMarkWorker()
3963 return nil, nil
3964 }
3965
3966 unlock(&sched.lock)
3967
3968 return pp, node.gp.ptr()
3969 }
3970
3971
3972
3973
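// wakeNetPoller ensures a timer due at "when" will be noticed: if a thread is
// blocked in the network poller past that time it is interrupted with
// netpollBreak, and otherwise an idle P is woken (skipped on plan9) to service
// timers and the poller.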
3974 func wakeNetPoller(when int64) {
3975 if sched.lastpoll.Load() == 0 {
3976
3977
3978
3979
3980 pollerPollUntil := sched.pollUntil.Load()
3981 if pollerPollUntil == 0 || pollerPollUntil > when {
3982 netpollBreak()
3983 }
3984 } else {
3985
3986
3987 if GOOS != "plan9" {
3988 wakep()
3989 }
3990 }
3991 }
3992
3993 func resetspinning() {
3994 gp := getg()
3995 if !gp.m.spinning {
3996 throw("resetspinning: not a spinning m")
3997 }
3998 gp.m.spinning = false
3999 nmspinning := sched.nmspinning.Add(-1)
4000 if nmspinning < 0 {
4001 throw("findrunnable: negative nmspinning")
4002 }
4003
4004
4005
4006 wakep()
4007 }
4008
4009
4010
4011
4012
4013
4014
4015
4016
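// injectglist marks every G on the list runnable and adds it to some run queue,
// clearing glist. With no current P, everything goes on the global queue and idle
// Ms are started for it; otherwise one G per idle P is pushed to the global queue
// (starting Ms to run them) and the rest go on the current P's local queue,
// overflowing back to the global queue if the local queue fills.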
4017 func injectglist(glist *gList) {
4018 if glist.empty() {
4019 return
4020 }
4021
4022
4023
4024 var tail *g
4025 trace := traceAcquire()
4026 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4027 tail = gp
4028 casgstatus(gp, _Gwaiting, _Grunnable)
4029 if trace.ok() {
4030 trace.GoUnpark(gp, 0)
4031 }
4032 }
4033 if trace.ok() {
4034 traceRelease(trace)
4035 }
4036
4037
4038 q := gQueue{glist.head, tail.guintptr(), glist.size}
4039 *glist = gList{}
4040
4041 startIdle := func(n int32) {
4042 for ; n > 0; n-- {
4043 mp := acquirem()
4044 lock(&sched.lock)
4045
4046 pp, _ := pidlegetSpinning(0)
4047 if pp == nil {
4048 unlock(&sched.lock)
4049 releasem(mp)
4050 break
4051 }
4052
4053 startm(pp, false, true)
4054 unlock(&sched.lock)
4055 releasem(mp)
4056 }
4057 }
4058
4059 pp := getg().m.p.ptr()
4060 if pp == nil {
4061 n := q.size
4062 lock(&sched.lock)
4063 globrunqputbatch(&q)
4064 unlock(&sched.lock)
4065 startIdle(n)
4066 return
4067 }
4068
4069 var globq gQueue
4070 npidle := sched.npidle.Load()
4071 for ; npidle > 0 && !q.empty(); npidle-- {
4072 g := q.pop()
4073 globq.pushBack(g)
4074 }
4075 if !globq.empty() {
4076 n := globq.size
4077 lock(&sched.lock)
4078 globrunqputbatch(&globq)
4079 unlock(&sched.lock)
4080 startIdle(n)
4081 }
4082
4083 if runqputbatch(pp, &q); !q.empty() {
4084 lock(&sched.lock)
4085 globrunqputbatch(&q)
4086 unlock(&sched.lock)
4087 }
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102 wakep()
4103 }
4104
4105
4106
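// schedule performs one round of scheduling: find a runnable goroutine and
// execute it on the current M. It never returns.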
4107 func schedule() {
4108 mp := getg().m
4109
4110 if mp.locks != 0 {
4111 throw("schedule: holding locks")
4112 }
4113
4114 if mp.lockedg != 0 {
4115 stoplockedm()
4116 execute(mp.lockedg.ptr(), false)
4117 }
4118
4119
4120
4121 if mp.incgo {
4122 throw("schedule: in cgo")
4123 }
4124
4125 top:
4126 pp := mp.p.ptr()
4127 pp.preempt = false
4128
4129
4130
4131
4132 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4133 throw("schedule: spinning with local work")
4134 }
4135
4136 gp, inheritTime, tryWakeP := findRunnable()
4137
4138
4139
4140
4141 mp.clearAllpSnapshot()
4142
4143 if debug.dontfreezetheworld > 0 && freezing.Load() {
4144
4145
4146
4147
4148
4149
4150
4151 lock(&deadlock)
4152 lock(&deadlock)
4153 }
4154
4155
4156
4157
4158 if mp.spinning {
4159 resetspinning()
4160 }
4161
4162 if sched.disable.user && !schedEnabled(gp) {
4163
4164
4165
4166 lock(&sched.lock)
4167 if schedEnabled(gp) {
4168
4169
4170 unlock(&sched.lock)
4171 } else {
4172 sched.disable.runnable.pushBack(gp)
4173 unlock(&sched.lock)
4174 goto top
4175 }
4176 }
4177
4178
4179
4180 if tryWakeP {
4181 wakep()
4182 }
4183 if gp.lockedm != 0 {
4184
4185
4186 startlockedm(gp)
4187 goto top
4188 }
4189
4190 execute(gp, inheritTime)
4191 }
4192
4193
4194
4195
4196
4197
4198
4199
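// dropg removes the association between the current M and its current goroutine
// (m.curg and curg.m). Callers typically move the G out of _Grunning first and
// then arrange for it to be run elsewhere.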
4200 func dropg() {
4201 gp := getg()
4202
4203 setMNoWB(&gp.m.curg.m, nil)
4204 setGNoWB(&gp.m.curg, nil)
4205 }
4206
4207 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4208 unlock((*mutex)(lock))
4209 return true
4210 }
4211
4212
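// park_m continues gopark on the g0 stack: it moves gp to _Gwaiting, detaches it
// from the M, and runs the wait-unlock function if one was supplied. If that
// function returns false the park is abandoned and gp is executed again
// immediately; otherwise the M enters the scheduler.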
4213 func park_m(gp *g) {
4214 mp := getg().m
4215
4216 trace := traceAcquire()
4217
4218
4219
4220
4221
4222 bubble := gp.bubble
4223 if bubble != nil {
4224 bubble.incActive()
4225 }
4226
4227 if trace.ok() {
4228
4229
4230
4231 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4232 }
4233
4234
4235 casgstatus(gp, _Grunning, _Gwaiting)
4236 if trace.ok() {
4237 traceRelease(trace)
4238 }
4239
4240 dropg()
4241
4242 if fn := mp.waitunlockf; fn != nil {
4243 ok := fn(gp, mp.waitlock)
4244 mp.waitunlockf = nil
4245 mp.waitlock = nil
4246 if !ok {
4247 trace := traceAcquire()
4248 casgstatus(gp, _Gwaiting, _Grunnable)
4249 if bubble != nil {
4250 bubble.decActive()
4251 }
4252 if trace.ok() {
4253 trace.GoUnpark(gp, 2)
4254 traceRelease(trace)
4255 }
4256 execute(gp, true)
4257 }
4258 }
4259
4260 if bubble != nil {
4261 bubble.decActive()
4262 }
4263
4264 schedule()
4265 }
4266
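// goschedImpl yields the processor: gp moves from _Grunning to _Grunnable and is
// requeued (on the local run queue when preempted for a pending GC stop,
// otherwise on the global queue), then the M calls schedule.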
4267 func goschedImpl(gp *g, preempted bool) {
4268 pp := gp.m.p.ptr()
4269 trace := traceAcquire()
4270 status := readgstatus(gp)
4271 if status&^_Gscan != _Grunning {
4272 dumpgstatus(gp)
4273 throw("bad g status")
4274 }
4275 if trace.ok() {
4276
4277
4278
4279 if preempted {
4280 trace.GoPreempt()
4281 } else {
4282 trace.GoSched()
4283 }
4284 }
4285 casgstatus(gp, _Grunning, _Grunnable)
4286 if trace.ok() {
4287 traceRelease(trace)
4288 }
4289
4290 dropg()
4291 if preempted && sched.gcwaiting.Load() {
4292
4293
4294 runqput(pp, gp, true)
4295 } else {
4296 lock(&sched.lock)
4297 globrunqput(gp)
4298 unlock(&sched.lock)
4299 }
4300
4301 if mainStarted {
4302 wakep()
4303 }
4304
4305 schedule()
4306 }
4307
4308
4309 func gosched_m(gp *g) {
4310 goschedImpl(gp, false)
4311 }
4312
4313
4314 func goschedguarded_m(gp *g) {
4315 if !canPreemptM(gp.m) {
4316 gogo(&gp.sched)
4317 }
4318 goschedImpl(gp, false)
4319 }
4320
4321 func gopreempt_m(gp *g) {
4322 goschedImpl(gp, true)
4323 }
4324
4325
4326
4327
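// preemptPark parks gp in _Gpreempted at an asynchronous safe point and enters
// the scheduler. The status change goes through _Gscan|_Gpreempted so that the
// transition and its trace event are published consistently to observers.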
4328 func preemptPark(gp *g) {
4329 status := readgstatus(gp)
4330 if status&^_Gscan != _Grunning {
4331 dumpgstatus(gp)
4332 throw("bad g status")
4333 }
4334
4335 if gp.asyncSafePoint {
4336
4337
4338
4339 f := findfunc(gp.sched.pc)
4340 if !f.valid() {
4341 throw("preempt at unknown pc")
4342 }
4343 if f.flag&abi.FuncFlagSPWrite != 0 {
4344 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4345 throw("preempt SPWRITE")
4346 }
4347 }
4348
4349
4350
4351
4352
4353
4354
4355 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4356 dropg()
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378 trace := traceAcquire()
4379 if trace.ok() {
4380 trace.GoPark(traceBlockPreempted, 0)
4381 }
4382 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4383 if trace.ok() {
4384 traceRelease(trace)
4385 }
4386 schedule()
4387 }
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
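// goyield is like Gosched, but it emits a preempt trace event instead of a sched
// event and puts the current G back on the current P's local run queue rather
// than the global queue.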
4403 func goyield() {
4404 checkTimeouts()
4405 mcall(goyield_m)
4406 }
4407
4408 func goyield_m(gp *g) {
4409 trace := traceAcquire()
4410 pp := gp.m.p.ptr()
4411 if trace.ok() {
4412
4413
4414
4415 trace.GoPreempt()
4416 }
4417 casgstatus(gp, _Grunning, _Grunnable)
4418 if trace.ok() {
4419 traceRelease(trace)
4420 }
4421 dropg()
4422 runqput(pp, gp, false)
4423 schedule()
4424 }
4425
4426
4427 func goexit1() {
4428 if raceenabled {
4429 if gp := getg(); gp.bubble != nil {
4430 racereleasemergeg(gp, gp.bubble.raceaddr())
4431 }
4432 racegoend()
4433 }
4434 trace := traceAcquire()
4435 if trace.ok() {
4436 trace.GoEnd()
4437 traceRelease(trace)
4438 }
4439 mcall(goexit0)
4440 }
4441
4442
4443 func goexit0(gp *g) {
4444 gdestroy(gp)
4445 schedule()
4446 }
4447
4448 func gdestroy(gp *g) {
4449 mp := getg().m
4450 pp := mp.p.ptr()
4451
4452 casgstatus(gp, _Grunning, _Gdead)
4453 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4454 if isSystemGoroutine(gp, false) {
4455 sched.ngsys.Add(-1)
4456 }
4457 gp.m = nil
4458 locked := gp.lockedm != 0
4459 gp.lockedm = 0
4460 mp.lockedg = 0
4461 gp.preemptStop = false
4462 gp.paniconfault = false
4463 gp._defer = nil
4464 gp._panic = nil
4465 gp.writebuf = nil
4466 gp.waitreason = waitReasonZero
4467 gp.param = nil
4468 gp.labels = nil
4469 gp.timer = nil
4470 gp.bubble = nil
4471
4472 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4473
4474
4475
4476 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4477 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4478 gcController.bgScanCredit.Add(scanCredit)
4479 gp.gcAssistBytes = 0
4480 }
4481
4482 dropg()
4483
4484 if GOARCH == "wasm" {
4485 gfput(pp, gp)
4486 return
4487 }
4488
4489 if locked && mp.lockedInt != 0 {
4490 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4491 if mp.isextra {
4492 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4493 }
4494 throw("exited a goroutine internally locked to the OS thread")
4495 }
4496 gfput(pp, gp)
4497 if locked {
4498
4499
4500
4501
4502
4503
4504 if GOOS != "plan9" {
4505 gogo(&mp.g0.sched)
4506 } else {
4507
4508
4509 mp.lockedExt = 0
4510 }
4511 }
4512 }
4513
4514
4515
4516
4517
4518
4519
4520
4521
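// save records pc, sp, and bp in getg().sched so a later gogo can resume
// execution there. It must only be called for a user goroutine (never g0 or
// gsignal), it clears the saved LR, and it requires sched.ctxt to already be nil.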
4522 func save(pc, sp, bp uintptr) {
4523 gp := getg()
4524
4525 if gp == gp.m.g0 || gp == gp.m.gsignal {
4526
4527
4528
4529
4530
4531 throw("save on system g not allowed")
4532 }
4533
4534 gp.sched.pc = pc
4535 gp.sched.sp = sp
4536 gp.sched.lr = 0
4537 gp.sched.bp = bp
4538
4539
4540
4541 if gp.sched.ctxt != nil {
4542 badctxt()
4543 }
4544 }
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
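// reentersyscall prepares the current goroutine to enter a system call: it saves
// the caller's pc/sp/bp, marks the G _Gsyscall, and leaves the P in a state from
// which sysmon or the GC can retake it if the call blocks. The stack guard is
// poisoned with stackPreempt and throwsplit is set so any unexpected stack growth
// aborts, and pc/sp/bp are re-saved after every call that might grow the stack.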
4570 func reentersyscall(pc, sp, bp uintptr) {
4571 gp := getg()
4572
4573
4574
4575 gp.m.locks++
4576
4577
4578
4579
4580
4581 gp.stackguard0 = stackPreempt
4582 gp.throwsplit = true
4583
4584
4585 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4586
4587 pp := gp.m.p.ptr()
4588 if pp.runSafePointFn != 0 {
4589
4590 systemstack(runSafePointFn)
4591 }
4592 gp.m.oldp.set(pp)
4593
4594
4595 save(pc, sp, bp)
4596 gp.syscallsp = sp
4597 gp.syscallpc = pc
4598 gp.syscallbp = bp
4599
4600
4601 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4602 systemstack(func() {
4603 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4604 throw("entersyscall")
4605 })
4606 }
4607 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4608 systemstack(func() {
4609 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4610 throw("entersyscall")
4611 })
4612 }
4613 trace := traceAcquire()
4614 if trace.ok() {
4615
4616
4617
4618
4619 systemstack(func() {
4620 trace.GoSysCall()
4621 })
4622
4623 save(pc, sp, bp)
4624 }
4625 if sched.gcwaiting.Load() {
4626
4627
4628
4629 systemstack(func() {
4630 entersyscallHandleGCWait(trace)
4631 })
4632
4633 save(pc, sp, bp)
4634 }
4635
4636
4637
4638
4639
4640 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4641 casgstatus(gp, _Grunning, _Gsyscall)
4642 }
4643 if staticLockRanking {
4644
4645 save(pc, sp, bp)
4646 }
4647 if trace.ok() {
4648
4649
4650
4651 traceRelease(trace)
4652 }
4653 if sched.sysmonwait.Load() {
4654 systemstack(entersyscallWakeSysmon)
4655
4656 save(pc, sp, bp)
4657 }
4658 gp.m.locks--
4659 }
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
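// entersyscall is the standard syscall entry used by the syscall package and by
// cgo calls; it captures the caller's pc, sp, and frame pointer and hands them to
// reentersyscall.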
4675 func entersyscall() {
4676
4677
4678
4679
4680 fp := getcallerfp()
4681 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4682 }
4683
4684 func entersyscallWakeSysmon() {
4685 lock(&sched.lock)
4686 if sched.sysmonwait.Load() {
4687 sched.sysmonwait.Store(false)
4688 notewakeup(&sched.sysmonnote)
4689 }
4690 unlock(&sched.lock)
4691 }
4692
4693 func entersyscallHandleGCWait(trace traceLocker) {
4694 gp := getg()
4695
4696 lock(&sched.lock)
4697 if sched.stopwait > 0 {
4698
4699 pp := gp.m.p.ptr()
4700 pp.m = 0
4701 gp.m.p = 0
4702 atomic.Store(&pp.status, _Pgcstop)
4703
4704 if trace.ok() {
4705 trace.ProcStop(pp)
4706 }
4707 sched.nGsyscallNoP.Add(1)
4708 pp.gcStopTime = nanotime()
4709 pp.syscalltick++
4710 if sched.stopwait--; sched.stopwait == 0 {
4711 notewakeup(&sched.stopnote)
4712 }
4713 }
4714 unlock(&sched.lock)
4715 }
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
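// entersyscallblock is like entersyscall, but for calls known to block: rather
// than leaving the P parked for a quick return, it releases the P and hands it
// off to another M immediately.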
4729 func entersyscallblock() {
4730 gp := getg()
4731
4732 gp.m.locks++
4733 gp.throwsplit = true
4734 gp.stackguard0 = stackPreempt
4735 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4736 gp.m.p.ptr().syscalltick++
4737
4738 sched.nGsyscallNoP.Add(1)
4739
4740
4741 pc := sys.GetCallerPC()
4742 sp := sys.GetCallerSP()
4743 bp := getcallerfp()
4744 save(pc, sp, bp)
4745 gp.syscallsp = gp.sched.sp
4746 gp.syscallpc = gp.sched.pc
4747 gp.syscallbp = gp.sched.bp
4748 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4749 sp1 := sp
4750 sp2 := gp.sched.sp
4751 sp3 := gp.syscallsp
4752 systemstack(func() {
4753 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4754 throw("entersyscallblock")
4755 })
4756 }
4757
4758
4759
4760
4761
4762
4763 trace := traceAcquire()
4764 systemstack(func() {
4765 if trace.ok() {
4766 trace.GoSysCall()
4767 }
4768 handoffp(releasep())
4769 })
4770
4771
4772
4773 casgstatus(gp, _Grunning, _Gsyscall)
4774 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4775 systemstack(func() {
4776 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4777 throw("entersyscallblock")
4778 })
4779 }
4780 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4781 systemstack(func() {
4782 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4783 throw("entersyscallblock")
4784 })
4785 }
4786 if trace.ok() {
4787 systemstack(func() {
4788 traceRelease(trace)
4789 })
4790 }
4791
4792
4793 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4794
4795 gp.m.locks--
4796 }
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
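// exitsyscall is called when a goroutine returns from its system call. It tries
// to keep running on the P it had before the call, or to grab another idle P; if
// no P is available it switches to the g0 stack via exitsyscallNoP and the
// goroutine is queued to run later.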
4818 func exitsyscall() {
4819 gp := getg()
4820
4821 gp.m.locks++
4822 if sys.GetCallerSP() > gp.syscallsp {
4823 throw("exitsyscall: syscall frame is no longer valid")
4824 }
4825 gp.waitsince = 0
4826
4827 if sched.stopwait == freezeStopWait {
4828
4829
4830
4831 systemstack(func() {
4832 lock(&deadlock)
4833 lock(&deadlock)
4834 })
4835 }
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4849 casgstatus(gp, _Gsyscall, _Grunning)
4850 }
4851
4852
4853
4854
4855
4856
4857 oldp := gp.m.oldp.ptr()
4858 gp.m.oldp.set(nil)
4859
4860
4861 pp := gp.m.p.ptr()
4862 if pp != nil {
4863
4864 if trace := traceAcquire(); trace.ok() {
4865 systemstack(func() {
4866
4867
4868
4869
4870
4871
4872
4873
4874 if pp.syscalltick == gp.m.syscalltick {
4875 trace.GoSysExit(false)
4876 } else {
4877
4878
4879
4880
4881 trace.ProcSteal(pp)
4882 trace.ProcStart()
4883 trace.GoSysExit(true)
4884 trace.GoStart()
4885 }
4886 traceRelease(trace)
4887 })
4888 }
4889 } else {
4890
4891 systemstack(func() {
4892
4893 if pp := exitsyscallTryGetP(oldp); pp != nil {
4894
4895 acquirepNoTrace(pp)
4896
4897
4898 if trace := traceAcquire(); trace.ok() {
4899 trace.ProcStart()
4900 trace.GoSysExit(true)
4901 trace.GoStart()
4902 traceRelease(trace)
4903 }
4904 }
4905 })
4906 pp = gp.m.p.ptr()
4907 }
4908
4909
4910 if pp != nil {
4911 if goroutineProfile.active {
4912
4913
4914
4915 systemstack(func() {
4916 tryRecordGoroutineProfileWB(gp)
4917 })
4918 }
4919
4920
4921 pp.syscalltick++
4922
4923
4924
4925 gp.syscallsp = 0
4926 gp.m.locks--
4927 if gp.preempt {
4928
4929 gp.stackguard0 = stackPreempt
4930 } else {
4931
4932 gp.stackguard0 = gp.stack.lo + stackGuard
4933 }
4934 gp.throwsplit = false
4935
4936 if sched.disable.user && !schedEnabled(gp) {
4937
4938 Gosched()
4939 }
4940 return
4941 }
4942
4943 gp.m.locks--
4944
4945
4946 mcall(exitsyscallNoP)
4947
4948
4949
4950
4951
4952
4953
4954 gp.syscallsp = 0
4955 gp.m.p.ptr().syscalltick++
4956 gp.throwsplit = false
4957 }
4958
4959
4960
4961
4962
4963
4964
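// exitsyscallTryGetP attempts to find a P for a goroutine leaving a syscall:
// first by re-claiming the P it entered the syscall with, if that P is still
// running this thread's syscall, and otherwise by taking a P from the idle list.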
4965 func exitsyscallTryGetP(oldp *p) *p {
4966
4967 if oldp != nil {
4968 if thread, ok := setBlockOnExitSyscall(oldp); ok {
4969 thread.takeP()
4970 thread.resume()
4971 sched.nGsyscallNoP.Add(-1)
4972 return oldp
4973 }
4974 }
4975
4976
4977 if sched.pidle != 0 {
4978 lock(&sched.lock)
4979 pp, _ := pidleget(0)
4980 if pp != nil && sched.sysmonwait.Load() {
4981 sched.sysmonwait.Store(false)
4982 notewakeup(&sched.sysmonnote)
4983 }
4984 unlock(&sched.lock)
4985 if pp != nil {
4986 sched.nGsyscallNoP.Add(-1)
4987 return pp
4988 }
4989 }
4990 return nil
4991 }
4992
4993
4994
4995
4996
4997
4998
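// exitsyscallNoP runs on the g0 stack when a goroutine exits a syscall and no P
// could be acquired directly. It marks the goroutine runnable and either wires an
// idle P and runs it immediately, queues it on the global run queue and stops
// this M, or stops a locked M until its goroutine can run again.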
4999 func exitsyscallNoP(gp *g) {
5000 traceExitingSyscall()
5001 trace := traceAcquire()
5002 casgstatus(gp, _Grunning, _Grunnable)
5003 traceExitedSyscall()
5004 if trace.ok() {
5005
5006
5007
5008
5009 trace.GoSysExit(true)
5010 traceRelease(trace)
5011 }
5012 sched.nGsyscallNoP.Add(-1)
5013 dropg()
5014 lock(&sched.lock)
5015 var pp *p
5016 if schedEnabled(gp) {
5017 pp, _ = pidleget(0)
5018 }
5019 var locked bool
5020 if pp == nil {
5021 globrunqput(gp)
5022
5023
5024
5025
5026
5027
5028 locked = gp.lockedm != 0
5029 } else if sched.sysmonwait.Load() {
5030 sched.sysmonwait.Store(false)
5031 notewakeup(&sched.sysmonnote)
5032 }
5033 unlock(&sched.lock)
5034 if pp != nil {
5035 acquirep(pp)
5036 execute(gp, false)
5037 }
5038 if locked {
5039
5040
5041
5042
5043 stoplockedm()
5044 execute(gp, false)
5045 }
5046 stopm()
5047 schedule()
5048 }
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062 func syscall_runtime_BeforeFork() {
5063 gp := getg().m.curg
5064
5065
5066
5067
5068 gp.m.locks++
5069 sigsave(&gp.m.sigmask)
5070 sigblock(false)
5071
5072
5073
5074
5075
5076 gp.stackguard0 = stackFork
5077 }
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091 func syscall_runtime_AfterFork() {
5092 gp := getg().m.curg
5093
5094
5095 gp.stackguard0 = gp.stack.lo + stackGuard
5096
5097 msigrestore(gp.m.sigmask)
5098
5099 gp.m.locks--
5100 }
5101
5102
5103
5104 var inForkedChild bool
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125 func syscall_runtime_AfterForkInChild() {
5126
5127
5128
5129
5130 inForkedChild = true
5131
5132 clearSignalHandlers()
5133
5134
5135
5136 msigrestore(getg().m.sigmask)
5137
5138 inForkedChild = false
5139 }
5140
5141
5142
5143
5144 var pendingPreemptSignals atomic.Int32
5145
5146
5147
5148
5149 func syscall_runtime_BeforeExec() {
5150
5151 execLock.lock()
5152
5153
5154
5155 if GOOS == "darwin" || GOOS == "ios" {
5156 for pendingPreemptSignals.Load() > 0 {
5157 osyield()
5158 }
5159 }
5160 }
5161
5162
5163
5164
5165 func syscall_runtime_AfterExec() {
5166 execLock.unlock()
5167 }
5168
5169
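// malg allocates a new g with a stack big enough for stacksize bytes plus the
// system-reserved space. A negative stacksize allocates a g with no stack.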
5170 func malg(stacksize int32) *g {
5171 newg := new(g)
5172 if stacksize >= 0 {
5173 stacksize = round2(stackSystem + stacksize)
5174 systemstack(func() {
5175 newg.stack = stackalloc(uint32(stacksize))
5176 if valgrindenabled {
5177 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5178 }
5179 })
5180 newg.stackguard0 = newg.stack.lo + stackGuard
5181 newg.stackguard1 = ^uintptr(0)
5182
5183
5184 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5185 }
5186 return newg
5187 }
5188
5189
5190
5191
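// newproc creates a new goroutine running fn and puts it on the current P's run
// queue, waking a P if needed. The compiler turns every go statement into a call
// to newproc.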
5192 func newproc(fn *funcval) {
5193 gp := getg()
5194 pc := sys.GetCallerPC()
5195 systemstack(func() {
5196 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5197
5198 pp := getg().m.p.ptr()
5199 runqput(pp, newg, true)
5200
5201 if mainStarted {
5202 wakep()
5203 }
5204 })
5205 }
5206
5207
5208
5209
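// newproc1 creates a new g in state _Grunnable (or _Gwaiting if parked is true)
// starting at fn. callerpc is the address of the go statement that created it.
// The caller is responsible for putting the new g on a run queue.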
5210 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5211 if fn == nil {
5212 fatal("go of nil func value")
5213 }
5214
5215 mp := acquirem()
5216 pp := mp.p.ptr()
5217 newg := gfget(pp)
5218 if newg == nil {
5219 newg = malg(stackMin)
5220 casgstatus(newg, _Gidle, _Gdead)
5221 allgadd(newg)
5222 }
5223 if newg.stack.hi == 0 {
5224 throw("newproc1: newg missing stack")
5225 }
5226
5227 if readgstatus(newg) != _Gdead {
5228 throw("newproc1: new g is not Gdead")
5229 }
5230
5231 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5232 totalSize = alignUp(totalSize, sys.StackAlign)
5233 sp := newg.stack.hi - totalSize
5234 if usesLR {
5235
5236 *(*uintptr)(unsafe.Pointer(sp)) = 0
5237 prepGoExitFrame(sp)
5238 }
5239 if GOARCH == "arm64" {
5240
5241 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5242 }
5243
5244 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5245 newg.sched.sp = sp
5246 newg.stktopsp = sp
5247 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5248 newg.sched.g = guintptr(unsafe.Pointer(newg))
5249 gostartcallfn(&newg.sched, fn)
5250 newg.parentGoid = callergp.goid
5251 newg.gopc = callerpc
5252 newg.ancestors = saveAncestors(callergp)
5253 newg.startpc = fn.fn
5254 newg.runningCleanups.Store(false)
5255 if isSystemGoroutine(newg, false) {
5256 sched.ngsys.Add(1)
5257 } else {
5258
5259 newg.bubble = callergp.bubble
5260 if mp.curg != nil {
5261 newg.labels = mp.curg.labels
5262 }
5263 if goroutineProfile.active {
5264
5265
5266
5267
5268
5269 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5270 }
5271 }
5272
5273 newg.trackingSeq = uint8(cheaprand())
5274 if newg.trackingSeq%gTrackingPeriod == 0 {
5275 newg.tracking = true
5276 }
5277 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5278
5279
5280
5281 trace := traceAcquire()
5282 var status uint32 = _Grunnable
5283 if parked {
5284 status = _Gwaiting
5285 newg.waitreason = waitreason
5286 }
5287 if pp.goidcache == pp.goidcacheend {
5288
5289
5290
5291 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5292 pp.goidcache -= _GoidCacheBatch - 1
5293 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5294 }
5295 newg.goid = pp.goidcache
5296 casgstatus(newg, _Gdead, status)
5297 pp.goidcache++
5298 newg.trace.reset()
5299 if trace.ok() {
5300 trace.GoCreate(newg, newg.startpc, parked)
5301 traceRelease(trace)
5302 }
5303
5304
5305 if raceenabled {
5306 newg.racectx = racegostart(callerpc)
5307 newg.raceignore = 0
5308 if newg.labels != nil {
5309
5310
5311 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5312 }
5313 }
5314 pp.goroutinesCreated++
5315 releasem(mp)
5316
5317 return newg
5318 }
5319
5320
5321
5322
5323 func saveAncestors(callergp *g) *[]ancestorInfo {
5324
5325 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5326 return nil
5327 }
5328 var callerAncestors []ancestorInfo
5329 if callergp.ancestors != nil {
5330 callerAncestors = *callergp.ancestors
5331 }
5332 n := int32(len(callerAncestors)) + 1
5333 if n > debug.tracebackancestors {
5334 n = debug.tracebackancestors
5335 }
5336 ancestors := make([]ancestorInfo, n)
5337 copy(ancestors[1:], callerAncestors)
5338
5339 var pcs [tracebackInnerFrames]uintptr
5340 npcs := gcallers(callergp, 0, pcs[:])
5341 ipcs := make([]uintptr, npcs)
5342 copy(ipcs, pcs[:])
5343 ancestors[0] = ancestorInfo{
5344 pcs: ipcs,
5345 goid: callergp.goid,
5346 gopc: callergp.gopc,
5347 }
5348
5349 ancestorsp := new([]ancestorInfo)
5350 *ancestorsp = ancestors
5351 return ancestorsp
5352 }
5353
5354
5355
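// gfput puts a dead g on pp's free list, freeing any stack that is not the
// standard starting size. If the local free list grows past 64 entries, a batch
// of 32 is moved to the global free lists.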
5356 func gfput(pp *p, gp *g) {
5357 if readgstatus(gp) != _Gdead {
5358 throw("gfput: bad status (not Gdead)")
5359 }
5360
5361 stksize := gp.stack.hi - gp.stack.lo
5362
5363 if stksize != uintptr(startingStackSize) {
5364
5365 stackfree(gp.stack)
5366 gp.stack.lo = 0
5367 gp.stack.hi = 0
5368 gp.stackguard0 = 0
5369 if valgrindenabled {
5370 valgrindDeregisterStack(gp.valgrindStackID)
5371 gp.valgrindStackID = 0
5372 }
5373 }
5374
5375 pp.gFree.push(gp)
5376 if pp.gFree.size >= 64 {
5377 var (
5378 stackQ gQueue
5379 noStackQ gQueue
5380 )
5381 for pp.gFree.size >= 32 {
5382 gp := pp.gFree.pop()
5383 if gp.stack.lo == 0 {
5384 noStackQ.push(gp)
5385 } else {
5386 stackQ.push(gp)
5387 }
5388 }
5389 lock(&sched.gFree.lock)
5390 sched.gFree.noStack.pushAll(noStackQ)
5391 sched.gFree.stack.pushAll(stackQ)
5392 unlock(&sched.gFree.lock)
5393 }
5394 }
5395
5396
5397
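// gfget retrieves a g from pp's free list, refilling the local list from the
// global free lists when it is empty and allocating a fresh stack if the cached g
// does not already have one of the standard starting size.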
5398 func gfget(pp *p) *g {
5399 retry:
5400 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5401 lock(&sched.gFree.lock)
5402
5403 for pp.gFree.size < 32 {
5404
5405 gp := sched.gFree.stack.pop()
5406 if gp == nil {
5407 gp = sched.gFree.noStack.pop()
5408 if gp == nil {
5409 break
5410 }
5411 }
5412 pp.gFree.push(gp)
5413 }
5414 unlock(&sched.gFree.lock)
5415 goto retry
5416 }
5417 gp := pp.gFree.pop()
5418 if gp == nil {
5419 return nil
5420 }
5421 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5422
5423
5424
5425 systemstack(func() {
5426 stackfree(gp.stack)
5427 gp.stack.lo = 0
5428 gp.stack.hi = 0
5429 gp.stackguard0 = 0
5430 if valgrindenabled {
5431 valgrindDeregisterStack(gp.valgrindStackID)
5432 gp.valgrindStackID = 0
5433 }
5434 })
5435 }
5436 if gp.stack.lo == 0 {
5437
5438 systemstack(func() {
5439 gp.stack = stackalloc(startingStackSize)
5440 if valgrindenabled {
5441 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5442 }
5443 })
5444 gp.stackguard0 = gp.stack.lo + stackGuard
5445 } else {
5446 if raceenabled {
5447 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5448 }
5449 if msanenabled {
5450 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5451 }
5452 if asanenabled {
5453 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5454 }
5455 }
5456 return gp
5457 }
5458
5459
5460 func gfpurge(pp *p) {
5461 var (
5462 stackQ gQueue
5463 noStackQ gQueue
5464 )
5465 for !pp.gFree.empty() {
5466 gp := pp.gFree.pop()
5467 if gp.stack.lo == 0 {
5468 noStackQ.push(gp)
5469 } else {
5470 stackQ.push(gp)
5471 }
5472 }
5473 lock(&sched.gFree.lock)
5474 sched.gFree.noStack.pushAll(noStackQ)
5475 sched.gFree.stack.pushAll(stackQ)
5476 unlock(&sched.gFree.lock)
5477 }
5478
5479
5480 func Breakpoint() {
5481 breakpoint()
5482 }
5483
5484
5485
5486
5487
5488
5489 func dolockOSThread() {
5490 if GOARCH == "wasm" {
5491 return
5492 }
5493 gp := getg()
5494 gp.m.lockedg.set(gp)
5495 gp.lockedm.set(gp.m)
5496 }
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
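// LockOSThread wires the calling goroutine to its current operating system
// thread: until it has called UnlockOSThread as many times as LockOSThread, it
// executes only on that thread and no other goroutine executes on it. Nested
// calls are counted in lockedExt, and overflow of that counter panics.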
5514 func LockOSThread() {
5515 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5516
5517
5518
5519 startTemplateThread()
5520 }
5521 gp := getg()
5522 gp.m.lockedExt++
5523 if gp.m.lockedExt == 0 {
5524 gp.m.lockedExt--
5525 panic("LockOSThread nesting overflow")
5526 }
5527 dolockOSThread()
5528 }
5529
5530
5531 func lockOSThread() {
5532 getg().m.lockedInt++
5533 dolockOSThread()
5534 }
5535
5536
5537
5538
5539
5540
5541 func dounlockOSThread() {
5542 if GOARCH == "wasm" {
5543 return
5544 }
5545 gp := getg()
5546 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5547 return
5548 }
5549 gp.m.lockedg = 0
5550 gp.lockedm = 0
5551 }
5552
5553
5554
5555
5556
5557
5558
5559
5560
5561
5562
5563
5564
5565
5566
5567 func UnlockOSThread() {
5568 gp := getg()
5569 if gp.m.lockedExt == 0 {
5570 return
5571 }
5572 gp.m.lockedExt--
5573 dounlockOSThread()
5574 }
5575
5576
5577 func unlockOSThread() {
5578 gp := getg()
5579 if gp.m.lockedInt == 0 {
5580 systemstack(badunlockosthread)
5581 }
5582 gp.m.lockedInt--
5583 dounlockOSThread()
5584 }
5585
5586 func badunlockosthread() {
5587 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5588 }
5589
5590 func gcount(includeSys bool) int32 {
5591 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5592 if !includeSys {
5593 n -= sched.ngsys.Load()
5594 }
5595 for _, pp := range allp {
5596 n -= pp.gFree.size
5597 }
5598
5599
5600
5601 if n < 1 {
5602 n = 1
5603 }
5604 return n
5605 }
5606
5607
5608
5609
5610
5611 func goroutineleakcount() int {
5612 return work.goroutineLeak.count
5613 }
5614
5615 func mcount() int32 {
5616 return int32(sched.mnext - sched.nmfreed)
5617 }
5618
5619 var prof struct {
5620 signalLock atomic.Uint32
5621
5622
5623
5624 hz atomic.Int32
5625 }
5626
5627 func _System() { _System() }
5628 func _ExternalCode() { _ExternalCode() }
5629 func _LostExternalCode() { _LostExternalCode() }
5630 func _GC() { _GC() }
5631 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5632 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5633 func _VDSO() { _VDSO() }
5634
5635
5636
5637
5638
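// sigprof records a CPU profiling sample for the interrupted goroutine. It is
// called from the SIGPROF signal handler, so it may run during STW and must be
// careful about what state it touches; when a real traceback is unavailable it
// falls back to sentinel frames such as _System, _GC, _ExternalCode, and _VDSO.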
5639 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5640 if prof.hz.Load() == 0 {
5641 return
5642 }
5643
5644
5645
5646
5647 if mp != nil && mp.profilehz == 0 {
5648 return
5649 }
5650
5651
5652
5653
5654
5655
5656
5657 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5658 if f := findfunc(pc); f.valid() {
5659 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5660 cpuprof.lostAtomic++
5661 return
5662 }
5663 }
5664 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5665
5666
5667
5668 cpuprof.lostAtomic++
5669 return
5670 }
5671 }
5672
5673
5674
5675
5676
5677
5678
5679 getg().m.mallocing++
5680
5681 var u unwinder
5682 var stk [maxCPUProfStack]uintptr
5683 n := 0
5684 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5685 cgoOff := 0
5686
5687
5688
5689
5690
5691 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5692 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5693 cgoOff++
5694 }
5695 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5696 mp.cgoCallers[0] = 0
5697 }
5698
5699
5700 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5701 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5702
5703
5704 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5705 } else if mp != nil && mp.vdsoSP != 0 {
5706
5707
5708 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5709 } else {
5710 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5711 }
5712 n += tracebackPCs(&u, 0, stk[n:])
5713
5714 if n <= 0 {
5715
5716
5717 n = 2
5718 if inVDSOPage(pc) {
5719 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5720 } else if pc > firstmoduledata.etext {
5721
5722 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5723 }
5724 stk[0] = pc
5725 if mp.preemptoff != "" {
5726 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5727 } else {
5728 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5729 }
5730 }
5731
5732 if prof.hz.Load() != 0 {
5733
5734
5735
5736 var tagPtr *unsafe.Pointer
5737 if gp != nil && gp.m != nil && gp.m.curg != nil {
5738 tagPtr = &gp.m.curg.labels
5739 }
5740 cpuprof.add(tagPtr, stk[:n])
5741
5742 gprof := gp
5743 var mp *m
5744 var pp *p
5745 if gp != nil && gp.m != nil {
5746 if gp.m.curg != nil {
5747 gprof = gp.m.curg
5748 }
5749 mp = gp.m
5750 pp = gp.m.p.ptr()
5751 }
5752 traceCPUSample(gprof, mp, pp, stk[:n])
5753 }
5754 getg().m.mallocing--
5755 }
5756
5757
5758
5759 func setcpuprofilerate(hz int32) {
5760
5761 if hz < 0 {
5762 hz = 0
5763 }
5764
5765
5766
5767 gp := getg()
5768 gp.m.locks++
5769
5770
5771
5772
5773 setThreadCPUProfiler(0)
5774
5775 for !prof.signalLock.CompareAndSwap(0, 1) {
5776 osyield()
5777 }
5778 if prof.hz.Load() != hz {
5779 setProcessCPUProfiler(hz)
5780 prof.hz.Store(hz)
5781 }
5782 prof.signalLock.Store(0)
5783
5784 lock(&sched.lock)
5785 sched.profilehz = hz
5786 unlock(&sched.lock)
5787
5788 if hz != 0 {
5789 setThreadCPUProfiler(hz)
5790 }
5791
5792 gp.m.locks--
5793 }
5794
5795
5796
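// init initializes pp, which may be freshly allocated or previously destroyed,
// and leaves it in status _Pgcstop.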
5797 func (pp *p) init(id int32) {
5798 pp.id = id
5799 pp.gcw.id = id
5800 pp.status = _Pgcstop
5801 pp.sudogcache = pp.sudogbuf[:0]
5802 pp.deferpool = pp.deferpoolbuf[:0]
5803 pp.wbBuf.reset()
5804 if pp.mcache == nil {
5805 if id == 0 {
5806 if mcache0 == nil {
5807 throw("missing mcache?")
5808 }
5809
5810
5811 pp.mcache = mcache0
5812 } else {
5813 pp.mcache = allocmcache()
5814 }
5815 }
5816 if raceenabled && pp.raceprocctx == 0 {
5817 if id == 0 {
5818 pp.raceprocctx = raceprocctx0
5819 raceprocctx0 = 0
5820 } else {
5821 pp.raceprocctx = raceproccreate()
5822 }
5823 }
5824 lockInit(&pp.timers.mu, lockRankTimers)
5825
5826
5827
5828 timerpMask.set(id)
5829
5830
5831 idlepMask.clear(id)
5832 }
5833
5834
5835
5836
5837
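// destroy releases all resources associated with pp (run queue, timers, caches,
// and free Gs) and transitions it to _Pdead. sched.lock must be held and the
// world must be stopped.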
5838 func (pp *p) destroy() {
5839 assertLockHeld(&sched.lock)
5840 assertWorldStopped()
5841
5842
5843 for pp.runqhead != pp.runqtail {
5844
5845 pp.runqtail--
5846 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5847
5848 globrunqputhead(gp)
5849 }
5850 if pp.runnext != 0 {
5851 globrunqputhead(pp.runnext.ptr())
5852 pp.runnext = 0
5853 }
5854
5855
5856 getg().m.p.ptr().timers.take(&pp.timers)
5857
5858
5859
5860 if phase := gcphase; phase != _GCoff {
5861 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5862 throw("P destroyed while GC is running")
5863 }
5864
5865 pp.gcw.spanq.destroy()
5866
5867 clear(pp.sudogbuf[:])
5868 pp.sudogcache = pp.sudogbuf[:0]
5869 pp.pinnerCache = nil
5870 clear(pp.deferpoolbuf[:])
5871 pp.deferpool = pp.deferpoolbuf[:0]
5872 systemstack(func() {
5873 for i := 0; i < pp.mspancache.len; i++ {
5874
5875 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5876 }
5877 pp.mspancache.len = 0
5878 lock(&mheap_.lock)
5879 pp.pcache.flush(&mheap_.pages)
5880 unlock(&mheap_.lock)
5881 })
5882 freemcache(pp.mcache)
5883 pp.mcache = nil
5884 gfpurge(pp)
5885 if raceenabled {
5886 if pp.timers.raceCtx != 0 {
5887
5888
5889
5890
5891
5892 mp := getg().m
5893 phold := mp.p.ptr()
5894 mp.p.set(pp)
5895
5896 racectxend(pp.timers.raceCtx)
5897 pp.timers.raceCtx = 0
5898
5899 mp.p.set(phold)
5900 }
5901 raceprocdestroy(pp.raceprocctx)
5902 pp.raceprocctx = 0
5903 }
5904 pp.gcAssistTime = 0
5905 gcCleanups.queued += pp.cleanupsQueued
5906 pp.cleanupsQueued = 0
5907 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5908 pp.goroutinesCreated = 0
5909 pp.xRegs.free()
5910 pp.status = _Pdead
5911 }
5912
5913
5914
5915
5916
5917
5918
5919
5920
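// procresize changes the number of Ps to nprocs, initializing any new Ps and
// destroying the surplus ones. sched.lock must be held and the world must be
// stopped. It returns the list of Ps that still have local work; the caller must
// arrange for Ms to run them.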
5921 func procresize(nprocs int32) *p {
5922 assertLockHeld(&sched.lock)
5923 assertWorldStopped()
5924
5925 old := gomaxprocs
5926 if old < 0 || nprocs <= 0 {
5927 throw("procresize: invalid arg")
5928 }
5929 trace := traceAcquire()
5930 if trace.ok() {
5931 trace.Gomaxprocs(nprocs)
5932 traceRelease(trace)
5933 }
5934
5935
5936 now := nanotime()
5937 if sched.procresizetime != 0 {
5938 sched.totaltime += int64(old) * (now - sched.procresizetime)
5939 }
5940 sched.procresizetime = now
5941
5942
5943 if nprocs > int32(len(allp)) {
5944
5945
5946 lock(&allpLock)
5947 if nprocs <= int32(cap(allp)) {
5948 allp = allp[:nprocs]
5949 } else {
5950 nallp := make([]*p, nprocs)
5951
5952
5953 copy(nallp, allp[:cap(allp)])
5954 allp = nallp
5955 }
5956
5957 idlepMask = idlepMask.resize(nprocs)
5958 timerpMask = timerpMask.resize(nprocs)
5959 work.spanqMask = work.spanqMask.resize(nprocs)
5960 unlock(&allpLock)
5961 }
5962
5963
5964 for i := old; i < nprocs; i++ {
5965 pp := allp[i]
5966 if pp == nil {
5967 pp = new(p)
5968 }
5969 pp.init(i)
5970 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5971 }
5972
5973 gp := getg()
5974 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5975
5976 gp.m.p.ptr().status = _Prunning
5977 gp.m.p.ptr().mcache.prepareForSweep()
5978 } else {
5979
5980
5981
5982
5983
5984 if gp.m.p != 0 {
5985 trace := traceAcquire()
5986 if trace.ok() {
5987
5988
5989
5990 trace.GoSched()
5991 trace.ProcStop(gp.m.p.ptr())
5992 traceRelease(trace)
5993 }
5994 gp.m.p.ptr().m = 0
5995 }
5996 gp.m.p = 0
5997 pp := allp[0]
5998 pp.m = 0
5999 pp.status = _Pidle
6000 acquirep(pp)
6001 trace := traceAcquire()
6002 if trace.ok() {
6003 trace.GoStart()
6004 traceRelease(trace)
6005 }
6006 }
6007
6008
6009 mcache0 = nil
6010
6011
6012 for i := nprocs; i < old; i++ {
6013 pp := allp[i]
6014 pp.destroy()
6015
6016 }
6017
6018
6019 if int32(len(allp)) != nprocs {
6020 lock(&allpLock)
6021 allp = allp[:nprocs]
6022 idlepMask = idlepMask.resize(nprocs)
6023 timerpMask = timerpMask.resize(nprocs)
6024 work.spanqMask = work.spanqMask.resize(nprocs)
6025 unlock(&allpLock)
6026 }
6027
6028 var runnablePs *p
6029 var runnablePsNeedM *p
6030 for i := nprocs - 1; i >= 0; i-- {
6031 pp := allp[i]
6032 if gp.m.p.ptr() == pp {
6033 continue
6034 }
6035 pp.status = _Pidle
6036 if runqempty(pp) {
6037 pidleput(pp, now)
6038 continue
6039 }
6040
6041
6042
6043
6044
6045
6046
6047
6048 var mp *m
6049 if oldm := pp.oldm.get(); oldm != nil {
6050
6051 mp = mgetSpecific(oldm)
6052 }
6053 if mp == nil {
6054
6055 pp.link.set(runnablePsNeedM)
6056 runnablePsNeedM = pp
6057 continue
6058 }
6059 pp.m.set(mp)
6060 pp.link.set(runnablePs)
6061 runnablePs = pp
6062 }
6063 for runnablePsNeedM != nil {
6064 pp := runnablePsNeedM
6065 runnablePsNeedM = pp.link.ptr()
6066
6067 mp := mget()
6068 pp.m.set(mp)
6069 pp.link.set(runnablePs)
6070 runnablePs = pp
6071 }
6072
6073 stealOrder.reset(uint32(nprocs))
6074 var int32p *int32 = &gomaxprocs
6075 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6076 if old != nprocs {
6077
6078 gcCPULimiter.resetCapacity(now, nprocs)
6079 }
6080 return runnablePs
6081 }
6082
6083
6084
6085
6086
6087
6088
6089 func acquirep(pp *p) {
6090
6091 acquirepNoTrace(pp)
6092
6093
6094 trace := traceAcquire()
6095 if trace.ok() {
6096 trace.ProcStart()
6097 traceRelease(trace)
6098 }
6099 }
6100
6101
6102
6103
6104 func acquirepNoTrace(pp *p) {
6105
6106 wirep(pp)
6107
6108
6109
6110
6111
6112
6113 pp.oldm = pp.m.ptr().self
6114
6115
6116
6117 pp.mcache.prepareForSweep()
6118 }
6119
6120
6121
6122
6123
6124
6125
6126 func wirep(pp *p) {
6127 gp := getg()
6128
6129 if gp.m.p != 0 {
6130
6131
6132 systemstack(func() {
6133 throw("wirep: already in go")
6134 })
6135 }
6136 if pp.m != 0 || pp.status != _Pidle {
6137
6138
6139 systemstack(func() {
6140 id := int64(0)
6141 if pp.m != 0 {
6142 id = pp.m.ptr().id
6143 }
6144 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6145 throw("wirep: invalid p state")
6146 })
6147 }
6148 gp.m.p.set(pp)
6149 pp.m.set(gp.m)
6150 pp.status = _Prunning
6151 }
6152
6153
6154 func releasep() *p {
6155 trace := traceAcquire()
6156 if trace.ok() {
6157 trace.ProcStop(getg().m.p.ptr())
6158 traceRelease(trace)
6159 }
6160 return releasepNoTrace()
6161 }
6162
6163
6164 func releasepNoTrace() *p {
6165 gp := getg()
6166
6167 if gp.m.p == 0 {
6168 throw("releasep: invalid arg")
6169 }
6170 pp := gp.m.p.ptr()
6171 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6172 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6173 throw("releasep: invalid p state")
6174 }
6175 gp.m.p = 0
6176 pp.m = 0
6177 pp.status = _Pidle
6178 return pp
6179 }
6180
6181 func incidlelocked(v int32) {
6182 lock(&sched.lock)
6183 sched.nmidlelocked += v
6184 if v > 0 {
6185 checkdead()
6186 }
6187 unlock(&sched.lock)
6188 }
6189
6190
6191
6192
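// checkdead detects deadlock: if no Ms are running user code and no goroutine can
// ever become runnable again, it throws. sched.lock must be held. Under faketime
// it instead advances the fake clock and wakes an M when only timers remain.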
6193 func checkdead() {
6194 assertLockHeld(&sched.lock)
6195
6196
6197
6198
6199
6200
6201 if (islibrary || isarchive) && GOARCH != "wasm" {
6202 return
6203 }
6204
6205
6206
6207
6208
6209 if panicking.Load() > 0 {
6210 return
6211 }
6212
6213
6214
6215
6216
6217 var run0 int32
6218 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6219 run0 = 1
6220 }
6221
6222 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6223 if run > run0 {
6224 return
6225 }
6226 if run < 0 {
6227 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6228 unlock(&sched.lock)
6229 throw("checkdead: inconsistent counts")
6230 }
6231
6232 grunning := 0
6233 forEachG(func(gp *g) {
6234 if isSystemGoroutine(gp, false) {
6235 return
6236 }
6237 s := readgstatus(gp)
6238 switch s &^ _Gscan {
6239 case _Gwaiting,
6240 _Gpreempted:
6241 grunning++
6242 case _Grunnable,
6243 _Grunning,
6244 _Gsyscall:
6245 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6246 unlock(&sched.lock)
6247 throw("checkdead: runnable g")
6248 }
6249 })
6250 if grunning == 0 {
6251 unlock(&sched.lock)
6252 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6253 }
6254
6255
6256 if faketime != 0 {
6257 if when := timeSleepUntil(); when < maxWhen {
6258 faketime = when
6259
6260
6261 pp, _ := pidleget(faketime)
6262 if pp == nil {
6263
6264
6265 unlock(&sched.lock)
6266 throw("checkdead: no p for timer")
6267 }
6268 mp := mget()
6269 if mp == nil {
6270
6271
6272 unlock(&sched.lock)
6273 throw("checkdead: no m for timer")
6274 }
6275
6276
6277
6278 sched.nmspinning.Add(1)
6279 mp.spinning = true
6280 mp.nextp.set(pp)
6281 notewakeup(&mp.park)
6282 return
6283 }
6284 }
6285
6286
6287 for _, pp := range allp {
6288 if len(pp.timers.heap) > 0 {
6289 return
6290 }
6291 }
6292
6293 unlock(&sched.lock)
6294 fatal("all goroutines are asleep - deadlock!")
6295 }
6296
6297
6298
6299
6300
6301
6302 var forcegcperiod int64 = 2 * 60 * 1e9
6303
6304
6305
6306
6307 const haveSysmon = GOARCH != "wasm"
6308
6309
6310
6311
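// sysmon is the system monitor: it runs on a dedicated M without a P, so write
// barriers are not allowed. It periodically polls the network if the scheduler
// has not, retakes Ps blocked in syscalls or running a G for too long, triggers
// forced GCs, wakes the scavenger, and refreshes the default GOMAXPROCS, backing
// its polling interval off to at most 10ms when idle.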
6312 func sysmon() {
6313 lock(&sched.lock)
6314 sched.nmsys++
6315 checkdead()
6316 unlock(&sched.lock)
6317
6318 lastgomaxprocs := int64(0)
6319 lasttrace := int64(0)
6320 idle := 0
6321 delay := uint32(0)
6322
6323 for {
6324 if idle == 0 {
6325 delay = 20
6326 } else if idle > 50 {
6327 delay *= 2
6328 }
6329 if delay > 10*1000 {
6330 delay = 10 * 1000
6331 }
6332 usleep(delay)
6333
6334
6335
6336
6337
6338
6339
6340
6341
6342
6343
6344
6345
6346
6347
6348
6349 now := nanotime()
6350 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6351 lock(&sched.lock)
6352 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6353 syscallWake := false
6354 next := timeSleepUntil()
6355 if next > now {
6356 sched.sysmonwait.Store(true)
6357 unlock(&sched.lock)
6358
6359
6360 sleep := forcegcperiod / 2
6361 if next-now < sleep {
6362 sleep = next - now
6363 }
6364 shouldRelax := sleep >= osRelaxMinNS
6365 if shouldRelax {
6366 osRelax(true)
6367 }
6368 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6369 if shouldRelax {
6370 osRelax(false)
6371 }
6372 lock(&sched.lock)
6373 sched.sysmonwait.Store(false)
6374 noteclear(&sched.sysmonnote)
6375 }
6376 if syscallWake {
6377 idle = 0
6378 delay = 20
6379 }
6380 }
6381 unlock(&sched.lock)
6382 }
6383
6384 lock(&sched.sysmonlock)
6385
6386
6387 now = nanotime()
6388
6389
6390 if *cgo_yield != nil {
6391 asmcgocall(*cgo_yield, nil)
6392 }
6393
6394 lastpoll := sched.lastpoll.Load()
6395 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6396 sched.lastpoll.CompareAndSwap(lastpoll, now)
6397 list, delta := netpoll(0)
6398 if !list.empty() {
6399
6400
6401
6402
6403
6404
6405
6406 incidlelocked(-1)
6407 injectglist(&list)
6408 incidlelocked(1)
6409 netpollAdjustWaiters(delta)
6410 }
6411 }
6412
6413 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6414 sysmonUpdateGOMAXPROCS()
6415 lastgomaxprocs = now
6416 }
6417 if scavenger.sysmonWake.Load() != 0 {
6418
6419 scavenger.wake()
6420 }
6421
6422
6423 if retake(now) != 0 {
6424 idle = 0
6425 } else {
6426 idle++
6427 }
6428
6429 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6430 lock(&forcegc.lock)
6431 forcegc.idle.Store(false)
6432 var list gList
6433 list.push(forcegc.g)
6434 injectglist(&list)
6435 unlock(&forcegc.lock)
6436 }
6437 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6438 lasttrace = now
6439 schedtrace(debug.scheddetail > 0)
6440 }
6441 unlock(&sched.sysmonlock)
6442 }
6443 }
6444
6445 type sysmontick struct {
6446 schedtick uint32
6447 syscalltick uint32
6448 schedwhen int64
6449 syscallwhen int64
6450 }
6451
6452
6453
6454 const forcePreemptNS = 10 * 1000 * 1000
6455
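// retake preempts Ps whose goroutine has been running longer than forcePreemptNS
// and retakes Ps that have sat in a syscall long enough that their work should be
// handed to another M. It returns the number of Ps retaken from syscalls.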
6456 func retake(now int64) uint32 {
6457 n := 0
6458
6459
6460 lock(&allpLock)
6461
6462
6463
6464 for i := 0; i < len(allp); i++ {
6465
6466
6467
6468
6469
6470
6471
6472
6473 pp := allp[i]
6474 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6475
6476
6477 continue
6478 }
6479 pd := &pp.sysmontick
6480 sysretake := false
6481
6482
6483
6484
6485
6486 schedt := int64(pp.schedtick)
6487 if int64(pd.schedtick) != schedt {
6488 pd.schedtick = uint32(schedt)
6489 pd.schedwhen = now
6490 } else if pd.schedwhen+forcePreemptNS <= now {
6491 preemptone(pp)
6492
6493
6494
6495
6496 sysretake = true
6497 }
6498
6499
6500 unlock(&allpLock)
6501
6502
6503
6504
6505
6506
6507
6508
6509 incidlelocked(-1)
6510
6511
6512 thread, ok := setBlockOnExitSyscall(pp)
6513 if !ok {
6514
6515 goto done
6516 }
6517
6518
6519 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6520 pd.syscalltick = uint32(syst)
6521 pd.syscallwhen = now
6522 thread.resume()
6523 goto done
6524 }
6525
6526
6527
6528
6529 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6530 thread.resume()
6531 goto done
6532 }
6533
6534
6535
6536 thread.takeP()
6537 thread.resume()
6538 n++
6539
6540
6541 handoffp(pp)
6542
6543
6544
6545 done:
6546 incidlelocked(1)
6547 lock(&allpLock)
6548 }
6549 unlock(&allpLock)
6550 return uint32(n)
6551 }
6552
6553
6554
6555 type syscallingThread struct {
6556 gp *g
6557 mp *m
6558 pp *p
6559 status uint32
6560 }
6561
6562
6563
6564
6565
6566
6567
6568
6569
6570
6571
6572
6573
6574
6575
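// setBlockOnExitSyscall attempts to pin the goroutine currently in a syscall on
// pp by setting its _Gscan bit, preventing it from completing exitsyscall while
// the caller inspects or takes the P. On success it returns a handle for the
// thread; the caller must eventually call resume on it, optionally after takeP
// or gcstopP.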
6576 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6577 if pp.status != _Prunning {
6578 return syscallingThread{}, false
6579 }
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591 mp := pp.m.ptr()
6592 if mp == nil {
6593
6594 return syscallingThread{}, false
6595 }
6596 gp := mp.curg
6597 if gp == nil {
6598
6599 return syscallingThread{}, false
6600 }
6601 status := readgstatus(gp) &^ _Gscan
6602
6603
6604
6605
6606 if status != _Gsyscall && status != _Gdeadextra {
6607
6608 return syscallingThread{}, false
6609 }
6610 if !castogscanstatus(gp, status, status|_Gscan) {
6611
6612 return syscallingThread{}, false
6613 }
6614 if gp.m != mp || gp.m.p.ptr() != pp {
6615
6616 casfrom_Gscanstatus(gp, status|_Gscan, status)
6617 return syscallingThread{}, false
6618 }
6619 return syscallingThread{gp, mp, pp, status}, true
6620 }
6621
6622
6623
6624
6625
6626 func (s syscallingThread) gcstopP() {
6627 assertLockHeld(&sched.lock)
6628
6629 s.releaseP(_Pgcstop)
6630 s.pp.gcStopTime = nanotime()
6631 sched.stopwait--
6632 }
6633
6634
6635
6636 func (s syscallingThread) takeP() {
6637 s.releaseP(_Pidle)
6638 }
6639
6640
6641
6642
6643 func (s syscallingThread) releaseP(state uint32) {
6644 if state != _Pidle && state != _Pgcstop {
6645 throw("attempted to release P into a bad state")
6646 }
6647 trace := traceAcquire()
6648 s.pp.m = 0
6649 s.mp.p = 0
6650 atomic.Store(&s.pp.status, state)
6651 if trace.ok() {
6652 trace.ProcSteal(s.pp)
6653 traceRelease(trace)
6654 }
6655 sched.nGsyscallNoP.Add(1)
6656 s.pp.syscalltick++
6657 }
6658
6659
6660 func (s syscallingThread) resume() {
6661 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6662 }
6663
6664
6665
6666
6667
6668
6669 func preemptall() bool {
6670 res := false
6671 for _, pp := range allp {
6672 if pp.status != _Prunning {
6673 continue
6674 }
6675 if preemptone(pp) {
6676 res = true
6677 }
6678 }
6679 return res
6680 }
6681
6682
6683
6684
6685
6686
6687
6688
6689
6690
6691
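// preemptone requests preemption of the goroutine running on pp by setting its
// preempt flag and poisoning its stack guard with stackPreempt so the next stack
// check yields; where supported it also sends an async preemption signal to the
// M. The request is best-effort and may be missed or ignored.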
6692 func preemptone(pp *p) bool {
6693 mp := pp.m.ptr()
6694 if mp == nil || mp == getg().m {
6695 return false
6696 }
6697 gp := mp.curg
6698 if gp == nil || gp == mp.g0 {
6699 return false
6700 }
6701 if readgstatus(gp)&^_Gscan == _Gsyscall {
6702
6703 return false
6704 }
6705
6706 gp.preempt = true
6707
6708
6709
6710
6711
6712 gp.stackguard0 = stackPreempt
6713
6714
6715 if preemptMSupported && debug.asyncpreemptoff == 0 {
6716 pp.preempt = true
6717 preemptM(mp)
6718 }
6719
6720 return true
6721 }
6722
6723 var starttime int64
6724
6725 func schedtrace(detailed bool) {
6726 now := nanotime()
6727 if starttime == 0 {
6728 starttime = now
6729 }
6730
6731 lock(&sched.lock)
6732 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6733 if detailed {
6734 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6735 }
6736
6737
6738
6739 for i, pp := range allp {
6740 h := atomic.Load(&pp.runqhead)
6741 t := atomic.Load(&pp.runqtail)
6742 if detailed {
6743 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6744 mp := pp.m.ptr()
6745 if mp != nil {
6746 print(mp.id)
6747 } else {
6748 print("nil")
6749 }
6750 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6751 } else {
6752
6753
6754 print(" ")
6755 if i == 0 {
6756 print("[ ")
6757 }
6758 print(t - h)
6759 if i == len(allp)-1 {
6760 print(" ]")
6761 }
6762 }
6763 }
6764
6765 if !detailed {
6766
6767 print(" schedticks=[ ")
6768 for _, pp := range allp {
6769 print(pp.schedtick)
6770 print(" ")
6771 }
6772 print("]\n")
6773 }
6774
6775 if !detailed {
6776 unlock(&sched.lock)
6777 return
6778 }
6779
6780 for mp := allm; mp != nil; mp = mp.alllink {
6781 pp := mp.p.ptr()
6782 print(" M", mp.id, ": p=")
6783 if pp != nil {
6784 print(pp.id)
6785 } else {
6786 print("nil")
6787 }
6788 print(" curg=")
6789 if mp.curg != nil {
6790 print(mp.curg.goid)
6791 } else {
6792 print("nil")
6793 }
6794 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6795 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6796 print(lockedg.goid)
6797 } else {
6798 print("nil")
6799 }
6800 print("\n")
6801 }
6802
6803 forEachG(func(gp *g) {
6804 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6805 if gp.m != nil {
6806 print(gp.m.id)
6807 } else {
6808 print("nil")
6809 }
6810 print(" lockedm=")
6811 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6812 print(lockedm.id)
6813 } else {
6814 print("nil")
6815 }
6816 print("\n")
6817 })
6818 unlock(&sched.lock)
6819 }
6820 // updateMaxProcsGState is the state of the goroutine that applies automatic GOMAXPROCS updates.
6821 type updateMaxProcsGState struct {
6822 lock mutex
6823 g *g
6824 idle atomic.Bool
6825 // procs is the GOMAXPROCS value to apply next, set by
6826 // sysmonUpdateGOMAXPROCS before waking the goroutine.
6827 procs int32
6828 }
6829
6830 var (
6831 // updatemaxprocs counts programs that set the updatemaxprocs GODEBUG to a
6832 // non-default value.
6833 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6834
6835 // updateMaxProcsG is the shared state between sysmon and the GOMAXPROCS
6836 // update goroutine.
6837 updateMaxProcsG updateMaxProcsGState
6838
6839
6840
6841
6842
6843
6844
6845
6846
6847
6848
6849
6850
6851
6852
6853
6854
6855
6856
6857
6858
6859
6860
6861
6862
6863
6864
6865
6866
6867
6868
6869
6870
6871
6872
6873
6874
6875
6876
6877
6878
6879
6880
6881
6882
6883 // computeMaxProcsLock serializes the computation of the default GOMAXPROCS
6884 // value (see sysmonUpdateGOMAXPROCS), which may be comparatively
6885 // expensive.
6886 computeMaxProcsLock mutex
6887 )
6888
6889
6890 // defaultGOMAXPROCSUpdateEnable starts the goroutine that keeps GOMAXPROCS
6891 // up to date with the default value, unless disabled via GODEBUG.
6892 func defaultGOMAXPROCSUpdateEnable() {
6893 if debug.updatemaxprocs == 0 {
6894 // GODEBUG=updatemaxprocs=0 disables automatic GOMAXPROCS updates.
6895 // Count the non-default setting and skip starting the update goroutine.
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905 updatemaxprocs.IncNonDefault()
6906 return
6907 }
6908
6909 go updateMaxProcsGoroutine()
6910 }
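// Illustrative note (not part of the runtime): with GODEBUG=updatemaxprocs=0
// the update goroutine above is never started, so GOMAXPROCS only changes
// when the program calls runtime.GOMAXPROCS explicitly.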
6911
6912 func updateMaxProcsGoroutine() {
6913 updateMaxProcsG.g = getg()
6914 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
6915 for {
6916 lock(&updateMaxProcsG.lock)
6917 if updateMaxProcsG.idle.Load() {
6918 throw("updateMaxProcsGoroutine: phase error")
6919 }
6920 updateMaxProcsG.idle.Store(true)
6921 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
6922 // sysmon woke us because the default GOMAXPROCS value has changed.
6923 // Stop the world and apply it.
6924 stw := stopTheWorldGC(stwGOMAXPROCS)
6925 // If the user called runtime.GOMAXPROCS in the meantime, automatic
6926 // updates are permanently disabled; exit the goroutine.
6927 lock(&sched.lock)
6928 custom := sched.customGOMAXPROCS
6929 unlock(&sched.lock)
6930 if custom {
6931 startTheWorldGC(stw)
6932 return
6933 }
6934
6935
6936
6937 // Publish the new value via newprocs; startTheWorldGC will pick it up
6938 // and resize the set of Ps.
6939 newprocs = updateMaxProcsG.procs
6940 lock(&sched.lock)
6941 sched.customGOMAXPROCS = false
6942 unlock(&sched.lock)
6943
6944 startTheWorldGC(stw)
6945 }
6946 }
6947
6948 func sysmonUpdateGOMAXPROCS() {
6949 // Serialize recomputation of the default GOMAXPROCS value.
6950 lock(&computeMaxProcsLock)
6951 // If the user has set GOMAXPROCS explicitly, automatic updates are
6952 // disabled and there is nothing to do.
6953 lock(&sched.lock)
6954 custom := sched.customGOMAXPROCS
6955 curr := gomaxprocs
6956 unlock(&sched.lock)
6957 if custom {
6958 unlock(&computeMaxProcsLock)
6959 return
6960 }
6961 // Recompute the default value. This is done outside sched.lock because it
6962 // may be relatively expensive.
6963 procs := defaultGOMAXPROCS(0)
6964 unlock(&computeMaxProcsLock)
6965 if procs == curr {
6966 // No change; nothing to do.
6967 return
6968 }
6969
6970 // sysmon cannot stop the world itself, so hand the new value to the
6971 // update goroutine and wake it if it is idle. If it is busy, it is
6972 // already applying an update; a later sysmon tick will retry.
6973 if updateMaxProcsG.idle.Load() {
6974 lock(&updateMaxProcsG.lock)
6975 updateMaxProcsG.procs = procs
6976 updateMaxProcsG.idle.Store(false)
6977 var list gList
6978 list.push(updateMaxProcsG.g)
6979 injectglist(&list)
6980 unlock(&updateMaxProcsG.lock)
6981 }
6982 }
6983
6984
6985 // schedEnableUser enables or disables the scheduling of user goroutines.
6986 //
6987 // This does not stop already running user goroutines, so the caller should
6988 // first stop the world when disabling user goroutines.
6989 func schedEnableUser(enable bool) {
6990 lock(&sched.lock)
6991 if sched.disable.user == !enable {
6992 unlock(&sched.lock)
6993 return
6994 }
6995 sched.disable.user = !enable
6996 if enable {
6997 n := sched.disable.runnable.size
6998 globrunqputbatch(&sched.disable.runnable)
6999 unlock(&sched.lock)
7000 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7001 startm(nil, false, false)
7002 }
7003 } else {
7004 unlock(&sched.lock)
7005 }
7006 }
7007
7008
7009 // schedEnabled reports whether gp should be scheduled; it returns false if
7010 // scheduling of gp is disabled.
7011 // sched.lock must be held.
7012 func schedEnabled(gp *g) bool {
7013 assertLockHeld(&sched.lock)
7014
7015 if sched.disable.user {
7016 return isSystemGoroutine(gp, true)
7017 }
7018 return true
7019 }
7020
7021
7022
7023 // mput puts mp on the midle list.
7024 // sched.lock must be held.
7025 // May run during a STW, so write barriers are not allowed.
7026 func mput(mp *m) {
7027 assertLockHeld(&sched.lock)
7028
7029 sched.midle.push(unsafe.Pointer(mp))
7030 sched.nmidle++
7031 checkdead()
7032 }
7033
7034
7035
7036 // mget tries to get an M from the midle list.
7037 // It returns nil if there are no idle Ms.
7038 // sched.lock must be held.
7039 func mget() *m {
7040 assertLockHeld(&sched.lock)
7041
7042 mp := (*m)(sched.midle.pop())
7043 if mp != nil {
7044 sched.nmidle--
7045 }
7046 return mp
7047 }
7048
7049
7050
7051
7052
7053 // mgetSpecific removes a specific M, mp, from the midle list.
7054 // It returns mp on success and nil if mp is not on the list.
7055 // sched.lock must be held.
7056 func mgetSpecific(mp *m) *m {
7057 assertLockHeld(&sched.lock)
7058
7059 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7060
7061 return nil
7062 }
7063
7064 sched.midle.remove(unsafe.Pointer(mp))
7065 sched.nmidle--
7066
7067 return mp
7068 }
7069
7070
7071
7072 // globrunqput puts gp on the global runnable queue.
7073 // sched.lock must be held.
7074 // May run during a STW, so write barriers are not allowed.
7075 func globrunqput(gp *g) {
7076 assertLockHeld(&sched.lock)
7077
7078 sched.runq.pushBack(gp)
7079 }
7080
7081
7082
7083 // globrunqputhead puts gp at the head of the global runnable queue.
7084 // sched.lock must be held.
7085 // May run during a STW, so write barriers are not allowed.
7086 func globrunqputhead(gp *g) {
7087 assertLockHeld(&sched.lock)
7088
7089 sched.runq.push(gp)
7090 }
7091
7092
7093
7094 // globrunqputbatch appends a batch of runnable goroutines to the global
7095 // runnable queue and clears *batch.
7096 // sched.lock must be held.
7097 // May run during a STW, so write barriers are not allowed.
7098 func globrunqputbatch(batch *gQueue) {
7099 assertLockHeld(&sched.lock)
7100
7101 sched.runq.pushBackAll(*batch)
7102 *batch = gQueue{}
7103 }
7104
7105 // globrunqget removes and returns one G from the global runnable queue.
7106 // It returns nil if the queue is empty. sched.lock must be held.
7107 func globrunqget() *g {
7108 assertLockHeld(&sched.lock)
7109
7110 if sched.runq.size == 0 {
7111 return nil
7112 }
7113
7114 return sched.runq.pop()
7115 }
7116
7117 // globrunqgetbatch removes up to n Gs from the global runnable queue,
7118 // returning one directly and the rest in q. sched.lock must be held.
7119 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7120 assertLockHeld(&sched.lock)
7121
7122 if sched.runq.size == 0 {
7123 return
7124 }
7125
7126 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7127
7128 gp = sched.runq.pop()
7129 n--
7130
7131 for ; n > 0; n-- {
7132 gp1 := sched.runq.pop()
7133 q.pushBack(gp1)
7134 }
7135 return
7136 }
7137
7138 // pMask is an atomic bitmask with one bit per P.
7139 type pMask []uint32
7140
7141 // read reports whether the bit for P id is set.
7142 func (p pMask) read(id uint32) bool {
7143 word := id / 32
7144 mask := uint32(1) << (id % 32)
7145 return (atomic.Load(&p[word]) & mask) != 0
7146 }
7147
7148 // set atomically sets the bit for P id.
7149 func (p pMask) set(id int32) {
7150 word := id / 32
7151 mask := uint32(1) << (id % 32)
7152 atomic.Or(&p[word], mask)
7153 }
7154
7155 // clear atomically clears the bit for P id.
7156 func (p pMask) clear(id int32) {
7157 word := id / 32
7158 mask := uint32(1) << (id % 32)
7159 atomic.And(&p[word], ^mask)
7160 }
7161
7162 // any reports whether any bit in the mask is set.
7163 func (p pMask) any() bool {
7164 for i := range p {
7165 if atomic.Load(&p[i]) != 0 {
7166 return true
7167 }
7168 }
7169 return false
7170 }
7171
7172
7173
7174 // resize returns a pMask with room for nprocs Ps, reusing the existing
7175 // backing store when it is large enough. The returned mask may alias p.
7176 func (p pMask) resize(nprocs int32) pMask {
7177 maskWords := (nprocs + 31) / 32
7178
7179 if maskWords <= int32(cap(p)) {
7180 return p[:maskWords]
7181 }
7182 newMask := make([]uint32, maskWords)
7183
7184 copy(newMask, p)
7185 return newMask
7186 }
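// Illustrative sketch (not part of the runtime): a pMask packs one bit per P
// into 32-bit words, so for P id 37 the bit lives in word 37/32 = 1 at bit
// position 37%32 = 5. The hypothetical snippet below shows the intended use:
//
//	var mask pMask = make([]uint32, 2) // room for 64 Ps
//	mask.set(37)                       // sets bit 5 of word 1
//	ok := mask.read(37)                // true
//	mask.clear(37)                     // clears the bit again
//	_ = ok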
7187
7188
7189
7190
7191
7192 // pidleput puts idle P pp on the idle list.
7193 //
7194 // This releases ownership of pp. Once sched.lock is released, it is no
7195 // longer safe to use pp.
7196 //
7197 // now is an optional current time (0 means nanotime will be called); the
7198 // time actually used is returned. sched.lock must be held.
7199 func pidleput(pp *p, now int64) int64 {
7200 assertLockHeld(&sched.lock)
7201
7202 if !runqempty(pp) {
7203 throw("pidleput: P has non-empty run queue")
7204 }
7205 if now == 0 {
7206 now = nanotime()
7207 }
7208 if pp.timers.len.Load() == 0 {
7209 timerpMask.clear(pp.id)
7210 }
7211 idlepMask.set(pp.id)
7212 pp.link = sched.pidle
7213 sched.pidle.set(pp)
7214 sched.npidle.Add(1)
7215 if !pp.limiterEvent.start(limiterEventIdle, now) {
7216 throw("must be able to track idle limiter event")
7217 }
7218 return now
7219 }
7220
7221
7222
7223 // pidleget tries to take a P off the idle list, acquiring ownership of it.
7224 // It returns nil if the list is empty.
7225 //
7226 // now is an optional current time (0 means call nanotime if needed); the
7227 // time actually used is returned. sched.lock must be held.
7228 func pidleget(now int64) (*p, int64) {
7229 assertLockHeld(&sched.lock)
7230
7231 pp := sched.pidle.ptr()
7232 if pp != nil {
7233
7234 if now == 0 {
7235 now = nanotime()
7236 }
7237 timerpMask.set(pp.id)
7238 idlepMask.clear(pp.id)
7239 sched.pidle = pp.link
7240 sched.npidle.Add(-1)
7241 pp.limiterEvent.stop(limiterEventIdle, now)
7242 }
7243 return pp, now
7244 }
7245
7246
7247
7248
7249
7250
7251
7252 // pidlegetSpinning tries to take a P off the idle list on behalf of a
7253 // spinning M. If no P is available it sets sched.needspinning so that an M
7254 // releasing a P will know a spinning thread is wanted.
7255 // sched.lock must be held.
7256 func pidlegetSpinning(now int64) (*p, int64) {
7257 assertLockHeld(&sched.lock)
7258
7259 pp, now := pidleget(now)
7260 if pp == nil {
7261
7262
7263
7264 sched.needspinning.Store(1)
7265 return nil, now
7266 }
7267
7268 return pp, now
7269 }
7270
7271 // runqempty reports whether pp has no Gs on its local run queue.
7272 // It never returns true spuriously.
7273 func runqempty(pp *p) bool {
7274 // Defend against a race where 1) pp has G1 in runnext but runqhead ==
7275 // runqtail, 2) runqput on pp kicks G1 to the runq, 3) runqget on pp
7276 // empties runnext. Simply checking runqhead == runqtail and then
7277 // runnext == 0 could falsely report an empty queue.
7278 for {
7279 head := atomic.Load(&pp.runqhead)
7280 tail := atomic.Load(&pp.runqtail)
7281 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7282 if tail == atomic.Load(&pp.runqtail) {
7283 return head == tail && runnext == 0
7284 }
7285 }
7286 }
7287
7288
7289
7290
7291
7292
7293
7294 // randomizeScheduler randomizes some scheduling decisions (such as use of
7295 // runnext) to shake out latent ordering assumptions. It is enabled when
7296 // building with the race detector.
7297 const randomizeScheduler = raceenabled
7298
7299 // runqput tries to put g on the local runnable queue.
7300 // If next is false, runqput adds g to the tail of the runnable queue.
7301 // If next is true, runqput puts g in the pp.runnext slot.
7302 // If the run queue is full, g is put on the global queue instead.
7303 // Executed only by the owner P.
7304 func runqput(pp *p, gp *g, next bool) {
7305 if !haveSysmon && next {
7306
7307
7308
7309
7310 // Without sysmon, nothing preempts goroutines that run for too long.
7311 // A pair of goroutines repeatedly readying each other via runnext would
7312 // then share a time slice indefinitely and starve the rest of the run
7313 // queue, so don't use runnext in that configuration.
7314 next = false
7315 }
7316 if randomizeScheduler && next && randn(2) == 0 {
7317 next = false
7318 }
7319
7320 if next {
7321 retryNext:
7322 oldnext := pp.runnext
7323 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7324 goto retryNext
7325 }
7326 if oldnext == 0 {
7327 return
7328 }
7329 // Kick the old runnext out to the regular run queue.
7330 gp = oldnext.ptr()
7331 }
7332
7333 retry:
7334 h := atomic.LoadAcq(&pp.runqhead)
7335 t := pp.runqtail
7336 if t-h < uint32(len(pp.runq)) {
7337 pp.runq[t%uint32(len(pp.runq))].set(gp)
7338 atomic.StoreRel(&pp.runqtail, t+1)
7339 return
7340 }
7341 if runqputslow(pp, gp, h, t) {
7342 return
7343 }
7344 // The queue is not full; the put above must succeed on retry.
7345 goto retry
7346 }
7347
7348 // runqputslow puts gp and a batch of work from the local runnable queue on
7349 // the global queue. Executed only by the owner P.
7350 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7351 var batch [len(pp.runq)/2 + 1]*g
7352
7353 // First, grab a batch from the local queue.
7354 n := t - h
7355 n = n / 2
7356 if n != uint32(len(pp.runq)/2) {
7357 throw("runqputslow: queue is not full")
7358 }
7359 for i := uint32(0); i < n; i++ {
7360 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7361 }
7362 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7363 return false
7364 }
7365 batch[n] = gp
7366
7367 if randomizeScheduler {
7368 for i := uint32(1); i <= n; i++ {
7369 j := cheaprandn(i + 1)
7370 batch[i], batch[j] = batch[j], batch[i]
7371 }
7372 }
7373
7374 // Link the goroutines into a chain via schedlink.
7375 for i := uint32(0); i < n; i++ {
7376 batch[i].schedlink.set(batch[i+1])
7377 }
7378
7379 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7380
7381 // Now put the batch on the global queue.
7382 lock(&sched.lock)
7383 globrunqputbatch(&q)
7384 unlock(&sched.lock)
7385 return true
7386 }
7387
7388
7389 // runqputbatch tries to put all the Gs in q on the local runnable queue.
7390 // Gs that do not fit stay in q. Executed only by the owner P.
7391 func runqputbatch(pp *p, q *gQueue) {
7392 if q.empty() {
7393 return
7394 }
7395 h := atomic.LoadAcq(&pp.runqhead)
7396 t := pp.runqtail
7397 n := uint32(0)
7398 for !q.empty() && t-h < uint32(len(pp.runq)) {
7399 gp := q.pop()
7400 pp.runq[t%uint32(len(pp.runq))].set(gp)
7401 t++
7402 n++
7403 }
7404
7405 if randomizeScheduler {
7406 off := func(o uint32) uint32 {
7407 return (pp.runqtail + o) % uint32(len(pp.runq))
7408 }
7409 for i := uint32(1); i < n; i++ {
7410 j := cheaprandn(i + 1)
7411 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7412 }
7413 }
7414
7415 atomic.StoreRel(&pp.runqtail, t)
7416
7417 return
7418 }
7419
7420 // runqget gets a g from the local runnable queue.
7421 // If inheritTime is true, gp should inherit the remaining time in the
7422 // current time slice; otherwise, it should start a new time slice.
7423 // Executed only by the owner P.
7424 func runqget(pp *p) (gp *g, inheritTime bool) {
7425 // If there's a runnext, it's the next G to run.
7426 next := pp.runnext
7427 // If the cas fails, it can only have been stolen by another P, because
7428 // other Ps can race to set runnext to 0, but only the owner P can set it
7429 // to non-zero. Hence, there's no need to retry this cas if it fails.
7430 if next != 0 && pp.runnext.cas(next, 0) {
7431 return next.ptr(), true
7432 }
7433
7434 for {
7435 h := atomic.LoadAcq(&pp.runqhead)
7436 t := pp.runqtail
7437 if t == h {
7438 return nil, false
7439 }
7440 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7441 if atomic.CasRel(&pp.runqhead, h, h+1) {
7442 return gp, false
7443 }
7444 }
7445 }
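// Illustrative sketch (not part of the runtime): the local run queue is a
// fixed-size ring. runqhead and runqtail are free-running counters and slots
// are addressed modulo len(pp.runq), so t-h is the number of queued Gs even
// after the counters wrap. A hypothetical owner-P walk of the queue:
//
//	h := atomic.LoadAcq(&pp.runqhead)
//	t := pp.runqtail // only the owner P writes runqtail
//	for i := uint32(0); i < t-h; i++ {
//		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
//		_ = gp // i-th runnable G, oldest first
//	}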
7446
7447 // runqdrain drains the local runnable queue of pp, returning all of its
7448 // goroutines in a gQueue. Executed only by the owner P.
7449 func runqdrain(pp *p) (drainQ gQueue) {
7450 oldNext := pp.runnext
7451 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7452 drainQ.pushBack(oldNext.ptr())
7453 }
7454
7455 retry:
7456 h := atomic.LoadAcq(&pp.runqhead)
7457 t := pp.runqtail
7458 qn := t - h
7459 if qn == 0 {
7460 return
7461 }
7462 if qn > uint32(len(pp.runq)) {
7463 goto retry
7464 }
7465
7466 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7467 goto retry
7468 }
7469
7470
7471
7472
7473 // The CAS above advanced runqhead past the qn entries, claiming them.
7474 // runqdrain is only executed by the owner P, so no new Gs can be pushed
7475 // into those slots concurrently; stealers only advance the head. The
7476 // entries can therefore be read safely after the head has moved.
7477 for i := uint32(0); i < qn; i++ {
7478 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7479 drainQ.pushBack(gp)
7480 }
7481 return
7482 }
7483
7484 // runqgrab grabs a batch of goroutines from pp's runnable queue into batch.
7485 // batch is a ring buffer starting at batchHead.
7486 // It returns the number of grabbed goroutines.
7487 // Can be executed by any P.
7488 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7489 for {
7490 h := atomic.LoadAcq(&pp.runqhead)
7491 t := atomic.LoadAcq(&pp.runqtail)
7492 n := t - h
7493 n = n - n/2
7494 if n == 0 {
7495 if stealRunNextG {
7496 // Try to steal from pp.runnext.
7497 if next := pp.runnext; next != 0 {
7498 if pp.status == _Prunning {
7499
7500
7501
7502 // Sleep to ensure that pp isn't about to run the g we are about to
7503 // steal. The important use case here is when the g running on pp
7504 // readies another g and then almost immediately blocks. Instead of
7505 // stealing runnext in this window, back off to give pp a chance to
7506 // schedule runnext. This avoids thrashing gs between different Ps:
7507 // a sync chain of such goroutines shares a time slice anyway, so
7508 // there is no urgency to steal.
7509 if !osHasLowResTimer {
7510 usleep(3)
7511 } else {
7512 // On some platforms the system timer granularity is 1-15ms, which is
7513 // far too coarse for the sleep above, so just yield the OS thread
7514 // instead.
7515 osyield()
7516 }
7517 }
7518 if !pp.runnext.cas(next, 0) {
7519 continue
7520 }
7521 batch[batchHead%uint32(len(batch))] = next
7522 return 1
7523 }
7524 }
7525 return 0
7526 }
7527 if n > uint32(len(pp.runq)/2) {
7528 continue
7529 }
7530 for i := uint32(0); i < n; i++ {
7531 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7532 batch[(batchHead+i)%uint32(len(batch))] = g
7533 }
7534 if atomic.CasRel(&pp.runqhead, h, h+n) {
7535 return n
7536 }
7537 }
7538 }
7539
7540 // runqsteal steals half of the elements from the local runnable queue of p2
7541 // and puts them onto the local runnable queue of pp.
7542 // It returns one of the stolen elements, or nil if the steal failed.
7543 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7544 t := pp.runqtail
7545 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7546 if n == 0 {
7547 return nil
7548 }
7549 n--
7550 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7551 if n == 0 {
7552 return gp
7553 }
7554 h := atomic.LoadAcq(&pp.runqhead)
7555 if t-h+n >= uint32(len(pp.runq)) {
7556 throw("runqsteal: runq overflow")
7557 }
7558 atomic.StoreRel(&pp.runqtail, t+n)
7559 return gp
7560 }
7561
7562 // A gQueue is a queue of Gs linked through g.schedlink; Gs can be pushed at
7563 // either end. A G can only be on one gQueue or gList at a time.
7564 type gQueue struct {
7565 head guintptr
7566 tail guintptr
7567 size int32
7568 }
7569
7570 // empty reports whether q is empty.
7571 func (q *gQueue) empty() bool {
7572 return q.head == 0
7573 }
7574
7575 // push adds gp to the head of q.
7576 func (q *gQueue) push(gp *g) {
7577 gp.schedlink = q.head
7578 q.head.set(gp)
7579 if q.tail == 0 {
7580 q.tail.set(gp)
7581 }
7582 q.size++
7583 }
7584
7585 // pushBack adds gp to the tail of q.
7586 func (q *gQueue) pushBack(gp *g) {
7587 gp.schedlink = 0
7588 if q.tail != 0 {
7589 q.tail.ptr().schedlink.set(gp)
7590 } else {
7591 q.head.set(gp)
7592 }
7593 q.tail.set(gp)
7594 q.size++
7595 }
7596
7597 // pushBackAll adds all Gs in q2 to the tail of q.
7598 // After this call, q2 must not be used.
7599 func (q *gQueue) pushBackAll(q2 gQueue) {
7600 if q2.tail == 0 {
7601 return
7602 }
7603 q2.tail.ptr().schedlink = 0
7604 if q.tail != 0 {
7605 q.tail.ptr().schedlink = q2.head
7606 } else {
7607 q.head = q2.head
7608 }
7609 q.tail = q2.tail
7610 q.size += q2.size
7611 }
7612
7613 // pop removes and returns the head of queue q.
7614 // It returns nil if q is empty.
7615 func (q *gQueue) pop() *g {
7616 gp := q.head.ptr()
7617 if gp != nil {
7618 q.head = gp.schedlink
7619 if q.head == 0 {
7620 q.tail = 0
7621 }
7622 q.size--
7623 }
7624 return gp
7625 }
7626
7627 // popList takes all Gs in q and returns them as a gList, leaving q empty.
7628 func (q *gQueue) popList() gList {
7629 stack := gList{q.head, q.size}
7630 *q = gQueue{}
7631 return stack
7632 }
7633
7634 // A gList is a list of Gs linked through g.schedlink.
7635 // A G can only be on one gQueue or gList at a time.
7636 type gList struct {
7637 head guintptr
7638 size int32
7639 }
7640
7641 // empty reports whether l is empty.
7642 func (l *gList) empty() bool {
7643 return l.head == 0
7644 }
7645
7646 // push adds gp to the head of l.
7647 func (l *gList) push(gp *g) {
7648 gp.schedlink = l.head
7649 l.head.set(gp)
7650 l.size++
7651 }
7652
7653 // pushAll prepends all Gs in q to l.
7654 func (l *gList) pushAll(q gQueue) {
7655 if !q.empty() {
7656 q.tail.ptr().schedlink = l.head
7657 l.head = q.head
7658 l.size += q.size
7659 }
7660 }
7661
7662 // pop removes and returns the head of l. It returns nil if l is empty.
7663 func (l *gList) pop() *g {
7664 gp := l.head.ptr()
7665 if gp != nil {
7666 l.head = gp.schedlink
7667 l.size--
7668 }
7669 return gp
7670 }
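// Illustrative sketch (not part of the runtime): gQueue and gList both link
// Gs through the single g.schedlink field, so moving Gs between them never
// allocates. With hypothetical goroutines gp1 and gp2:
//
//	var q gQueue
//	q.pushBack(gp1)  // q: gp1
//	q.push(gp2)      // q: gp2 -> gp1
//	l := q.popList() // q is now empty; l owns both Gs
//	for gp := l.pop(); gp != nil; gp = l.pop() {
//		// visits gp2, then gp1
//	}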
7671
7672 // setMaxThreads sets the limit on the number of OS threads (sched.maxmcount) and returns the previous limit.
7673 func setMaxThreads(in int) (out int) {
7674 lock(&sched.lock)
7675 out = int(sched.maxmcount)
7676 if in > 0x7fffffff {
7677 sched.maxmcount = 0x7fffffff
7678 } else {
7679 sched.maxmcount = int32(in)
7680 }
7681 checkmcount()
7682 unlock(&sched.lock)
7683 return
7684 }
7685
7686
7687
7688
7689
7690
7691
7692
7693
7694 // procPin pins the current goroutine to its P by incrementing m.locks,
7695 // which disables preemption. It returns the id of the pinned P.
7696 // The pin is released by procUnpin.
7697 // It backs the sync_runtime_procPin and sync_atomic_runtime_procPin wrappers below.
7698 func procPin() int {
7699 gp := getg()
7700 mp := gp.m
7701
7702 mp.locks++
7703 return int(mp.p.ptr().id)
7704 }
7705
7706
7707
7708
7709
7710
7711
7712
7713
7714
7715
7716 // procUnpin undoes procPin by decrementing m.locks, re-enabling
7717 // preemption of the current goroutine.
7718 func procUnpin() {
7719 gp := getg()
7720 gp.m.locks--
7721 }
7722
7723
7724
7725 func sync_runtime_procPin() int {
7726 return procPin()
7727 }
7728
7729
7730
7731 func sync_runtime_procUnpin() {
7732 procUnpin()
7733 }
7734
7735
7736
7737 func sync_atomic_runtime_procPin() int {
7738 return procPin()
7739 }
7740
7741
7742
7743 func sync_atomic_runtime_procUnpin() {
7744 procUnpin()
7745 }
7746
7747
7748
7749 // internal_sync_runtime_canSpin reports whether spinning is worthwhile for a
7750 // sync mutex at spin iteration i.
7751 func internal_sync_runtime_canSpin(i int) bool {
7752 // sync.Mutex is cooperative, so we are conservative with spinning.
7753 // Spin only a few times and only if running on a multicore machine,
7754 // GOMAXPROCS > 1, there is at least one other running P, and the local
7755 // run queue is empty. Unlike the runtime mutex, we don't do passive
7756 // spinning here, since there can be work on the global run queue or on other Ps.
7757 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7758 return false
7759 }
7760 if p := getg().m.p.ptr(); !runqempty(p) {
7761 return false
7762 }
7763 return true
7764 }
7765
7766 // internal_sync_runtime_doSpin performs a short burst of active spinning
7767 // (procyield) on behalf of sync.
7768 func internal_sync_runtime_doSpin() {
7769 procyield(active_spin_cnt)
7770 }
7771
7772
7773
7774
7775
7776
7777
7778
7779
7780
7781
7782
7783
7784
7785 // sync_runtime_canSpin forwards to internal_sync_runtime_canSpin.
7786 func sync_runtime_canSpin(i int) bool {
7787 return internal_sync_runtime_canSpin(i)
7788 }
7789
7790
7791
7792
7793
7794
7795
7796
7797
7798
7799
7800
7801 // sync_runtime_doSpin forwards to internal_sync_runtime_doSpin.
7802 func sync_runtime_doSpin() {
7803 internal_sync_runtime_doSpin()
7804 }
7805
7806 var stealOrder randomOrder
7807
7808 // randomOrder/randomEnum are helper types for randomized work stealing.
7809 // They allow enumerating all Ps in different pseudo-random orders without
7810 // repetition, using the fact that for inc coprime with count the sequence
7811 // (pos + inc) % count visits every value in [0, count) exactly once.
7812 type randomOrder struct {
7813 count uint32
7814 coprimes []uint32
7815 }
7816
7817 type randomEnum struct {
7818 i uint32
7819 count uint32
7820 pos uint32
7821 inc uint32
7822 }
7823
7824 func (ord *randomOrder) reset(count uint32) {
7825 ord.count = count
7826 ord.coprimes = ord.coprimes[:0]
7827 for i := uint32(1); i <= count; i++ {
7828 if gcd(i, count) == 1 {
7829 ord.coprimes = append(ord.coprimes, i)
7830 }
7831 }
7832 }
7833
7834 func (ord *randomOrder) start(i uint32) randomEnum {
7835 return randomEnum{
7836 count: ord.count,
7837 pos: i % ord.count,
7838 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7839 }
7840 }
7841
7842 func (enum *randomEnum) done() bool {
7843 return enum.i == enum.count
7844 }
7845
7846 func (enum *randomEnum) next() {
7847 enum.i++
7848 enum.pos = (enum.pos + enum.inc) % enum.count
7849 }
7850
7851 func (enum *randomEnum) position() uint32 {
7852 return enum.pos
7853 }
7854
7855 func gcd(a, b uint32) uint32 {
7856 for b != 0 {
7857 a, b = b, a%b
7858 }
7859 return a
7860 }
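// Illustrative sketch (not part of the runtime): enumerating count=5 Ps. The
// coprimes of 5 are {1, 2, 3, 4}; with inc=3 and a starting pos of 2 the
// enumeration visits positions 2, 0, 3, 1, 4 - each P exactly once. In the
// scheduler this is used roughly as:
//
//	for e := stealOrder.start(cheaprand()); !e.done(); e.next() {
//		p2 := allp[e.position()] // candidate P to steal work from
//		_ = p2
//	}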
7861
7862 // An initTask represents the set of package initialization functions that
7863 // need to be run for a single package. Its layout is known to the compiler.
7864 type initTask struct {
7865 state uint32
7866 nfns uint32
7867 // followed in memory by nfns function pointers, one per init function
7868 }
7869
7870 // inittrace stores statistics for init functions; it is updated by malloc
7871 // and newproc while active is true.
7872 var inittrace tracestat
7873
7874 type tracestat struct {
7875 active bool
7876 id uint64
7877 allocs uint64
7878 bytes uint64
7879 }
7880 // doInit runs the given init tasks in order.
7881 func doInit(ts []*initTask) {
7882 for _, t := range ts {
7883 doInit1(t)
7884 }
7885 }
7886 // doInit1 runs a single init task, recording timing and allocation statistics when init tracing is active.
7887 func doInit1(t *initTask) {
7888 switch t.state {
7889 case 2: // fully initialized
7890 return
7891 case 1: // initialization in progress
7892 throw("recursive call during initialization - linker skew")
7893 default: // not initialized yet
7894 t.state = 1
7895
7896 var (
7897 start int64
7898 before tracestat
7899 )
7900
7901 if inittrace.active {
7902 start = nanotime()
7903 // Load stats non-atomically; they are updated only by this goroutine.
7904 before = inittrace
7905 }
7906
7907 if t.nfns == 0 {
7908 // The linker should have pruned inittasks that have no functions.
7909 throw("inittask with no functions")
7910 }
7911
7912 firstFunc := add(unsafe.Pointer(t), 8)
7913 for i := uint32(0); i < t.nfns; i++ {
7914 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7915 f := *(*func())(unsafe.Pointer(&p))
7916 f()
7917 }
7918
7919 if inittrace.active {
7920 end := nanotime()
7921
7922 after := inittrace
7923
7924 f := *(*func())(unsafe.Pointer(&firstFunc))
7925 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7926
7927 var sbuf [24]byte
7928 print("init ", pkg, " @")
7929 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7930 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7931 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7932 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7933 print("\n")
7934 }
7935
7936 t.state = 2
7937 }
7938 }
7939