Source file
src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/exithook"
15 "internal/runtime/sys"
16 "internal/strconv"
17 "internal/stringslite"
18 "unsafe"
19 )
20
21
22 var modinfo string
118 var (
119 m0 m
120 g0 g
121 mcache0 *mcache
122 raceprocctx0 uintptr
123 raceFiniLock mutex
124 )
125
126
127
128 var runtime_inittasks []*initTask
129
130
131
132
133
134 var main_init_done chan bool
135
136
137 func main_main()
138
139
140 var mainStarted bool
141
142
143 var runtimeInitTime int64
144
145
146 var initSigmask sigset
147
148
149 func main() {
150 mp := getg().m
151
152
153
154 mp.g0.racectx = 0
155
156
157
158
159 if goarch.PtrSize == 8 {
160 maxstacksize = 1000000000
161 } else {
162 maxstacksize = 250000000
163 }
164
165
166
167
168 maxstackceiling = 2 * maxstacksize
169
170
171 mainStarted = true
172
173 if haveSysmon {
174 systemstack(func() {
175 newm(sysmon, nil, -1)
176 })
177 }
178
179
180
181
182
183
184
185 lockOSThread()
186
187 if mp != &m0 {
188 throw("runtime.main not on m0")
189 }
190
191
192
193 runtimeInitTime = nanotime()
194 if runtimeInitTime == 0 {
195 throw("nanotime returning zero")
196 }
197
198 if debug.inittrace != 0 {
199 inittrace.id = getg().goid
200 inittrace.active = true
201 }
202
203 doInit(runtime_inittasks)
204
205
206 needUnlock := true
207 defer func() {
208 if needUnlock {
209 unlockOSThread()
210 }
211 }()
212
213 gcenable()
214 defaultGOMAXPROCSUpdateEnable()
215
216 main_init_done = make(chan bool)
217 if iscgo {
218 if _cgo_pthread_key_created == nil {
219 throw("_cgo_pthread_key_created missing")
220 }
221
222 if _cgo_thread_start == nil {
223 throw("_cgo_thread_start missing")
224 }
225 if GOOS != "windows" {
226 if _cgo_setenv == nil {
227 throw("_cgo_setenv missing")
228 }
229 if _cgo_unsetenv == nil {
230 throw("_cgo_unsetenv missing")
231 }
232 }
233 if _cgo_notify_runtime_init_done == nil {
234 throw("_cgo_notify_runtime_init_done missing")
235 }
236
237
238 if set_crosscall2 == nil {
239 throw("set_crosscall2 missing")
240 }
241 set_crosscall2()
242
243
244
245 startTemplateThread()
246 cgocall(_cgo_notify_runtime_init_done, nil)
247 }
248
249
250
251
252
253
254
255
256 last := lastmoduledatap
257 for m := &firstmoduledata; true; m = m.next {
258 doInit(m.inittasks)
259 if m == last {
260 break
261 }
262 }
263
264
265
266 inittrace.active = false
267
268 close(main_init_done)
269
270 needUnlock = false
271 unlockOSThread()
272
273 if isarchive || islibrary {
274
275
276 if GOARCH == "wasm" {
277
278
279
280
281
282
283
284 pause(sys.GetCallerSP() - 16)
285 panic("unreachable")
286 }
287 return
288 }
289 fn := main_main
290 fn()
291
292
293
294
295
296
297
298
299 exitHooksRun := false
300 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
301 runExitHooks(0)
302 exitHooksRun = true
303 lsandoleakcheck()
304 }
305
306
307
308
309
310 if runningPanicDefers.Load() != 0 {
311
312 for c := 0; c < 1000; c++ {
313 if runningPanicDefers.Load() == 0 {
314 break
315 }
316 Gosched()
317 }
318 }
319 if panicking.Load() != 0 {
320 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
321 }
322 if !exitHooksRun {
323 runExitHooks(0)
324 }
325 if raceenabled {
326 racefini()
327 }
328
329 exit(0)
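// exit should not return; if it somehow does, fall through to the nil-pointer
// store below so the process faults rather than silently continuing.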
330 for {
331 var x *int32
332 *x = 0
333 }
334 }
335
336
337
338
339 func os_beforeExit(exitCode int) {
340 runExitHooks(exitCode)
341 if exitCode == 0 && raceenabled {
342 racefini()
343 }
344
345
346 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
347 lsandoleakcheck()
348 }
349 }
350
351 func init() {
352 exithook.Gosched = Gosched
353 exithook.Goid = func() uint64 { return getg().goid }
354 exithook.Throw = throw
355 }
356
357 func runExitHooks(code int) {
358 exithook.Run(code)
359 }
360
361
362 func init() {
363 go forcegchelper()
364 }
365
366 func forcegchelper() {
367 forcegc.g = getg()
368 lockInit(&forcegc.lock, lockRankForcegc)
369 for {
370 lock(&forcegc.lock)
371 if forcegc.idle.Load() {
372 throw("forcegc: phase error")
373 }
374 forcegc.idle.Store(true)
375 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
376
377 if debug.gctrace > 0 {
378 println("GC forced")
379 }
380
381 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
382 }
383 }
384
385
386
387
388
389 func Gosched() {
390 checkTimeouts()
391 mcall(gosched_m)
392 }
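// Illustrative (user-level) usage, not part of this file: a cooperative busy
// loop that yields so other goroutines on the same P can make progress. The
// done flag and work function below are hypothetical.
//
//	for !done.Load() {
//		doSomeWork()      // hypothetical small unit of work
//		runtime.Gosched() // yield; the goroutine stays runnable
//	}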
393
394
395
396
397
398 func goschedguarded() {
399 mcall(goschedguarded_m)
400 }
401
402
403
404
405
406
407 func goschedIfBusy() {
408 gp := getg()
409
410
411 if !gp.preempt && sched.npidle.Load() > 0 {
412 return
413 }
414 mcall(gosched_m)
415 }
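// gopark puts the current goroutine into a waiting state with the given
// reason and calls unlockf(gp, lock) on the system stack (via park_m).
// If unlockf returns false, the goroutine is resumed immediately.
// unlockf must not access this G's stack, as it may be moved between the
// call to gopark and the call to unlockf. Because unlockf runs after the G
// is marked waiting, the G may already have been readied by the time unlockf
// runs unless external synchronization prevents that.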
445 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
446 if reason != waitReasonSleep {
447 checkTimeouts()
448 }
449 mp := acquirem()
450 gp := mp.curg
451 status := readgstatus(gp)
452 if status != _Grunning && status != _Gscanrunning {
453 throw("gopark: bad g status")
454 }
455 mp.waitlock = lock
456 mp.waitunlockf = unlockf
457 gp.waitreason = reason
458 mp.waitTraceBlockReason = traceReason
459 mp.waitTraceSkip = traceskip
460 releasem(mp)
461
462 mcall(park_m)
463 }
464
465
466
467 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
468 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
469 }
481 func goready(gp *g, traceskip int) {
482 systemstack(func() {
483 ready(gp, traceskip, true)
484 })
485 }
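// Shape of the usual pairing (illustrative only; someLock, someWaitReason,
// and someTraceReason are placeholders, not a real call site):
//
//	// waiter: park until the waker releases it (lock protects the shared state).
//	goparkunlock(&someLock, someWaitReason, someTraceReason, 1)
//
//	// waker: once the condition holds, make the goroutine runnable again.
//	goready(gp, 1) // moves gp from _Gwaiting back to _Grunnable via ready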
486
487
488 func acquireSudog() *sudog {
// Delicate dance: the semaphore implementation calls acquireSudog,
// acquireSudog calls new(sudog), new calls malloc, malloc can call the
// garbage collector, and the garbage collector calls the semaphore
// implementation in stopTheWorld. Break the cycle by doing
// acquirem/releasem around new(sudog) below; the acquirem/releasem
// increments m.locks during new(sudog), which keeps the garbage
// collector from being invoked.
497 mp := acquirem()
498 pp := mp.p.ptr()
499 if len(pp.sudogcache) == 0 {
500 lock(&sched.sudoglock)
501
502 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
503 s := sched.sudogcache
504 sched.sudogcache = s.next
505 s.next = nil
506 pp.sudogcache = append(pp.sudogcache, s)
507 }
508 unlock(&sched.sudoglock)
509
510 if len(pp.sudogcache) == 0 {
511 pp.sudogcache = append(pp.sudogcache, new(sudog))
512 }
513 }
514 n := len(pp.sudogcache)
515 s := pp.sudogcache[n-1]
516 pp.sudogcache[n-1] = nil
517 pp.sudogcache = pp.sudogcache[:n-1]
518 if s.elem.get() != nil {
519 throw("acquireSudog: found s.elem != nil in cache")
520 }
521 releasem(mp)
522 return s
523 }
524
525
526 func releaseSudog(s *sudog) {
527 if s.elem.get() != nil {
528 throw("runtime: sudog with non-nil elem")
529 }
530 if s.isSelect {
531 throw("runtime: sudog with non-false isSelect")
532 }
533 if s.next != nil {
534 throw("runtime: sudog with non-nil next")
535 }
536 if s.prev != nil {
537 throw("runtime: sudog with non-nil prev")
538 }
539 if s.waitlink != nil {
540 throw("runtime: sudog with non-nil waitlink")
541 }
542 if s.c.get() != nil {
543 throw("runtime: sudog with non-nil c")
544 }
545 gp := getg()
546 if gp.param != nil {
547 throw("runtime: releaseSudog with non-nil gp.param")
548 }
549 mp := acquirem()
550 pp := mp.p.ptr()
551 if len(pp.sudogcache) == cap(pp.sudogcache) {
552
553 var first, last *sudog
554 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
555 n := len(pp.sudogcache)
556 p := pp.sudogcache[n-1]
557 pp.sudogcache[n-1] = nil
558 pp.sudogcache = pp.sudogcache[:n-1]
559 if first == nil {
560 first = p
561 } else {
562 last.next = p
563 }
564 last = p
565 }
566 lock(&sched.sudoglock)
567 last.next = sched.sudogcache
568 sched.sudogcache = first
569 unlock(&sched.sudoglock)
570 }
571 pp.sudogcache = append(pp.sudogcache, s)
572 releasem(mp)
573 }
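// acquireSudog and releaseSudog together implement a two-level sudog cache:
// a per-P slice (pp.sudogcache) that is refilled from, and spilled to, a
// central linked list (sched.sudogcache) half a cache at a time, so the
// scheduler lock is only taken when the local cache is empty or full.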
574
575
576 func badmcall(fn func(*g)) {
577 throw("runtime: mcall called on m->g0 stack")
578 }
579
580 func badmcall2(fn func(*g)) {
581 throw("runtime: mcall function returned")
582 }
583
584 func badreflectcall() {
585 panic(plainError("arg size to reflect.call more than 1GB"))
586 }
587
588
589
590 func badmorestackg0() {
591 if !crashStackImplemented {
592 writeErrStr("fatal: morestack on g0\n")
593 return
594 }
595
596 g := getg()
597 switchToCrashStack(func() {
598 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
599 g.m.traceback = 2
600 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
601 print("\n")
602
603 throw("morestack on g0")
604 })
605 }
606
607
608
609 func badmorestackgsignal() {
610 writeErrStr("fatal: morestack on gsignal\n")
611 }
612
613
614 func badctxt() {
615 throw("ctxt != 0")
616 }
617
618
619
620 var gcrash g
621
622 var crashingG atomic.Pointer[g]
623
632 func switchToCrashStack(fn func()) {
633 me := getg()
634 if crashingG.CompareAndSwapNoWB(nil, me) {
635 switchToCrashStack0(fn)
636 abort()
637 }
638 if crashingG.Load() == me {
639
640 writeErrStr("fatal: recursive switchToCrashStack\n")
641 abort()
642 }
643
644 usleep_no_g(100)
645 writeErrStr("fatal: concurrent switchToCrashStack\n")
646 abort()
647 }
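// Only the first goroutine to crash wins the CompareAndSwap above and gets
// the dedicated crash stack; a recursive crash on the same goroutine aborts
// immediately, and concurrent crashers spin briefly and then abort so the
// winner's traceback is not interleaved with theirs.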
648
649
650
651
652 const crashStackImplemented = GOOS != "windows"
653
654
655 func switchToCrashStack0(fn func())
656
657 func lockedOSThread() bool {
658 gp := getg()
659 return gp.lockedm != 0 && gp.m.lockedg != 0
660 }
661
662 var (
663
664
665
666
667
668
669 allglock mutex
670 allgs []*g
684 allglen uintptr
685 allgptr **g
686 )
687
688 func allgadd(gp *g) {
689 if readgstatus(gp) == _Gidle {
690 throw("allgadd: bad status Gidle")
691 }
692
693 lock(&allglock)
694 allgs = append(allgs, gp)
695 if &allgs[0] != allgptr {
696 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
697 }
698 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
699 unlock(&allglock)
700 }
701
702
703
704
705 func allGsSnapshot() []*g {
706 assertWorldStoppedOrLockHeld(&allglock)
707
708
709
710
711
712
713 return allgs[:len(allgs):len(allgs)]
714 }
715
716
717 func atomicAllG() (**g, uintptr) {
718 length := atomic.Loaduintptr(&allglen)
719 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
720 return ptr, length
721 }
722
723
724 func atomicAllGIndex(ptr **g, i uintptr) *g {
725 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
726 }
727
728
729
730
731 func forEachG(fn func(gp *g)) {
732 lock(&allglock)
733 for _, gp := range allgs {
734 fn(gp)
735 }
736 unlock(&allglock)
737 }
738
739
740
741
742
743 func forEachGRace(fn func(gp *g)) {
744 ptr, length := atomicAllG()
745 for i := uintptr(0); i < length; i++ {
746 gp := atomicAllGIndex(ptr, i)
747 fn(gp)
748 }
749 return
750 }
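// forEachG holds allglock for the duration and therefore sees a stable set
// of goroutines; forEachGRace instead reads the (ptr, length) pair published
// by allgadd, so it can run concurrently with the scheduler but may miss
// goroutines created after the snapshot is taken.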
751
752 const (
753
754
755 _GoidCacheBatch = 16
756 )
757
758
759
760 func cpuinit(env string) {
761 cpu.Initialize(env)
762
763
764
765 switch GOARCH {
766 case "386", "amd64":
767 x86HasAVX = cpu.X86.HasAVX
768 x86HasFMA = cpu.X86.HasFMA
769 x86HasPOPCNT = cpu.X86.HasPOPCNT
770 x86HasSSE41 = cpu.X86.HasSSE41
771
772 case "arm":
773 armHasVFPv4 = cpu.ARM.HasVFPv4
774
775 case "arm64":
776 arm64HasATOMICS = cpu.ARM64.HasATOMICS
777
778 case "loong64":
779 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
780 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
781 loong64HasLSX = cpu.Loong64.HasLSX
782
783 case "riscv64":
784 riscv64HasZbb = cpu.RISCV64.HasZbb
785 }
786 }
787
788
789
790
791
792
793 func getGodebugEarly() (string, bool) {
794 const prefix = "GODEBUG="
795 var env string
796 switch GOOS {
797 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
798
799
800
801 n := int32(0)
802 for argv_index(argv, argc+1+n) != nil {
803 n++
804 }
805
806 for i := int32(0); i < n; i++ {
807 p := argv_index(argv, argc+1+i)
808 s := unsafe.String(p, findnull(p))
809
810 if stringslite.HasPrefix(s, prefix) {
811 env = gostringnocopy(p)[len(prefix):]
812 break
813 }
814 }
815 break
816
817 default:
818 return "", false
819 }
820 return env, true
821 }
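// getGodebugEarly runs before the environment has been parsed (and before
// the allocator is fully ready), so it walks the raw environ block that the
// OS placed after argv rather than calling gogetenv.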
822
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
831 func schedinit() {
832 lockInit(&sched.lock, lockRankSched)
833 lockInit(&sched.sysmonlock, lockRankSysmon)
834 lockInit(&sched.deferlock, lockRankDefer)
835 lockInit(&sched.sudoglock, lockRankSudog)
836 lockInit(&deadlock, lockRankDeadlock)
837 lockInit(&paniclk, lockRankPanic)
838 lockInit(&allglock, lockRankAllg)
839 lockInit(&allpLock, lockRankAllp)
840 lockInit(&reflectOffs.lock, lockRankReflectOffs)
841 lockInit(&finlock, lockRankFin)
842 lockInit(&cpuprof.lock, lockRankCpuprof)
843 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
844 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
845 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
846 traceLockInit()
847
848
849
850 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
851
852 lockVerifyMSize()
853
854 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
855
856
857
858 gp := getg()
859 if raceenabled {
860 gp.racectx, raceprocctx0 = raceinit()
861 }
862
863 sched.maxmcount = 10000
864 crashFD.Store(^uintptr(0))
865
866
867 worldStopped()
868
869 godebug, parsedGodebug := getGodebugEarly()
870 if parsedGodebug {
871 parseRuntimeDebugVars(godebug)
872 }
873 ticks.init()
874 moduledataverify()
875 stackinit()
876 randinit()
877 mallocinit()
878 cpuinit(godebug)
879 alginit()
880 mcommoninit(gp.m, -1)
881 modulesinit()
882 typelinksinit()
883 itabsinit()
884 stkobjinit()
885
886 sigsave(&gp.m.sigmask)
887 initSigmask = gp.m.sigmask
888
889 goargs()
890 goenvs()
891 secure()
892 checkfds()
893 if !parsedGodebug {
894
895
896 parseRuntimeDebugVars(gogetenv("GODEBUG"))
897 }
898 finishDebugVarsSetup()
899 gcinit()
900
901
902
903 gcrash.stack = stackalloc(16384)
904 gcrash.stackguard0 = gcrash.stack.lo + 1000
905 gcrash.stackguard1 = gcrash.stack.lo + 1000
906
907
908
909
910
911 if disableMemoryProfiling {
912 MemProfileRate = 0
913 }
914
915
916 mProfStackInit(gp.m)
917 defaultGOMAXPROCSInit()
918
919 lock(&sched.lock)
920 sched.lastpoll.Store(nanotime())
921 var procs int32
922 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
923 procs = int32(n)
924 sched.customGOMAXPROCS = true
925 } else {
926
927
928
929
930
931
932
933
934 procs = defaultGOMAXPROCS(numCPUStartup)
935 }
936 if procresize(procs) != nil {
937 throw("unknown runnable goroutine during bootstrap")
938 }
939 unlock(&sched.lock)
940
941
942 worldStarted()
943
944 if buildVersion == "" {
945
946
947 buildVersion = "unknown"
948 }
949 if len(modinfo) == 1 {
950
951
952 modinfo = ""
953 }
954 }
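// The GOMAXPROCS resolution above is observable from user code. Illustrative
// only (not part of this file):
//
//	// GOMAXPROCS=4 ./prog  -> runtime.GOMAXPROCS(0) reports 4
//	// unset or invalid     -> runtime.GOMAXPROCS(0) reports the computed default
//	n := runtime.GOMAXPROCS(0) // query without changing the setting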
955
956 func dumpgstatus(gp *g) {
957 thisg := getg()
958 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
959 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
960 }
961
962
963 func checkmcount() {
964 assertLockHeld(&sched.lock)
965
966
967
968
969
970
971
972
973
974 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
975 if count > sched.maxmcount {
976 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
977 throw("thread exhaustion")
978 }
979 }
980
981
982
983
984
985 func mReserveID() int64 {
986 assertLockHeld(&sched.lock)
987
988 if sched.mnext+1 < sched.mnext {
989 throw("runtime: thread ID overflow")
990 }
991 id := sched.mnext
992 sched.mnext++
993 checkmcount()
994 return id
995 }
996
997
998 func mcommoninit(mp *m, id int64) {
999 gp := getg()
1000
1001
1002 if gp != gp.m.g0 {
1003 callers(1, mp.createstack[:])
1004 }
1005
1006 lock(&sched.lock)
1007
1008 if id >= 0 {
1009 mp.id = id
1010 } else {
1011 mp.id = mReserveID()
1012 }
1013
1014 mp.self = newMWeakPointer(mp)
1015
1016 mrandinit(mp)
1017
1018 mpreinit(mp)
1019 if mp.gsignal != nil {
1020 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1021 }
1022
1023
1024
1025 mp.alllink = allm
1026
1027
1028
1029 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1030 unlock(&sched.lock)
1031
1032
1033 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1034 mp.cgoCallers = new(cgoCallers)
1035 }
1036 mProfStackInit(mp)
1037 }
1038
1039
1040
1041
1042
1043 func mProfStackInit(mp *m) {
1044 if debug.profstackdepth == 0 {
1045
1046
1047 return
1048 }
1049 mp.profStack = makeProfStackFP()
1050 mp.mLockProfile.stack = makeProfStackFP()
1051 }
1052
1053
1054
1055
1056 func makeProfStackFP() []uintptr {
1057
1058
1059
1060
1061
1062
1063 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1064 }
1065
1066
1067
1068 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1069
1070
1071 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1072
1073 func (mp *m) becomeSpinning() {
1074 mp.spinning = true
1075 sched.nmspinning.Add(1)
1076 sched.needspinning.Store(0)
1077 }
1078
1079
1080
1081
1082
1083
1084
1085
1086 func (mp *m) snapshotAllp() []*p {
1087 mp.allpSnapshot = allp
1088 return mp.allpSnapshot
1089 }
1090
1091
1092
1093
1094
1095
1096
1097 func (mp *m) clearAllpSnapshot() {
1098 mp.allpSnapshot = nil
1099 }
1100
1101 func (mp *m) hasCgoOnStack() bool {
1102 return mp.ncgo > 0 || mp.isextra
1103 }
1104
1105 const (
1106
1107
1108 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1109
1110
1111
1112 osHasLowResClockInt = goos.IsWindows
1113
1114
1115
1116 osHasLowResClock = osHasLowResClockInt > 0
1117 )
1118
1119
1120 func ready(gp *g, traceskip int, next bool) {
1121 status := readgstatus(gp)
1122
1123
1124 mp := acquirem()
1125 if status&^_Gscan != _Gwaiting {
1126 dumpgstatus(gp)
1127 throw("bad g->status in ready")
1128 }
1129
1130
1131 trace := traceAcquire()
1132 casgstatus(gp, _Gwaiting, _Grunnable)
1133 if trace.ok() {
1134 trace.GoUnpark(gp, traceskip)
1135 traceRelease(trace)
1136 }
1137 runqput(mp.p.ptr(), gp, next)
1138 wakep()
1139 releasem(mp)
1140 }
1141
1142
1143
1144 const freezeStopWait = 0x7fffffff
1145
1146
1147
1148 var freezing atomic.Bool
1149
1150
1151
1152
1153 func freezetheworld() {
1154 freezing.Store(true)
1155 if debug.dontfreezetheworld > 0 {
1180 usleep(1000)
1181 return
1182 }
1183
1184
1185
1186
1187 for i := 0; i < 5; i++ {
1188
1189 sched.stopwait = freezeStopWait
1190 sched.gcwaiting.Store(true)
1191
1192 if !preemptall() {
1193 break
1194 }
1195 usleep(1000)
1196 }
1197
1198 usleep(1000)
1199 preemptall()
1200 usleep(1000)
1201 }
1202
1203
1204
1205
1206
1207 func readgstatus(gp *g) uint32 {
1208 return gp.atomicstatus.Load()
1209 }
1210
1211
1212
1213
1214
1215 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1216 success := false
1217
1218
1219 switch oldval {
1220 default:
1221 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1222 dumpgstatus(gp)
1223 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1224 case _Gscanrunnable,
1225 _Gscanwaiting,
1226 _Gscanrunning,
1227 _Gscansyscall,
1228 _Gscanleaked,
1229 _Gscanpreempted,
1230 _Gscandeadextra:
1231 if newval == oldval&^_Gscan {
1232 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1233 }
1234 }
1235 if !success {
1236 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1237 dumpgstatus(gp)
1238 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1239 }
1240 releaseLockRankAndM(lockRankGscan)
1241 }
1242
1243
1244
1245 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1246 switch oldval {
1247 case _Grunnable,
1248 _Grunning,
1249 _Gwaiting,
1250 _Gleaked,
1251 _Gsyscall,
1252 _Gdeadextra:
1253 if newval == oldval|_Gscan {
1254 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1255 if r {
1256 acquireLockRankAndM(lockRankGscan)
1257 }
1258 return r
1259
1260 }
1261 }
1262 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1263 throw("bad oldval passed to castogscanstatus")
1264 return false
1265 }
1266
1267
1268
1269 var casgstatusAlwaysTrack = false
1270
1271
1272
1273
1274
1275
1276
1277 func casgstatus(gp *g, oldval, newval uint32) {
1278 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1279 systemstack(func() {
1280
1281
1282 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1283 throw("casgstatus: bad incoming values")
1284 })
1285 }
1286
1287 lockWithRankMayAcquire(nil, lockRankGscan)
1288
1289
1290 const yieldDelay = 5 * 1000
1291 var nextYield int64
1292
1293
1294
1295 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1296 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1297 systemstack(func() {
1298
1299
1300 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1301 })
1302 }
1303 if i == 0 {
1304 nextYield = nanotime() + yieldDelay
1305 }
1306 if nanotime() < nextYield {
1307 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1308 procyield(1)
1309 }
1310 } else {
1311 osyield()
1312 nextYield = nanotime() + yieldDelay/2
1313 }
1314 }
1315
1316 if gp.bubble != nil {
1317 systemstack(func() {
1318 gp.bubble.changegstatus(gp, oldval, newval)
1319 })
1320 }
1321
1322 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1323
1324
1325 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1326 gp.tracking = true
1327 }
1328 gp.trackingSeq++
1329 }
1330 if !gp.tracking {
1331 return
1332 }
1333
1334
1335
1336
1337
1338
1339 switch oldval {
1340 case _Grunnable:
1341
1342
1343
1344 now := nanotime()
1345 gp.runnableTime += now - gp.trackingStamp
1346 gp.trackingStamp = 0
1347 case _Gwaiting:
1348 if !gp.waitreason.isMutexWait() {
1349
1350 break
1351 }
1352
1353
1354
1355
1356
1357 now := nanotime()
1358 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1359 gp.trackingStamp = 0
1360 }
1361 switch newval {
1362 case _Gwaiting:
1363 if !gp.waitreason.isMutexWait() {
1364
1365 break
1366 }
1367
1368 now := nanotime()
1369 gp.trackingStamp = now
1370 case _Grunnable:
1371
1372
1373 now := nanotime()
1374 gp.trackingStamp = now
1375 case _Grunning:
1376
1377
1378
1379 gp.tracking = false
1380 sched.timeToRun.record(gp.runnableTime)
1381 gp.runnableTime = 0
1382 }
1383 }
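// casgstatus loops rather than failing because the only legitimate holder of
// the old status is the garbage collector temporarily holding the _Gscan
// bit; the per-G tracking updates above are sampled (every gTrackingPeriod
// transitions) to keep the scheduling-latency statistics cheap.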
1384
1385
1386
1387
1388 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1389
1390 gp.waitreason = reason
1391 casgstatus(gp, old, _Gwaiting)
1392 }
1393
1394
1395
1396
1397
1398
1399
1400
1401 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1402 if !reason.isWaitingForSuspendG() {
1403 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1404 }
1405 casGToWaiting(gp, old, reason)
1406 }
1407
1408
1409
1410
1411
1412 func casGToPreemptScan(gp *g, old, new uint32) {
1413 if old != _Grunning || new != _Gscan|_Gpreempted {
1414 throw("bad g transition")
1415 }
1416 acquireLockRankAndM(lockRankGscan)
1417 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1418 }
1419
1420
1421
1422
1423
1424
1425 }
1426
1427
1428
1429
1430 func casGFromPreempted(gp *g, old, new uint32) bool {
1431 if old != _Gpreempted || new != _Gwaiting {
1432 throw("bad g transition")
1433 }
1434 gp.waitreason = waitReasonPreempted
1435 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1436 return false
1437 }
1438 if bubble := gp.bubble; bubble != nil {
1439 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1440 }
1441 return true
1442 }
1443
1444
1445 type stwReason uint8
1446
1447
1448
1449
1450 const (
1451 stwUnknown stwReason = iota
1452 stwGCMarkTerm
1453 stwGCSweepTerm
1454 stwWriteHeapDump
1455 stwGoroutineProfile
1456 stwGoroutineProfileCleanup
1457 stwAllGoroutinesStack
1458 stwReadMemStats
1459 stwAllThreadsSyscall
1460 stwGOMAXPROCS
1461 stwStartTrace
1462 stwStopTrace
1463 stwForTestCountPagesInUse
1464 stwForTestReadMetricsSlow
1465 stwForTestReadMemStatsSlow
1466 stwForTestPageCachePagesLeaked
1467 stwForTestResetDebugLog
1468 )
1469
1470 func (r stwReason) String() string {
1471 return stwReasonStrings[r]
1472 }
1473
1474 func (r stwReason) isGC() bool {
1475 return r == stwGCMarkTerm || r == stwGCSweepTerm
1476 }
1477
1478
1479
1480
1481 var stwReasonStrings = [...]string{
1482 stwUnknown: "unknown",
1483 stwGCMarkTerm: "GC mark termination",
1484 stwGCSweepTerm: "GC sweep termination",
1485 stwWriteHeapDump: "write heap dump",
1486 stwGoroutineProfile: "goroutine profile",
1487 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1488 stwAllGoroutinesStack: "all goroutines stack trace",
1489 stwReadMemStats: "read mem stats",
1490 stwAllThreadsSyscall: "AllThreadsSyscall",
1491 stwGOMAXPROCS: "GOMAXPROCS",
1492 stwStartTrace: "start trace",
1493 stwStopTrace: "stop trace",
1494 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1495 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1496 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1497 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1498 stwForTestResetDebugLog: "ResetDebugLog (test)",
1499 }
1500
1501
1502
1503 type worldStop struct {
1504 reason stwReason
1505 startedStopping int64
1506 finishedStopping int64
1507 stoppingCPUTime int64
1508 }
1509
1510
1511
1512
1513 var stopTheWorldContext worldStop
1514
// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points, and records reason as the reason for the
// stop. On return, only the current goroutine's P is running. The caller
// must call startTheWorld when other P's should resume execution, and must
// not already hold worldsema: stopTheWorld acquires it and holds it until
// startTheWorld releases it, so concurrent stops serialize.
//
// This is also used by routines that do stack dumps; if the system is in
// panic or being exited, it may not reliably stop all goroutines.
1532 func stopTheWorld(reason stwReason) worldStop {
1533 semacquire(&worldsema)
1534 gp := getg()
1535 gp.m.preemptoff = reason.String()
1536 systemstack(func() {
1537 stopTheWorldContext = stopTheWorldWithSema(reason)
1538 })
1539 return stopTheWorldContext
1540 }
1541
1542
1543
1544
1545 func startTheWorld(w worldStop) {
1546 systemstack(func() { startTheWorldWithSema(0, w) })
1561
1562
1563 mp := acquirem()
1564 mp.preemptoff = ""
1565 semrelease1(&worldsema, true, 0)
1566 releasem(mp)
1567 }
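// Callers bracket the stopped-world section with the pair above, for example
// (stwGOMAXPROCS is one of the reasons defined above):
//
//	stw := stopTheWorld(stwGOMAXPROCS)
//	// ... the world is stopped here ...
//	startTheWorld(stw)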
1568
1569
1570
1571
1572 func stopTheWorldGC(reason stwReason) worldStop {
1573 semacquire(&gcsema)
1574 return stopTheWorld(reason)
1575 }
1576
1577
1578
1579
1580 func startTheWorldGC(w worldStop) {
1581 startTheWorld(w)
1582 semrelease(&gcsema)
1583 }
1584
1585
1586 var worldsema uint32 = 1
1587
1588
1589
1590
1591
1592
1593
1594 var gcsema uint32 = 1
// stopTheWorldWithSema is the core implementation of stopTheWorld. The
// caller must hold worldsema, must have disabled preemption (for example by
// setting m.preemptoff as stopTheWorld does), and must call it on the system
// stack. When finished, the caller must call startTheWorldWithSema
// (typically via startTheWorld) to undo these effects and release worldsema.
1628 func stopTheWorldWithSema(reason stwReason) worldStop {
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1642
1643 trace := traceAcquire()
1644 if trace.ok() {
1645 trace.STWStart(reason)
1646 traceRelease(trace)
1647 }
1648 gp := getg()
1649
1650
1651
1652 if gp.m.locks > 0 {
1653 throw("stopTheWorld: holding locks")
1654 }
1655
1656 lock(&sched.lock)
1657 start := nanotime()
1658 sched.stopwait = gomaxprocs
1659 sched.gcwaiting.Store(true)
1660 preemptall()
1661
1662
1663 gp.m.p.ptr().status = _Pgcstop
1664 gp.m.p.ptr().gcStopTime = start
1665 sched.stopwait--
1666
1667
1668 for _, pp := range allp {
1669 if thread, ok := setBlockOnExitSyscall(pp); ok {
1670 thread.gcstopP()
1671 thread.resume()
1672 }
1673 }
1674
1675
1676 now := nanotime()
1677 for {
1678 pp, _ := pidleget(now)
1679 if pp == nil {
1680 break
1681 }
1682 pp.status = _Pgcstop
1683 pp.gcStopTime = nanotime()
1684 sched.stopwait--
1685 }
1686 wait := sched.stopwait > 0
1687 unlock(&sched.lock)
1688
1689
1690 if wait {
1691 for {
1692
1693 if notetsleep(&sched.stopnote, 100*1000) {
1694 noteclear(&sched.stopnote)
1695 break
1696 }
1697 preemptall()
1698 }
1699 }
1700
1701 finish := nanotime()
1702 startTime := finish - start
1703 if reason.isGC() {
1704 sched.stwStoppingTimeGC.record(startTime)
1705 } else {
1706 sched.stwStoppingTimeOther.record(startTime)
1707 }
1708
1709
1710
1711
1712
1713 stoppingCPUTime := int64(0)
1714 bad := ""
1715 if sched.stopwait != 0 {
1716 bad = "stopTheWorld: not stopped (stopwait != 0)"
1717 } else {
1718 for _, pp := range allp {
1719 if pp.status != _Pgcstop {
1720 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1721 }
1722 if pp.gcStopTime == 0 && bad == "" {
1723 bad = "stopTheWorld: broken CPU time accounting"
1724 }
1725 stoppingCPUTime += finish - pp.gcStopTime
1726 pp.gcStopTime = 0
1727 }
1728 }
1729 if freezing.Load() {
1730
1731
1732
1733
1734 lock(&deadlock)
1735 lock(&deadlock)
1736 }
1737 if bad != "" {
1738 throw(bad)
1739 }
1740
1741 worldStopped()
1742
1743
1744 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1745
1746 return worldStop{
1747 reason: reason,
1748 startedStopping: start,
1749 finishedStopping: finish,
1750 stoppingCPUTime: stoppingCPUTime,
1751 }
1752 }
1753
1754
1755
1756
1757
1758
1759
1760 func startTheWorldWithSema(now int64, w worldStop) int64 {
1761 assertWorldStopped()
1762
1763 mp := acquirem()
1764 if netpollinited() {
1765 list, delta := netpoll(0)
1766 injectglist(&list)
1767 netpollAdjustWaiters(delta)
1768 }
1769 lock(&sched.lock)
1770
1771 procs := gomaxprocs
1772 if newprocs != 0 {
1773 procs = newprocs
1774 newprocs = 0
1775 }
1776 p1 := procresize(procs)
1777 sched.gcwaiting.Store(false)
1778 if sched.sysmonwait.Load() {
1779 sched.sysmonwait.Store(false)
1780 notewakeup(&sched.sysmonnote)
1781 }
1782 unlock(&sched.lock)
1783
1784 worldStarted()
1785
1786 for p1 != nil {
1787 p := p1
1788 p1 = p1.link.ptr()
1789 if p.m != 0 {
1790 mp := p.m.ptr()
1791 p.m = 0
1792 if mp.nextp != 0 {
1793 throw("startTheWorld: inconsistent mp->nextp")
1794 }
1795 mp.nextp.set(p)
1796 notewakeup(&mp.park)
1797 } else {
1798
1799 newm(nil, p, -1)
1800 }
1801 }
1802
1803
1804 if now == 0 {
1805 now = nanotime()
1806 }
1807 totalTime := now - w.startedStopping
1808 if w.reason.isGC() {
1809 sched.stwTotalTimeGC.record(totalTime)
1810 } else {
1811 sched.stwTotalTimeOther.record(totalTime)
1812 }
1813 trace := traceAcquire()
1814 if trace.ok() {
1815 trace.STWDone()
1816 traceRelease(trace)
1817 }
1818
1819
1820
1821
1822 wakep()
1823
1824 releasem(mp)
1825
1826 return now
1827 }
1828
1829
1830
1831 func usesLibcall() bool {
1832 switch GOOS {
1833 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1834 return true
1835 }
1836 return false
1837 }
1838
1839
1840
1841 func mStackIsSystemAllocated() bool {
1842 switch GOOS {
1843 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1844 return true
1845 }
1846 return false
1847 }
1848
1849
1850
1851 func mstart()
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862 func mstart0() {
1863 gp := getg()
1864
1865 osStack := gp.stack.lo == 0
1866 if osStack {
1867
1868
1869
1870
1871
1872
1873
1874
1875 size := gp.stack.hi
1876 if size == 0 {
1877 size = 16384 * sys.StackGuardMultiplier
1878 }
1879 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1880 gp.stack.lo = gp.stack.hi - size + 1024
1881 }
1882
1883
1884 gp.stackguard0 = gp.stack.lo + stackGuard
1885
1886
1887 gp.stackguard1 = gp.stackguard0
1888 mstart1()
1889
1890
1891 if mStackIsSystemAllocated() {
1892
1893
1894
1895 osStack = true
1896 }
1897 mexit(osStack)
1898 }
1899
1900
1901
1902
1903
1904 func mstart1() {
1905 gp := getg()
1906
1907 if gp != gp.m.g0 {
1908 throw("bad runtime·mstart")
1909 }
1910
// Set up m.g0.sched as a label returning to just after the mstart1 call in
// mstart0 above, for use by goexit0 and mcall. We never come back to mstart1
// after calling schedule, so other calls can reuse the current frame.
1917 gp.sched.g = guintptr(unsafe.Pointer(gp))
1918 gp.sched.pc = sys.GetCallerPC()
1919 gp.sched.sp = sys.GetCallerSP()
1920
1921 asminit()
1922 minit()
1923
1924
1925
1926 if gp.m == &m0 {
1927 mstartm0()
1928 }
1929
1930 if debug.dataindependenttiming == 1 {
1931 sys.EnableDIT()
1932 }
1933
1934 if fn := gp.m.mstartfn; fn != nil {
1935 fn()
1936 }
1937
1938 if gp.m != &m0 {
1939 acquirep(gp.m.nextp.ptr())
1940 gp.m.nextp = 0
1941 }
1942 schedule()
1943 }
1944
1945
1946
1947
1948
1949
1950
1951 func mstartm0() {
1952
1953
1954
1955 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1956 cgoHasExtraM = true
1957 newextram()
1958 }
1959 initsig(false)
1960 }
1961
1962
1963
1964
1965 func mPark() {
1966 gp := getg()
1967 notesleep(&gp.m.park)
1968 noteclear(&gp.m.park)
1969 }
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981 func mexit(osStack bool) {
1982 mp := getg().m
1983
1984 if mp == &m0 {
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996 handoffp(releasep())
1997 lock(&sched.lock)
1998 sched.nmfreed++
1999 checkdead()
2000 unlock(&sched.lock)
2001 mPark()
2002 throw("locked m0 woke up")
2003 }
2004
2005 sigblock(true)
2006 unminit()
2007
2008
2009 if mp.gsignal != nil {
2010 stackfree(mp.gsignal.stack)
2011 if valgrindenabled {
2012 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2013 mp.gsignal.valgrindStackID = 0
2014 }
2015
2016
2017
2018
2019 mp.gsignal = nil
2020 }
2021
2022
2023 vgetrandomDestroy(mp)
2024
2025
2026
2027 mp.self.clear()
2028
2029
2030 lock(&sched.lock)
2031 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2032 if *pprev == mp {
2033 *pprev = mp.alllink
2034 goto found
2035 }
2036 }
2037 throw("m not found in allm")
2038 found:
// Delay reaping this M until it is done with its stack: record it on
// sched.freem (with freeWait set) so that allocm can free the g0 stack and
// reclaim the slot only after this thread has truly stopped running.
2053 mp.freeWait.Store(freeMWait)
2054 mp.freelink = sched.freem
2055 sched.freem = mp
2056 unlock(&sched.lock)
2057
2058 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2059 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2060
2061
2062 handoffp(releasep())
2063
2064
2065
2066
2067
2068 lock(&sched.lock)
2069 sched.nmfreed++
2070 checkdead()
2071 unlock(&sched.lock)
2072
2073 if GOOS == "darwin" || GOOS == "ios" {
2074
2075
2076 if mp.signalPending.Load() != 0 {
2077 pendingPreemptSignals.Add(-1)
2078 }
2079 }
2080
2081
2082
2083 mdestroy(mp)
2084
2085 if osStack {
2086
2087 mp.freeWait.Store(freeMRef)
2088
2089
2090
2091 return
2092 }
2093
2094
2095
2096
2097
2098 exitThread(&mp.freeWait)
2099 }
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111 func forEachP(reason waitReason, fn func(*p)) {
2112 systemstack(func() {
2113 gp := getg().m.curg
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125 casGToWaitingForSuspendG(gp, _Grunning, reason)
2126 forEachPInternal(fn)
2127 casgstatus(gp, _Gwaiting, _Grunning)
2128 })
2129 }
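// Shape of a typical call (illustrative; someWaitReason is a placeholder):
//
//	forEachP(someWaitReason, func(pp *p) {
//		// fn runs on every P at a GC safe point, including the caller's own P
//	})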
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140 func forEachPInternal(fn func(*p)) {
2141 mp := acquirem()
2142 pp := getg().m.p.ptr()
2143
2144 lock(&sched.lock)
2145 if sched.safePointWait != 0 {
2146 throw("forEachP: sched.safePointWait != 0")
2147 }
2148 sched.safePointWait = gomaxprocs - 1
2149 sched.safePointFn = fn
2150
2151
2152 for _, p2 := range allp {
2153 if p2 != pp {
2154 atomic.Store(&p2.runSafePointFn, 1)
2155 }
2156 }
2157 preemptall()
2158
2159
2160
2161
2162
2163
2164
2165 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2166 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2167 fn(p)
2168 sched.safePointWait--
2169 }
2170 }
2171
2172 wait := sched.safePointWait > 0
2173 unlock(&sched.lock)
2174
2175
2176 fn(pp)
2177
2178
2179
2180 for _, p2 := range allp {
2181 if atomic.Load(&p2.runSafePointFn) != 1 {
2182
2183 continue
2184 }
2185 if thread, ok := setBlockOnExitSyscall(p2); ok {
2186 thread.takeP()
2187 thread.resume()
2188 handoffp(p2)
2189 }
2190 }
2191
2192
2193 if wait {
2194 for {
2195
2196
2197
2198
2199 if notetsleep(&sched.safePointNote, 100*1000) {
2200 noteclear(&sched.safePointNote)
2201 break
2202 }
2203 preemptall()
2204 }
2205 }
2206 if sched.safePointWait != 0 {
2207 throw("forEachP: not done")
2208 }
2209 for _, p2 := range allp {
2210 if p2.runSafePointFn != 0 {
2211 throw("forEachP: P did not run fn")
2212 }
2213 }
2214
2215 lock(&sched.lock)
2216 sched.safePointFn = nil
2217 unlock(&sched.lock)
2218 releasem(mp)
2219 }
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232 func runSafePointFn() {
2233 p := getg().m.p.ptr()
2234
2235
2236
2237 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2238 return
2239 }
2240 sched.safePointFn(p)
2241 lock(&sched.lock)
2242 sched.safePointWait--
2243 if sched.safePointWait == 0 {
2244 notewakeup(&sched.safePointNote)
2245 }
2246 unlock(&sched.lock)
2247 }
2248
2249
2250
2251
2252 var cgoThreadStart unsafe.Pointer
2253
2254 type cgothreadstart struct {
2255 g guintptr
2256 tls *uint64
2257 fn unsafe.Pointer
2258 }
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269 func allocm(pp *p, fn func(), id int64) *m {
2270 allocmLock.rlock()
2271
2272
2273
2274
2275 acquirem()
2276
2277 gp := getg()
2278 if gp.m.p == 0 {
2279 acquirep(pp)
2280 }
2281
2282
2283
2284 if sched.freem != nil {
2285 lock(&sched.lock)
2286 var newList *m
2287 for freem := sched.freem; freem != nil; {
2288
2289 wait := freem.freeWait.Load()
2290 if wait == freeMWait {
2291 next := freem.freelink
2292 freem.freelink = newList
2293 newList = freem
2294 freem = next
2295 continue
2296 }
2297
2298
2299
2300 if traceEnabled() || traceShuttingDown() {
2301 traceThreadDestroy(freem)
2302 }
2303
2304
2305
2306 if wait == freeMStack {
2307
2308
2309
2310 systemstack(func() {
2311 stackfree(freem.g0.stack)
2312 if valgrindenabled {
2313 valgrindDeregisterStack(freem.g0.valgrindStackID)
2314 freem.g0.valgrindStackID = 0
2315 }
2316 })
2317 }
2318 freem = freem.freelink
2319 }
2320 sched.freem = newList
2321 unlock(&sched.lock)
2322 }
2323
2324 mp := &new(mPadded).m
2325 mp.mstartfn = fn
2326 mcommoninit(mp, id)
2327
2328
2329
2330 if iscgo || mStackIsSystemAllocated() {
2331 mp.g0 = malg(-1)
2332 } else {
2333 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2334 }
2335 mp.g0.m = mp
2336
2337 if pp == gp.m.p.ptr() {
2338 releasep()
2339 }
2340
2341 releasem(gp.m)
2342 allocmLock.runlock()
2343 return mp
2344 }
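// needm obtains a currently-unused extra M (allocated by oneNewExtraM) and
// installs it on a thread that was not created by the Go runtime, so that a
// cgo callback or signal arriving on that thread can run Go code. Its
// inverse is dropm below, which returns the M to the extra list when the
// callback is done (or, with pthread key support, when the C thread exits;
// see cgoBindM).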
2385 func needm(signal bool) {
2386 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2387
2388
2389
2390
2391
2392
2393 writeErrStr("fatal error: cgo callback before cgo call\n")
2394 exit(1)
2395 }
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405 var sigmask sigset
2406 sigsave(&sigmask)
2407 sigblock(false)
2408
2409
2410
2411
2412 mp, last := getExtraM()
2413
2414
2415
2416
2417
2418
2419
2420
2421 mp.needextram = last
2422
2423
2424 mp.sigmask = sigmask
2425
2426
2427
2428 osSetupTLS(mp)
2429
2430
2431
2432 setg(mp.g0)
2433 sp := sys.GetCallerSP()
2434 callbackUpdateSystemStack(mp, sp, signal)
2435
2436
2437
2438
2439 mp.isExtraInC = false
2440
2441
2442 asminit()
2443 minit()
2444
2445
2446
2447
2448
2449
2450 var trace traceLocker
2451 if !signal {
2452 trace = traceAcquire()
2453 }
2454
2455
2456 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2457 sched.ngsys.Add(-1)
2458 sched.nGsyscallNoP.Add(1)
2459
2460 if !signal {
2461 if trace.ok() {
2462 trace.GoCreateSyscall(mp.curg)
2463 traceRelease(trace)
2464 }
2465 }
2466 mp.isExtraInSig = signal
2467 }
2468
2469
2470
2471
2472 func needAndBindM() {
2473 needm(false)
2474
2475 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2476 cgoBindM()
2477 }
2478 }
2479
2480
2481
2482
2483 func newextram() {
2484 c := extraMWaiters.Swap(0)
2485 if c > 0 {
2486 for i := uint32(0); i < c; i++ {
2487 oneNewExtraM()
2488 }
2489 } else if extraMLength.Load() == 0 {
2490
2491 oneNewExtraM()
2492 }
2493 }
2494
2495
2496 func oneNewExtraM() {
2497
2498
2499
2500
2501
2502 mp := allocm(nil, nil, -1)
2503 gp := malg(4096)
2504 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2505 gp.sched.sp = gp.stack.hi
2506 gp.sched.sp -= 4 * goarch.PtrSize
2507 gp.sched.lr = 0
2508 gp.sched.g = guintptr(unsafe.Pointer(gp))
2509 gp.syscallpc = gp.sched.pc
2510 gp.syscallsp = gp.sched.sp
2511 gp.stktopsp = gp.sched.sp
2512
2513
2514
2515 casgstatus(gp, _Gidle, _Gdeadextra)
2516 gp.m = mp
2517 mp.curg = gp
2518 mp.isextra = true
2519
2520 mp.isExtraInC = true
2521 mp.lockedInt++
2522 mp.lockedg.set(gp)
2523 gp.lockedm.set(mp)
2524 gp.goid = sched.goidgen.Add(1)
2525 if raceenabled {
2526 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2527 }
2528
2529 allgadd(gp)
2530
2531
2532
2533
2534
2535 sched.ngsys.Add(1)
2536
2537
2538 addExtraM(mp)
2539 }
2574 func dropm() {
2575
2576
2577
2578 mp := getg().m
2579
2580
2581
2582
2583
2584 var trace traceLocker
2585 if !mp.isExtraInSig {
2586 trace = traceAcquire()
2587 }
2588
2589
2590 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2591 mp.curg.preemptStop = false
2592 sched.ngsys.Add(1)
2593 sched.nGsyscallNoP.Add(-1)
2594
2595 if !mp.isExtraInSig {
2596 if trace.ok() {
2597 trace.GoDestroySyscall()
2598 traceRelease(trace)
2599 }
2600 }
2615 mp.syscalltick--
2616
2617
2618
2619 mp.curg.trace.reset()
2620
2621
2622
2623
2624 if traceEnabled() || traceShuttingDown() {
2625
2626
2627
2628
2629
2630
2631
2632 lock(&sched.lock)
2633 traceThreadDestroy(mp)
2634 unlock(&sched.lock)
2635 }
2636 mp.isExtraInSig = false
2637
2638
2639
2640
2641
2642 sigmask := mp.sigmask
2643 sigblock(false)
2644 unminit()
2645
2646 setg(nil)
2647
2648
2649
2650 g0 := mp.g0
2651 g0.stack.hi = 0
2652 g0.stack.lo = 0
2653 g0.stackguard0 = 0
2654 g0.stackguard1 = 0
2655 mp.g0StackAccurate = false
2656
2657 putExtraM(mp)
2658
2659 msigrestore(sigmask)
2660 }
2682 func cgoBindM() {
2683 if GOOS == "windows" || GOOS == "plan9" {
2684 fatal("bindm in unexpected GOOS")
2685 }
2686 g := getg()
2687 if g.m.g0 != g {
2688 fatal("the current g is not g0")
2689 }
2690 if _cgo_bindm != nil {
2691 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2692 }
2693 }
2706 func getm() uintptr {
2707 return uintptr(unsafe.Pointer(getg().m))
2708 }
2709
2710 var (
2711
2712
2713
2714
2715
2716
2717 extraM atomic.Uintptr
2718
2719 extraMLength atomic.Uint32
2720
2721 extraMWaiters atomic.Uint32
2722
2723
2724 extraMInUse atomic.Uint32
2725 )
2726
2727
2728
2729
2730
2731
2732
2733
2734 func lockextra(nilokay bool) *m {
2735 const locked = 1
2736
2737 incr := false
2738 for {
2739 old := extraM.Load()
2740 if old == locked {
2741 osyield_no_g()
2742 continue
2743 }
2744 if old == 0 && !nilokay {
2745 if !incr {
2746
2747
2748
2749 extraMWaiters.Add(1)
2750 incr = true
2751 }
2752 usleep_no_g(1)
2753 continue
2754 }
2755 if extraM.CompareAndSwap(old, locked) {
2756 return (*m)(unsafe.Pointer(old))
2757 }
2758 osyield_no_g()
2759 continue
2760 }
2761 }
2762
2763
2764 func unlockextra(mp *m, delta int32) {
2765 extraMLength.Add(delta)
2766 extraM.Store(uintptr(unsafe.Pointer(mp)))
2767 }
2768
2769
2770
2771
2772
2773
2774
2775
2776 func getExtraM() (mp *m, last bool) {
2777 mp = lockextra(false)
2778 extraMInUse.Add(1)
2779 unlockextra(mp.schedlink.ptr(), -1)
2780 return mp, mp.schedlink.ptr() == nil
2781 }
2782
2783
2784
2785
2786
2787 func putExtraM(mp *m) {
2788 extraMInUse.Add(-1)
2789 addExtraM(mp)
2790 }
2791
2792
2793
2794
2795 func addExtraM(mp *m) {
2796 mnext := lockextra(true)
2797 mp.schedlink.set(mnext)
2798 unlockextra(mp, 1)
2799 }
2800
2801 var (
2802
2803
2804
2805 allocmLock rwmutex
2806
2807
2808
2809
2810 execLock rwmutex
2811 )
2812
2813
2814
2815 const (
2816 failthreadcreate = "runtime: failed to create new OS thread\n"
2817 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2818 )
2819
2820
2821
2822
2823 var newmHandoff struct {
2824 lock mutex
2825
2826
2827
2828 newm muintptr
2829
2830
2831
2832 waiting bool
2833 wake note
2834
2835
2836
2837
2838 haveTemplateThread uint32
2839 }
2840
2841
2842
2843
2844
2845
2846
2847
2848 func newm(fn func(), pp *p, id int64) {
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859 acquirem()
2860
2861 mp := allocm(pp, fn, id)
2862 mp.nextp.set(pp)
2863 mp.sigmask = initSigmask
2864 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876 lock(&newmHandoff.lock)
2877 if newmHandoff.haveTemplateThread == 0 {
2878 throw("on a locked thread with no template thread")
2879 }
2880 mp.schedlink = newmHandoff.newm
2881 newmHandoff.newm.set(mp)
2882 if newmHandoff.waiting {
2883 newmHandoff.waiting = false
2884 notewakeup(&newmHandoff.wake)
2885 }
2886 unlock(&newmHandoff.lock)
2887
2888
2889
2890 releasem(getg().m)
2891 return
2892 }
2893 newm1(mp)
2894 releasem(getg().m)
2895 }
2896
2897 func newm1(mp *m) {
2898 if iscgo {
2899 var ts cgothreadstart
2900 if _cgo_thread_start == nil {
2901 throw("_cgo_thread_start missing")
2902 }
2903 ts.g.set(mp.g0)
2904 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2905 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2906 if msanenabled {
2907 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2908 }
2909 if asanenabled {
2910 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2911 }
2912 execLock.rlock()
2913 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2914 execLock.runlock()
2915 return
2916 }
2917 execLock.rlock()
2918 newosproc(mp)
2919 execLock.runlock()
2920 }
2921
2922
2923
2924
2925
2926 func startTemplateThread() {
2927 if GOARCH == "wasm" {
2928 return
2929 }
2930
2931
2932
2933 mp := acquirem()
2934 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2935 releasem(mp)
2936 return
2937 }
2938 newm(templateThread, nil, -1)
2939 releasem(mp)
2940 }
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954 func templateThread() {
2955 lock(&sched.lock)
2956 sched.nmsys++
2957 checkdead()
2958 unlock(&sched.lock)
2959
2960 for {
2961 lock(&newmHandoff.lock)
2962 for newmHandoff.newm != 0 {
2963 newm := newmHandoff.newm.ptr()
2964 newmHandoff.newm = 0
2965 unlock(&newmHandoff.lock)
2966 for newm != nil {
2967 next := newm.schedlink.ptr()
2968 newm.schedlink = 0
2969 newm1(newm)
2970 newm = next
2971 }
2972 lock(&newmHandoff.lock)
2973 }
2974 newmHandoff.waiting = true
2975 noteclear(&newmHandoff.wake)
2976 unlock(&newmHandoff.lock)
2977 notesleep(&newmHandoff.wake)
2978 }
2979 }
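// The template thread exists because newosproc must be called from a thread
// in a known-good state: threads locked to a goroutine via LockOSThread or
// running in cgo may carry per-thread state (signal masks and similar) that
// a new M must not inherit, so newm hands the request to this thread instead
// (see the newmHandoff path in newm).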
2980
2981
2982
2983 func stopm() {
2984 gp := getg()
2985
2986 if gp.m.locks != 0 {
2987 throw("stopm holding locks")
2988 }
2989 if gp.m.p != 0 {
2990 throw("stopm holding p")
2991 }
2992 if gp.m.spinning {
2993 throw("stopm spinning")
2994 }
2995
2996 lock(&sched.lock)
2997 mput(gp.m)
2998 unlock(&sched.lock)
2999 mPark()
3000 acquirep(gp.m.nextp.ptr())
3001 gp.m.nextp = 0
3002 }
3003
3004 func mspinning() {
3005
3006 getg().m.spinning = true
3007 }
// startm schedules some M to run the P (creating a new M if none is idle).
// If pp == nil, it tries to take an idle P; if there is none it does nothing
// (spinning must be false in that case). If spinning is true, the caller has
// incremented nmspinning and startm will set m.spinning in the started M.
// lockheld reports whether the caller already holds sched.lock; the lock may
// be dropped and reacquired while a new M is allocated.
// May run with m.p == nil, so write barriers are not allowed.
3026 func startm(pp *p, spinning, lockheld bool) {
3043 mp := acquirem()
3044 if !lockheld {
3045 lock(&sched.lock)
3046 }
3047 if pp == nil {
3048 if spinning {
3049
3050
3051
3052 throw("startm: P required for spinning=true")
3053 }
3054 pp, _ = pidleget(0)
3055 if pp == nil {
3056 if !lockheld {
3057 unlock(&sched.lock)
3058 }
3059 releasem(mp)
3060 return
3061 }
3062 }
3063 nmp := mget()
3064 if nmp == nil {
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079 id := mReserveID()
3080 unlock(&sched.lock)
3081
3082 var fn func()
3083 if spinning {
3084
3085 fn = mspinning
3086 }
3087 newm(fn, pp, id)
3088
3089 if lockheld {
3090 lock(&sched.lock)
3091 }
3092
3093
3094 releasem(mp)
3095 return
3096 }
3097 if !lockheld {
3098 unlock(&sched.lock)
3099 }
3100 if nmp.spinning {
3101 throw("startm: m is spinning")
3102 }
3103 if nmp.nextp != 0 {
3104 throw("startm: m has p")
3105 }
3106 if spinning && !runqempty(pp) {
3107 throw("startm: p has runnable gs")
3108 }
3109
3110 nmp.spinning = spinning
3111 nmp.nextp.set(pp)
3112 notewakeup(&nmp.park)
3113
3114
3115 releasem(mp)
3116 }
3117
3118
3119
3120
3121
3122 func handoffp(pp *p) {
3123
3124
3125
3126
3127 if !runqempty(pp) || !sched.runq.empty() {
3128 startm(pp, false, false)
3129 return
3130 }
3131
3132 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3133 startm(pp, false, false)
3134 return
3135 }
3136
3137 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3138 startm(pp, false, false)
3139 return
3140 }
3141
3142
3143 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3144 sched.needspinning.Store(0)
3145 startm(pp, true, false)
3146 return
3147 }
3148 lock(&sched.lock)
3149 if sched.gcwaiting.Load() {
3150 pp.status = _Pgcstop
3151 pp.gcStopTime = nanotime()
3152 sched.stopwait--
3153 if sched.stopwait == 0 {
3154 notewakeup(&sched.stopnote)
3155 }
3156 unlock(&sched.lock)
3157 return
3158 }
3159 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3160 sched.safePointFn(pp)
3161 sched.safePointWait--
3162 if sched.safePointWait == 0 {
3163 notewakeup(&sched.safePointNote)
3164 }
3165 }
3166 if !sched.runq.empty() {
3167 unlock(&sched.lock)
3168 startm(pp, false, false)
3169 return
3170 }
3171
3172
3173 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3174 unlock(&sched.lock)
3175 startm(pp, false, false)
3176 return
3177 }
3178
3179
3180
3181 when := pp.timers.wakeTime()
3182 pidleput(pp, 0)
3183 unlock(&sched.lock)
3184
3185 if when != 0 {
3186 wakeNetPoller(when)
3187 }
3188 }
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203 func wakep() {
3204
3205
3206 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3207 return
3208 }
3209
3210
3211
3212
3213
3214
3215 mp := acquirem()
3216
3217 var pp *p
3218 lock(&sched.lock)
3219 pp, _ = pidlegetSpinning(0)
3220 if pp == nil {
3221 if sched.nmspinning.Add(-1) < 0 {
3222 throw("wakep: negative nmspinning")
3223 }
3224 unlock(&sched.lock)
3225 releasem(mp)
3226 return
3227 }
3228
3229
3230
3231
3232 unlock(&sched.lock)
3233
3234 startm(pp, true, false)
3235
3236 releasem(mp)
3237 }
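// wakep increments nmspinning before it looks for an idle P; together with
// the mirror-image ordering in findRunnable (which decrements nmspinning
// before re-checking every source of work), this guarantees that whenever
// new work is submitted either some M is already spinning or a new spinning
// M is started, so the work cannot be stranded.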
3238
3239
3240
3241 func stoplockedm() {
3242 gp := getg()
3243
3244 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3245 throw("stoplockedm: inconsistent locking")
3246 }
3247 if gp.m.p != 0 {
3248
3249 pp := releasep()
3250 handoffp(pp)
3251 }
3252 incidlelocked(1)
3253
3254 mPark()
3255 status := readgstatus(gp.m.lockedg.ptr())
3256 if status&^_Gscan != _Grunnable {
3257 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3258 dumpgstatus(gp.m.lockedg.ptr())
3259 throw("stoplockedm: not runnable")
3260 }
3261 acquirep(gp.m.nextp.ptr())
3262 gp.m.nextp = 0
3263 }
3264
3265
3266
3267
3268
3269 func startlockedm(gp *g) {
3270 mp := gp.lockedm.ptr()
3271 if mp == getg().m {
3272 throw("startlockedm: locked to me")
3273 }
3274 if mp.nextp != 0 {
3275 throw("startlockedm: m has p")
3276 }
3277
3278 incidlelocked(-1)
3279 pp := releasep()
3280 mp.nextp.set(pp)
3281 notewakeup(&mp.park)
3282 stopm()
3283 }
3284
3285
3286
3287 func gcstopm() {
3288 gp := getg()
3289
3290 if !sched.gcwaiting.Load() {
3291 throw("gcstopm: not waiting for gc")
3292 }
3293 if gp.m.spinning {
3294 gp.m.spinning = false
3295
3296
3297 if sched.nmspinning.Add(-1) < 0 {
3298 throw("gcstopm: negative nmspinning")
3299 }
3300 }
3301 pp := releasep()
3302 lock(&sched.lock)
3303 pp.status = _Pgcstop
3304 pp.gcStopTime = nanotime()
3305 sched.stopwait--
3306 if sched.stopwait == 0 {
3307 notewakeup(&sched.stopnote)
3308 }
3309 unlock(&sched.lock)
3310 stopm()
3311 }
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322 func execute(gp *g, inheritTime bool) {
3323 mp := getg().m
3324
3325 if goroutineProfile.active {
3326
3327
3328
3329 tryRecordGoroutineProfile(gp, nil, osyield)
3330 }
3331
3332
3333 mp.curg = gp
3334 gp.m = mp
3335 gp.syncSafePoint = false
3336 casgstatus(gp, _Grunnable, _Grunning)
3337 gp.waitsince = 0
3338 gp.preempt = false
3339 gp.stackguard0 = gp.stack.lo + stackGuard
3340 if !inheritTime {
3341 mp.p.ptr().schedtick++
3342 }
3343
3344
3345 hz := sched.profilehz
3346 if mp.profilehz != hz {
3347 setThreadCPUProfiler(hz)
3348 }
3349
3350 trace := traceAcquire()
3351 if trace.ok() {
3352 trace.GoStart()
3353 traceRelease(trace)
3354 }
3355
3356 gogo(&gp.sched)
3357 }
3358
3359
3360
3361
3362
3363 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3364 mp := getg().m
3365
3366
3367
3368
3369
3370 top:
3371
3372
3373
3374 mp.clearAllpSnapshot()
3375
3376 pp := mp.p.ptr()
3377 if sched.gcwaiting.Load() {
3378 gcstopm()
3379 goto top
3380 }
3381 if pp.runSafePointFn != 0 {
3382 runSafePointFn()
3383 }
3384
3385
3386
3387
3388
3389 now, pollUntil, _ := pp.timers.check(0, nil)
3390
3391
3392 if traceEnabled() || traceShuttingDown() {
3393 gp := traceReader()
3394 if gp != nil {
3395 trace := traceAcquire()
3396 casgstatus(gp, _Gwaiting, _Grunnable)
3397 if trace.ok() {
3398 trace.GoUnpark(gp, 0)
3399 traceRelease(trace)
3400 }
3401 return gp, false, true
3402 }
3403 }
3404
3405
3406 if gcBlackenEnabled != 0 {
3407 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3408 if gp != nil {
3409 return gp, false, true
3410 }
3411 now = tnow
3412 }
3413
3414
3415
3416
3417 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3418 lock(&sched.lock)
3419 gp := globrunqget()
3420 unlock(&sched.lock)
3421 if gp != nil {
3422 return gp, false, false
3423 }
3424 }
3425
3426
3427 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3428 if gp := wakefing(); gp != nil {
3429 ready(gp, 0, true)
3430 }
3431 }
3432
3433
3434 if gcCleanups.needsWake() {
3435 gcCleanups.wake()
3436 }
3437
3438 if *cgo_yield != nil {
3439 asmcgocall(*cgo_yield, nil)
3440 }
3441
3442
3443 if gp, inheritTime := runqget(pp); gp != nil {
3444 return gp, inheritTime, false
3445 }
3446
3447
3448 if !sched.runq.empty() {
3449 lock(&sched.lock)
3450 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3451 unlock(&sched.lock)
3452 if gp != nil {
3453 if runqputbatch(pp, &q); !q.empty() {
3454 throw("Couldn't put Gs into empty local runq")
3455 }
3456 return gp, false, false
3457 }
3458 }
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3470 list, delta := netpoll(0)
3471 sched.pollingNet.Store(0)
3472 if !list.empty() {
3473 gp := list.pop()
3474 injectglist(&list)
3475 netpollAdjustWaiters(delta)
3476 trace := traceAcquire()
3477 casgstatus(gp, _Gwaiting, _Grunnable)
3478 if trace.ok() {
3479 trace.GoUnpark(gp, 0)
3480 traceRelease(trace)
3481 }
3482 return gp, false, false
3483 }
3484 }
3485
3486
3487
3488
3489
3490
3491 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3492 if !mp.spinning {
3493 mp.becomeSpinning()
3494 }
3495
3496 gp, inheritTime, tnow, w, newWork := stealWork(now)
3497 if gp != nil {
3498
3499 return gp, inheritTime, false
3500 }
3501 if newWork {
3502
3503
3504 goto top
3505 }
3506
3507 now = tnow
3508 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3509
3510 pollUntil = w
3511 }
3512 }
3513
3514
3515
3516
3517
3518 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3519 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3520 if node != nil {
3521 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3522 gp := node.gp.ptr()
3523
3524 trace := traceAcquire()
3525 casgstatus(gp, _Gwaiting, _Grunnable)
3526 if trace.ok() {
3527 trace.GoUnpark(gp, 0)
3528 traceRelease(trace)
3529 }
3530 return gp, false, false
3531 }
3532 gcController.removeIdleMarkWorker()
3533 }
3534
3535
3536
3537
3538
3539 gp, otherReady := beforeIdle(now, pollUntil)
3540 if gp != nil {
3541 trace := traceAcquire()
3542 casgstatus(gp, _Gwaiting, _Grunnable)
3543 if trace.ok() {
3544 trace.GoUnpark(gp, 0)
3545 traceRelease(trace)
3546 }
3547 return gp, false, false
3548 }
3549 if otherReady {
3550 goto top
3551 }
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561 allpSnapshot := mp.snapshotAllp()
3562
3563
3564 idlepMaskSnapshot := idlepMask
3565 timerpMaskSnapshot := timerpMask
3566
3567
3568 lock(&sched.lock)
3569 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3570 unlock(&sched.lock)
3571 goto top
3572 }
3573 if !sched.runq.empty() {
3574 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3575 unlock(&sched.lock)
3576 if gp == nil {
3577 throw("global runq empty with non-zero runqsize")
3578 }
3579 if runqputbatch(pp, &q); !q.empty() {
3580 throw("Couldn't put Gs into empty local runq")
3581 }
3582 return gp, false, false
3583 }
3584 if !mp.spinning && sched.needspinning.Load() == 1 {
3585
3586 mp.becomeSpinning()
3587 unlock(&sched.lock)
3588 goto top
3589 }
3590 if releasep() != pp {
3591 throw("findRunnable: wrong p")
3592 }
3593 now = pidleput(pp, now)
3594 unlock(&sched.lock)
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625 // Delicate dance: the thread transitions from spinning to non-spinning
3626 // state, potentially concurrently with submission of new work. We must
3627 // drop nmspinning first and then check all the sources of work again.
3628 // If we do it the other way around, another thread can submit work
3629 // after we've already checked but before we drop nmspinning, and as a
3630 // result nobody will unpark a thread to run that work. That is why the
3631 // checks below are repeated here after decrementing nmspinning.
3632 wasSpinning := mp.spinning
3633 if mp.spinning {
3634 mp.spinning = false
3635 if sched.nmspinning.Add(-1) < 0 {
3636 throw("findRunnable: negative nmspinning")
3637 }
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650 lock(&sched.lock)
3651 if !sched.runq.empty() {
3652 pp, _ := pidlegetSpinning(0)
3653 if pp != nil {
3654 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3655 unlock(&sched.lock)
3656 if gp == nil {
3657 throw("global runq empty with non-zero runqsize")
3658 }
3659 if runqputbatch(pp, &q); !q.empty() {
3660 throw("Couldn't put Gs into empty local runq")
3661 }
3662 acquirep(pp)
3663 mp.becomeSpinning()
3664 return gp, false, false
3665 }
3666 }
3667 unlock(&sched.lock)
3668
3669 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3670 if pp != nil {
3671 acquirep(pp)
3672 mp.becomeSpinning()
3673 goto top
3674 }
3675
3676
3677 pp, gp := checkIdleGCNoP()
3678 if pp != nil {
3679 acquirep(pp)
3680 mp.becomeSpinning()
3681
3682
3683 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3684 trace := traceAcquire()
3685 casgstatus(gp, _Gwaiting, _Grunnable)
3686 if trace.ok() {
3687 trace.GoUnpark(gp, 0)
3688 traceRelease(trace)
3689 }
3690 return gp, false, false
3691 }
3692
3693
3694
3695
3696
3697
3698
3699 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3700 }
3701
3702
3703
3704
3705
3706 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3707 sched.pollUntil.Store(pollUntil)
3708 if mp.p != 0 {
3709 throw("findRunnable: netpoll with p")
3710 }
3711 if mp.spinning {
3712 throw("findRunnable: netpoll with spinning")
3713 }
3714 delay := int64(-1)
3715 if pollUntil != 0 {
3716 if now == 0 {
3717 now = nanotime()
3718 }
3719 delay = pollUntil - now
3720 if delay < 0 {
3721 delay = 0
3722 }
3723 }
3724 if faketime != 0 {
3725
3726 delay = 0
3727 }
3728 list, delta := netpoll(delay)
3729
3730 now = nanotime()
3731 sched.pollUntil.Store(0)
3732 sched.lastpoll.Store(now)
3733 if faketime != 0 && list.empty() {
3734
3735
3736 stopm()
3737 goto top
3738 }
3739 lock(&sched.lock)
3740 pp, _ := pidleget(now)
3741 unlock(&sched.lock)
3742 if pp == nil {
3743 injectglist(&list)
3744 netpollAdjustWaiters(delta)
3745 } else {
3746 acquirep(pp)
3747 if !list.empty() {
3748 gp := list.pop()
3749 injectglist(&list)
3750 netpollAdjustWaiters(delta)
3751 trace := traceAcquire()
3752 casgstatus(gp, _Gwaiting, _Grunnable)
3753 if trace.ok() {
3754 trace.GoUnpark(gp, 0)
3755 traceRelease(trace)
3756 }
3757 return gp, false, false
3758 }
3759 if wasSpinning {
3760 mp.becomeSpinning()
3761 }
3762 goto top
3763 }
3764 } else if pollUntil != 0 && netpollinited() {
3765 pollerPollUntil := sched.pollUntil.Load()
3766 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3767 netpollBreak()
3768 }
3769 }
3770 stopm()
3771 goto top
3772 }
3773
3774
3775
3776
3777
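// pollWork reports whether there is non-background work this P could
// be doing: a non-empty global or local run queue, or ready network
// poller goroutines. It is a lightweight check used by background loops.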
3778 func pollWork() bool {
3779 if !sched.runq.empty() {
3780 return true
3781 }
3782 p := getg().m.p.ptr()
3783 if !runqempty(p) {
3784 return true
3785 }
3786 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3787 if list, delta := netpoll(0); !list.empty() {
3788 injectglist(&list)
3789 netpollAdjustWaiters(delta)
3790 return true
3791 }
3792 }
3793 return false
3794 }
3795
3796
3797
3798
3799
3800
3801
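// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed
// time or the current time if now was passed as 0.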
3802 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3803 pp := getg().m.p.ptr()
3804
3805 ranTimer := false
3806
3807 const stealTries = 4
3808 for i := 0; i < stealTries; i++ {
3809 stealTimersOrRunNextG := i == stealTries-1
3810
3811 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3812 if sched.gcwaiting.Load() {
3813
3814 return nil, false, now, pollUntil, true
3815 }
3816 p2 := allp[enum.position()]
3817 if pp == p2 {
3818 continue
3819 }
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3835 tnow, w, ran := p2.timers.check(now, nil)
3836 now = tnow
3837 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3838 pollUntil = w
3839 }
3840 if ran {
3841
3842
3843
3844
3845
3846
3847
3848
3849 if gp, inheritTime := runqget(pp); gp != nil {
3850 return gp, inheritTime, now, pollUntil, ranTimer
3851 }
3852 ranTimer = true
3853 }
3854 }
3855
3856
3857 if !idlepMask.read(enum.position()) {
3858 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3859 return gp, false, now, pollUntil, ranTimer
3860 }
3861 }
3862 }
3863 }
3864
3865
3866
3867
3868 return nil, false, now, pollUntil, ranTimer
3869 }
3870
3871
3872
3873
3874
3875
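// checkRunqsNoP checks all Ps in the snapshot for runnable work while
// the caller has no P of its own. If some P has a non-empty run queue
// and an idle P is available, that idle P is returned for the caller to
// acquire (and then go steal the work). Returns nil otherwise.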
3876 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3877 for id, p2 := range allpSnapshot {
3878 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3879 lock(&sched.lock)
3880 pp, _ := pidlegetSpinning(0)
3881 if pp == nil {
3882
3883 unlock(&sched.lock)
3884 return nil
3885 }
3886 unlock(&sched.lock)
3887 return pp
3888 }
3889 }
3890
3891
3892 return nil
3893 }
3894
3895
3896
3897
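// checkTimersNoP checks all Ps in the snapshot for a timer expiring
// sooner than pollUntil and returns the updated pollUntil value.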
3898 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3899 for id, p2 := range allpSnapshot {
3900 if timerpMaskSnapshot.read(uint32(id)) {
3901 w := p2.timers.wakeTime()
3902 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3903 pollUntil = w
3904 }
3905 }
3906 }
3907
3908 return pollUntil
3909 }
3910
3911
3912
3913
3914
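// checkIdleGCNoP checks for idle-priority GC mark work while the caller
// has no P. If an idle mark worker slot and a background mark worker G
// are both available, it returns an idle P to acquire and the worker G
// to run on it; otherwise it returns nil, nil.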
3915 func checkIdleGCNoP() (*p, *g) {
3916
3917
3918
3919
3920
3921
3922 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3923 return nil, nil
3924 }
3925 if !gcShouldScheduleWorker(nil) {
3926 return nil, nil
3927 }
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946 lock(&sched.lock)
3947 pp, now := pidlegetSpinning(0)
3948 if pp == nil {
3949 unlock(&sched.lock)
3950 return nil, nil
3951 }
3952
3953
3954 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3955 pidleput(pp, now)
3956 unlock(&sched.lock)
3957 return nil, nil
3958 }
3959
3960 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3961 if node == nil {
3962 pidleput(pp, now)
3963 unlock(&sched.lock)
3964 gcController.removeIdleMarkWorker()
3965 return nil, nil
3966 }
3967
3968 unlock(&sched.lock)
3969
3970 return pp, node.gp.ptr()
3971 }
3972
3973
3974
3975
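// wakeNetPoller wakes up the thread sleeping in the network poller if
// it isn't going to wake up before the when argument; or it wakes an
// idle P to service timers and the network poller if there isn't one
// already.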
3976 func wakeNetPoller(when int64) {
3977 if sched.lastpoll.Load() == 0 {
3978
3979
3980
3981
3982 pollerPollUntil := sched.pollUntil.Load()
3983 if pollerPollUntil == 0 || pollerPollUntil > when {
3984 netpollBreak()
3985 }
3986 } else {
3987
3988
3989 if GOOS != "plan9" {
3990 wakep()
3991 }
3992 }
3993 }
3994
3995 func resetspinning() {
3996 gp := getg()
3997 if !gp.m.spinning {
3998 throw("resetspinning: not a spinning m")
3999 }
4000 gp.m.spinning = false
4001 nmspinning := sched.nmspinning.Add(-1)
4002 if nmspinning < 0 {
4003 throw("findRunnable: negative nmspinning")
4004 }
4005
4006
4007
4008 wakep()
4009 }
4010
4011
4012
4013
4014
4015
4016
4017
4018
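// injectglist adds each runnable G on the list to some run queue and
// clears glist. If there is no current P, the Gs go on the global queue
// and idle Ms are started to run them. Otherwise one G per idle P is
// pushed to the global queue (starting an M for each) and the rest go
// on the current P's local run queue, spilling to the global queue if
// the local one fills up.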
4019 func injectglist(glist *gList) {
4020 if glist.empty() {
4021 return
4022 }
4023
4024
4025
4026 var tail *g
4027 trace := traceAcquire()
4028 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4029 tail = gp
4030 casgstatus(gp, _Gwaiting, _Grunnable)
4031 if trace.ok() {
4032 trace.GoUnpark(gp, 0)
4033 }
4034 }
4035 if trace.ok() {
4036 traceRelease(trace)
4037 }
4038
4039
4040 q := gQueue{glist.head, tail.guintptr(), glist.size}
4041 *glist = gList{}
4042
4043 startIdle := func(n int32) {
4044 for ; n > 0; n-- {
4045 mp := acquirem()
4046 lock(&sched.lock)
4047
4048 pp, _ := pidlegetSpinning(0)
4049 if pp == nil {
4050 unlock(&sched.lock)
4051 releasem(mp)
4052 break
4053 }
4054
4055 startm(pp, false, true)
4056 unlock(&sched.lock)
4057 releasem(mp)
4058 }
4059 }
4060
4061 pp := getg().m.p.ptr()
4062 if pp == nil {
4063 n := q.size
4064 lock(&sched.lock)
4065 globrunqputbatch(&q)
4066 unlock(&sched.lock)
4067 startIdle(n)
4068 return
4069 }
4070
4071 var globq gQueue
4072 npidle := sched.npidle.Load()
4073 for ; npidle > 0 && !q.empty(); npidle-- {
4074 g := q.pop()
4075 globq.pushBack(g)
4076 }
4077 if !globq.empty() {
4078 n := globq.size
4079 lock(&sched.lock)
4080 globrunqputbatch(&globq)
4081 unlock(&sched.lock)
4082 startIdle(n)
4083 }
4084
4085 if runqputbatch(pp, &q); !q.empty() {
4086 lock(&sched.lock)
4087 globrunqputbatch(&q)
4088 unlock(&sched.lock)
4089 }
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104 wakep()
4105 }
4106
4107
4108
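// schedule performs one round of scheduling: it finds a runnable
// goroutine and executes it. It never returns.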
4109 func schedule() {
4110 mp := getg().m
4111
4112 if mp.locks != 0 {
4113 throw("schedule: holding locks")
4114 }
4115
4116 if mp.lockedg != 0 {
4117 stoplockedm()
4118 execute(mp.lockedg.ptr(), false)
4119 }
4120
4121
4122
4123 if mp.incgo {
4124 throw("schedule: in cgo")
4125 }
4126
4127 top:
4128 pp := mp.p.ptr()
4129 pp.preempt = false
4130
4131
4132
4133
4134 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4135 throw("schedule: spinning with local work")
4136 }
4137
4138 gp, inheritTime, tryWakeP := findRunnable()
4139
4140
4141 pp = mp.p.ptr()
4142
4143
4144
4145
4146 mp.clearAllpSnapshot()
4147
4148
4149
4150
4151
4152
4153
4154
4155 gcController.releaseNextGCMarkWorker(pp)
4156
4157 if debug.dontfreezetheworld > 0 && freezing.Load() {
4158
4159
4160
4161
4162
4163
4164
4165 lock(&deadlock)
4166 lock(&deadlock)
4167 }
4168
4169
4170
4171
4172 if mp.spinning {
4173 resetspinning()
4174 }
4175
4176 if sched.disable.user && !schedEnabled(gp) {
4177
4178
4179
4180 lock(&sched.lock)
4181 if schedEnabled(gp) {
4182
4183
4184 unlock(&sched.lock)
4185 } else {
4186 sched.disable.runnable.pushBack(gp)
4187 unlock(&sched.lock)
4188 goto top
4189 }
4190 }
4191
4192
4193
4194 if tryWakeP {
4195 wakep()
4196 }
4197 if gp.lockedm != 0 {
4198
4199
4200 startlockedm(gp)
4201 goto top
4202 }
4203
4204 execute(gp, inheritTime)
4205 }
4206
4207
4208
4209
4210
4211
4212
4213
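// dropg removes the association between m and the current goroutine
// m.curg (gp for short). Typically the caller sets gp's status away
// from _Grunning and then immediately calls dropg to finish the job.
// The caller is also responsible for arranging for gp to be restarted
// at an appropriate time.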
4214 func dropg() {
4215 gp := getg()
4216
4217 setMNoWB(&gp.m.curg.m, nil)
4218 setGNoWB(&gp.m.curg, nil)
4219 }
4220
4221 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4222 unlock((*mutex)(lock))
4223 return true
4224 }
4225
4226
4227 func park_m(gp *g) {
4228 mp := getg().m
4229
4230 trace := traceAcquire()
4231
4232
4233
4234
4235
4236 bubble := gp.bubble
4237 if bubble != nil {
4238 bubble.incActive()
4239 }
4240
4241 if trace.ok() {
4242
4243
4244
4245 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4246 }
4247
4248
4249 casgstatus(gp, _Grunning, _Gwaiting)
4250 if trace.ok() {
4251 traceRelease(trace)
4252 }
4253
4254 dropg()
4255
4256 if fn := mp.waitunlockf; fn != nil {
4257 ok := fn(gp, mp.waitlock)
4258 mp.waitunlockf = nil
4259 mp.waitlock = nil
4260 if !ok {
4261 trace := traceAcquire()
4262 casgstatus(gp, _Gwaiting, _Grunnable)
4263 if bubble != nil {
4264 bubble.decActive()
4265 }
4266 if trace.ok() {
4267 trace.GoUnpark(gp, 2)
4268 traceRelease(trace)
4269 }
4270 execute(gp, true)
4271 }
4272 }
4273
4274 if bubble != nil {
4275 bubble.decActive()
4276 }
4277
4278 schedule()
4279 }
4280
4281 func goschedImpl(gp *g, preempted bool) {
4282 pp := gp.m.p.ptr()
4283 trace := traceAcquire()
4284 status := readgstatus(gp)
4285 if status&^_Gscan != _Grunning {
4286 dumpgstatus(gp)
4287 throw("bad g status")
4288 }
4289 if trace.ok() {
4290
4291
4292
4293 if preempted {
4294 trace.GoPreempt()
4295 } else {
4296 trace.GoSched()
4297 }
4298 }
4299 casgstatus(gp, _Grunning, _Grunnable)
4300 if trace.ok() {
4301 traceRelease(trace)
4302 }
4303
4304 dropg()
4305 if preempted && sched.gcwaiting.Load() {
4306
4307
4308 runqput(pp, gp, true)
4309 } else {
4310 lock(&sched.lock)
4311 globrunqput(gp)
4312 unlock(&sched.lock)
4313 }
4314
4315 if mainStarted {
4316 wakep()
4317 }
4318
4319 schedule()
4320 }
4321
4322
4323 func gosched_m(gp *g) {
4324 goschedImpl(gp, false)
4325 }
4326
4327
4328 func goschedguarded_m(gp *g) {
4329 if !canPreemptM(gp.m) {
4330 gogo(&gp.sched)
4331 }
4332 goschedImpl(gp, false)
4333 }
4334
4335 func gopreempt_m(gp *g) {
4336 goschedImpl(gp, true)
4337 }
4338
4339
4340
4341
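// preemptPark parks gp at an asynchronous preemption point, puts it in
// the _Gpreempted state, and schedules something else on this M.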
4342 func preemptPark(gp *g) {
4343 status := readgstatus(gp)
4344 if status&^_Gscan != _Grunning {
4345 dumpgstatus(gp)
4346 throw("bad g status")
4347 }
4348
4349 if gp.asyncSafePoint {
4350
4351
4352
4353 f := findfunc(gp.sched.pc)
4354 if !f.valid() {
4355 throw("preempt at unknown pc")
4356 }
4357 if f.flag&abi.FuncFlagSPWrite != 0 {
4358 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4359 throw("preempt SPWRITE")
4360 }
4361 }
4362
4363
4364
4365
4366
4367
4368
4369 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4370 dropg()
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392 trace := traceAcquire()
4393 if trace.ok() {
4394 trace.GoPark(traceBlockPreempted, 0)
4395 }
4396 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4397 if trace.ok() {
4398 traceRelease(trace)
4399 }
4400 schedule()
4401 }
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
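// goyield is like Gosched, but it emits a GoPreempt trace event instead
// of a GoSched event and puts the current goroutine on the local run
// queue of the current P instead of the global run queue.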
4417 func goyield() {
4418 checkTimeouts()
4419 mcall(goyield_m)
4420 }
4421
4422 func goyield_m(gp *g) {
4423 trace := traceAcquire()
4424 pp := gp.m.p.ptr()
4425 if trace.ok() {
4426
4427
4428
4429 trace.GoPreempt()
4430 }
4431 casgstatus(gp, _Grunning, _Grunnable)
4432 if trace.ok() {
4433 traceRelease(trace)
4434 }
4435 dropg()
4436 runqput(pp, gp, false)
4437 schedule()
4438 }
4439
4440
4441 func goexit1() {
4442 if raceenabled {
4443 if gp := getg(); gp.bubble != nil {
4444 racereleasemergeg(gp, gp.bubble.raceaddr())
4445 }
4446 racegoend()
4447 }
4448 trace := traceAcquire()
4449 if trace.ok() {
4450 trace.GoEnd()
4451 traceRelease(trace)
4452 }
4453 mcall(goexit0)
4454 }
4455
4456
4457 func goexit0(gp *g) {
4458 if goexperiment.RuntimeSecret && gp.secret > 0 {
4459
4460
4461 memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4462
4463
4464 }
4465 gdestroy(gp)
4466 schedule()
4467 }
4468
4469 func gdestroy(gp *g) {
4470 mp := getg().m
4471 pp := mp.p.ptr()
4472
4473 casgstatus(gp, _Grunning, _Gdead)
4474 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4475 if isSystemGoroutine(gp, false) {
4476 sched.ngsys.Add(-1)
4477 }
4478 gp.m = nil
4479 locked := gp.lockedm != 0
4480 gp.lockedm = 0
4481 mp.lockedg = 0
4482 gp.preemptStop = false
4483 gp.paniconfault = false
4484 gp._defer = nil
4485 gp._panic = nil
4486 gp.writebuf = nil
4487 gp.waitreason = waitReasonZero
4488 gp.param = nil
4489 gp.labels = nil
4490 gp.timer = nil
4491 gp.bubble = nil
4492 gp.fipsOnlyBypass = false
4493 gp.secret = 0
4494
4495 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4496
4497
4498
4499 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4500 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4501 gcController.bgScanCredit.Add(scanCredit)
4502 gp.gcAssistBytes = 0
4503 }
4504
4505 dropg()
4506
4507 if GOARCH == "wasm" {
4508 gfput(pp, gp)
4509 return
4510 }
4511
4512 if locked && mp.lockedInt != 0 {
4513 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4514 if mp.isextra {
4515 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4516 }
4517 throw("exited a goroutine internally locked to the OS thread")
4518 }
4519 gfput(pp, gp)
4520 if locked {
4521
4522
4523
4524
4525
4526
4527 if GOOS != "plan9" {
4528 gogo(&mp.g0.sched)
4529 } else {
4530
4531
4532 mp.lockedExt = 0
4533 }
4534 }
4535 }
4536
4537
4538
4539
4540
4541
4542
4543
4544
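// save records pc, sp, and bp in gp.sched so that a later gogo can
// resume execution there. It throws if called on g0 or gsignal, or if
// gp.sched.ctxt is unexpectedly non-nil.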
4545 func save(pc, sp, bp uintptr) {
4546 gp := getg()
4547
4548 if gp == gp.m.g0 || gp == gp.m.gsignal {
4549
4550
4551
4552
4553
4554 throw("save on system g not allowed")
4555 }
4556
4557 gp.sched.pc = pc
4558 gp.sched.sp = sp
4559 gp.sched.lr = 0
4560 gp.sched.bp = bp
4561
4562
4563
4564 if gp.sched.ctxt != nil {
4565 badctxt()
4566 }
4567 }
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592
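// reentersyscall is the common entry path for system calls. It saves
// the caller's pc/sp/bp, marks the goroutine as being in a syscall, and
// leaves its P in a state where it can be retaken (by sysmon or the GC)
// if the syscall blocks. Between saving the state and returning it must
// not grow the stack, hence throwsplit and the repeated save calls
// after anything that might have clobbered gp.sched.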
4593 func reentersyscall(pc, sp, bp uintptr) {
4594 gp := getg()
4595
4596
4597
4598 gp.m.locks++
4599
4600
4601
4602
4603
4604 gp.stackguard0 = stackPreempt
4605 gp.throwsplit = true
4606
4607
4608 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4609
4610 pp := gp.m.p.ptr()
4611 if pp.runSafePointFn != 0 {
4612
4613 systemstack(runSafePointFn)
4614 }
4615 gp.m.oldp.set(pp)
4616
4617
4618 save(pc, sp, bp)
4619 gp.syscallsp = sp
4620 gp.syscallpc = pc
4621 gp.syscallbp = bp
4622
4623
4624 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4625 systemstack(func() {
4626 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4627 throw("entersyscall")
4628 })
4629 }
4630 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4631 systemstack(func() {
4632 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4633 throw("entersyscall")
4634 })
4635 }
4636 trace := traceAcquire()
4637 if trace.ok() {
4638
4639
4640
4641
4642 systemstack(func() {
4643 trace.GoSysCall()
4644 })
4645
4646 save(pc, sp, bp)
4647 }
4648 if sched.gcwaiting.Load() {
4649
4650
4651
4652 systemstack(func() {
4653 entersyscallHandleGCWait(trace)
4654 })
4655
4656 save(pc, sp, bp)
4657 }
4658
4659
4660
4661
4662
4663 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4664 casgstatus(gp, _Grunning, _Gsyscall)
4665 }
4666 if staticLockRanking {
4667
4668 save(pc, sp, bp)
4669 }
4670 if trace.ok() {
4671
4672
4673
4674 traceRelease(trace)
4675 }
4676 if sched.sysmonwait.Load() {
4677 systemstack(entersyscallWakeSysmon)
4678
4679 save(pc, sp, bp)
4680 }
4681 gp.m.locks--
4682 }
4683
4684
4685
4686
4687 const debugExtendGrunningNoP = false
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703 func entersyscall() {
4704
4705
4706
4707
4708 fp := getcallerfp()
4709 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4710 }
4711
4712 func entersyscallWakeSysmon() {
4713 lock(&sched.lock)
4714 if sched.sysmonwait.Load() {
4715 sched.sysmonwait.Store(false)
4716 notewakeup(&sched.sysmonnote)
4717 }
4718 unlock(&sched.lock)
4719 }
4720
4721 func entersyscallHandleGCWait(trace traceLocker) {
4722 gp := getg()
4723
4724 lock(&sched.lock)
4725 if sched.stopwait > 0 {
4726
4727 pp := gp.m.p.ptr()
4728 pp.m = 0
4729 gp.m.p = 0
4730 atomic.Store(&pp.status, _Pgcstop)
4731
4732 if trace.ok() {
4733 trace.ProcStop(pp)
4734 }
4735 sched.nGsyscallNoP.Add(1)
4736 pp.gcStopTime = nanotime()
4737 pp.syscalltick++
4738 if sched.stopwait--; sched.stopwait == 0 {
4739 notewakeup(&sched.stopnote)
4740 }
4741 }
4742 unlock(&sched.lock)
4743 }
4744
4745
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
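// entersyscallblock is like entersyscall, but for system calls that are
// known to block: instead of leaving its P to be retaken later, it
// hands the P off immediately via handoffp.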
4757 func entersyscallblock() {
4758 gp := getg()
4759
4760 gp.m.locks++
4761 gp.throwsplit = true
4762 gp.stackguard0 = stackPreempt
4763 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4764 gp.m.p.ptr().syscalltick++
4765
4766 sched.nGsyscallNoP.Add(1)
4767
4768
4769 pc := sys.GetCallerPC()
4770 sp := sys.GetCallerSP()
4771 bp := getcallerfp()
4772 save(pc, sp, bp)
4773 gp.syscallsp = gp.sched.sp
4774 gp.syscallpc = gp.sched.pc
4775 gp.syscallbp = gp.sched.bp
4776 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4777 sp1 := sp
4778 sp2 := gp.sched.sp
4779 sp3 := gp.syscallsp
4780 systemstack(func() {
4781 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4782 throw("entersyscallblock")
4783 })
4784 }
4785
4786
4787
4788
4789
4790
4791 trace := traceAcquire()
4792 systemstack(func() {
4793 if trace.ok() {
4794 trace.GoSysCall()
4795 }
4796 handoffp(releasep())
4797 })
4798
4799
4800
4801 if debugExtendGrunningNoP {
4802 usleep(10)
4803 }
4804 casgstatus(gp, _Grunning, _Gsyscall)
4805 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4806 systemstack(func() {
4807 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4808 throw("entersyscallblock")
4809 })
4810 }
4811 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4812 systemstack(func() {
4813 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4814 throw("entersyscallblock")
4815 })
4816 }
4817 if trace.ok() {
4818 systemstack(func() {
4819 traceRelease(trace)
4820 })
4821 }
4822
4823
4824 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4825
4826 gp.m.locks--
4827 }
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
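// exitsyscall is called when the goroutine g exits its system call.
// It arranges for g to run on a CPU again: it tries to keep or
// reacquire a P on the fast path, and otherwise falls back to
// exitsyscallNoP via mcall, which queues g and parks this M.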
4849 func exitsyscall() {
4850 gp := getg()
4851
4852 gp.m.locks++
4853 if sys.GetCallerSP() > gp.syscallsp {
4854 throw("exitsyscall: syscall frame is no longer valid")
4855 }
4856 gp.waitsince = 0
4857
4858 if sched.stopwait == freezeStopWait {
4859
4860
4861
4862 systemstack(func() {
4863 lock(&deadlock)
4864 lock(&deadlock)
4865 })
4866 }
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4880 casgstatus(gp, _Gsyscall, _Grunning)
4881 }
4882
4883
4884
4885
4886 if debugExtendGrunningNoP {
4887 usleep(10)
4888 }
4889
4890
4891 oldp := gp.m.oldp.ptr()
4892 gp.m.oldp.set(nil)
4893
4894
4895 pp := gp.m.p.ptr()
4896 if pp != nil {
4897
4898 if trace := traceAcquire(); trace.ok() {
4899 systemstack(func() {
4900
4901
4902
4903
4904
4905
4906
4907
4908 if pp.syscalltick == gp.m.syscalltick {
4909 trace.GoSysExit(false)
4910 } else {
4911
4912
4913
4914
4915 trace.ProcSteal(pp)
4916 trace.ProcStart()
4917 trace.GoSysExit(true)
4918 trace.GoStart()
4919 }
4920 traceRelease(trace)
4921 })
4922 }
4923 } else {
4924
4925 systemstack(func() {
4926
4927 if pp := exitsyscallTryGetP(oldp); pp != nil {
4928
4929 acquirepNoTrace(pp)
4930
4931
4932 if trace := traceAcquire(); trace.ok() {
4933 trace.ProcStart()
4934 trace.GoSysExit(true)
4935 trace.GoStart()
4936 traceRelease(trace)
4937 }
4938 }
4939 })
4940 pp = gp.m.p.ptr()
4941 }
4942
4943
4944 if pp != nil {
4945 if goroutineProfile.active {
4946
4947
4948
4949 systemstack(func() {
4950 tryRecordGoroutineProfileWB(gp)
4951 })
4952 }
4953
4954
4955 pp.syscalltick++
4956
4957
4958
4959 gp.syscallsp = 0
4960 gp.m.locks--
4961 if gp.preempt {
4962
4963 gp.stackguard0 = stackPreempt
4964 } else {
4965
4966 gp.stackguard0 = gp.stack.lo + stackGuard
4967 }
4968 gp.throwsplit = false
4969
4970 if sched.disable.user && !schedEnabled(gp) {
4971
4972 Gosched()
4973 }
4974 return
4975 }
4976
4977 gp.m.locks--
4978
4979
4980 mcall(exitsyscallNoP)
4981
4982
4983
4984
4985
4986
4987
4988 gp.syscallsp = 0
4989 gp.m.p.ptr().syscalltick++
4990 gp.throwsplit = false
4991 }
4992
4993
4994
4995
4996
4997
4998
4999 func exitsyscallTryGetP(oldp *p) *p {
5000
5001 if oldp != nil {
5002 if thread, ok := setBlockOnExitSyscall(oldp); ok {
5003 thread.takeP()
5004 thread.resume()
5005 sched.nGsyscallNoP.Add(-1)
5006 return oldp
5007 }
5008 }
5009
5010
5011 if sched.pidle != 0 {
5012 lock(&sched.lock)
5013 pp, _ := pidleget(0)
5014 if pp != nil && sched.sysmonwait.Load() {
5015 sched.sysmonwait.Store(false)
5016 notewakeup(&sched.sysmonnote)
5017 }
5018 unlock(&sched.lock)
5019 if pp != nil {
5020 sched.nGsyscallNoP.Add(-1)
5021 return pp
5022 }
5023 }
5024 return nil
5025 }
5026
5027
5028
5029
5030
5031
5032
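// exitsyscallNoP is the slow path of exitsyscall, run on the g0 stack
// when no P could be acquired. It makes gp runnable, then either runs
// it on a freshly acquired idle P, queues it on the global run queue,
// or, if gp is locked to this M, waits until it can run gp again;
// otherwise it parks this M.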
5033 func exitsyscallNoP(gp *g) {
5034 traceExitingSyscall()
5035 trace := traceAcquire()
5036 casgstatus(gp, _Grunning, _Grunnable)
5037 traceExitedSyscall()
5038 if trace.ok() {
5039
5040
5041
5042
5043 trace.GoSysExit(true)
5044 traceRelease(trace)
5045 }
5046 sched.nGsyscallNoP.Add(-1)
5047 dropg()
5048 lock(&sched.lock)
5049 var pp *p
5050 if schedEnabled(gp) {
5051 pp, _ = pidleget(0)
5052 }
5053 var locked bool
5054 if pp == nil {
5055 globrunqput(gp)
5056
5057
5058
5059
5060
5061
5062 locked = gp.lockedm != 0
5063 } else if sched.sysmonwait.Load() {
5064 sched.sysmonwait.Store(false)
5065 notewakeup(&sched.sysmonnote)
5066 }
5067 unlock(&sched.lock)
5068 if pp != nil {
5069 acquirep(pp)
5070 execute(gp, false)
5071 }
5072 if locked {
5073
5074
5075
5076
5077 stoplockedm()
5078 execute(gp, false)
5079 }
5080 stopm()
5081 schedule()
5082 }
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096 func syscall_runtime_BeforeFork() {
5097 gp := getg().m.curg
5098
5099
5100
5101
5102 gp.m.locks++
5103 sigsave(&gp.m.sigmask)
5104 sigblock(false)
5105
5106
5107
5108
5109
5110 gp.stackguard0 = stackFork
5111 }
5112
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125 func syscall_runtime_AfterFork() {
5126 gp := getg().m.curg
5127
5128
5129 gp.stackguard0 = gp.stack.lo + stackGuard
5130
5131 msigrestore(gp.m.sigmask)
5132
5133 gp.m.locks--
5134 }
5135
5136
5137
5138 var inForkedChild bool
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159 func syscall_runtime_AfterForkInChild() {
5160
5161
5162
5163
5164 inForkedChild = true
5165
5166 clearSignalHandlers()
5167
5168
5169
5170 msigrestore(getg().m.sigmask)
5171
5172 inForkedChild = false
5173 }
5174
5175
5176
5177
5178 var pendingPreemptSignals atomic.Int32
5179
5180
5181
5182
5183 func syscall_runtime_BeforeExec() {
5184
5185 execLock.lock()
5186
5187
5188
5189 if GOOS == "darwin" || GOOS == "ios" {
5190 for pendingPreemptSignals.Load() > 0 {
5191 osyield()
5192 }
5193 }
5194 }
5195
5196
5197
5198
5199 func syscall_runtime_AfterExec() {
5200 execLock.unlock()
5201 }
5202
5203
5204 func malg(stacksize int32) *g {
5205 newg := new(g)
5206 if stacksize >= 0 {
5207 stacksize = round2(stackSystem + stacksize)
5208 systemstack(func() {
5209 newg.stack = stackalloc(uint32(stacksize))
5210 if valgrindenabled {
5211 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5212 }
5213 })
5214 newg.stackguard0 = newg.stack.lo + stackGuard
5215 newg.stackguard1 = ^uintptr(0)
5216
5217
5218 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5219 }
5220 return newg
5221 }
5222
5223
5224
5225
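// newproc creates a new goroutine running fn and places it on the
// current P's run queue. The compiler turns every go statement into a
// call to newproc.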
5226 func newproc(fn *funcval) {
5227 gp := getg()
5228 if goexperiment.RuntimeSecret && gp.secret > 0 {
5229 panic("goroutine spawned while running in secret mode")
5230 }
5231
5232 pc := sys.GetCallerPC()
5233 systemstack(func() {
5234 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5235
5236 pp := getg().m.p.ptr()
5237 runqput(pp, newg, true)
5238
5239 if mainStarted {
5240 wakep()
5241 }
5242 })
5243 }
5244
5245
5246
5247
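// newproc1 creates a new goroutine in state _Grunnable (or _Gwaiting
// with the given waitreason if parked is true), starting at fn.
// callerpc is the address of the go statement that created it. The
// caller is responsible for putting the new g on a run queue.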
5248 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5249 if fn == nil {
5250 fatal("go of nil func value")
5251 }
5252
5253 mp := acquirem()
5254 pp := mp.p.ptr()
5255 newg := gfget(pp)
5256 if newg == nil {
5257 newg = malg(stackMin)
5258 casgstatus(newg, _Gidle, _Gdead)
5259 allgadd(newg)
5260 }
5261 if newg.stack.hi == 0 {
5262 throw("newproc1: newg missing stack")
5263 }
5264
5265 if readgstatus(newg) != _Gdead {
5266 throw("newproc1: new g is not Gdead")
5267 }
5268
5269 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5270 totalSize = alignUp(totalSize, sys.StackAlign)
5271 sp := newg.stack.hi - totalSize
5272 if usesLR {
5273
5274 *(*uintptr)(unsafe.Pointer(sp)) = 0
5275 prepGoExitFrame(sp)
5276 }
5277 if GOARCH == "arm64" {
5278
5279 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5280 }
5281
5282 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5283 newg.sched.sp = sp
5284 newg.stktopsp = sp
5285 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5286 newg.sched.g = guintptr(unsafe.Pointer(newg))
5287 gostartcallfn(&newg.sched, fn)
5288 newg.parentGoid = callergp.goid
5289 newg.gopc = callerpc
5290 newg.ancestors = saveAncestors(callergp)
5291 newg.startpc = fn.fn
5292 newg.runningCleanups.Store(false)
5293 if isSystemGoroutine(newg, false) {
5294 sched.ngsys.Add(1)
5295 } else {
5296
5297 newg.bubble = callergp.bubble
5298 if mp.curg != nil {
5299 newg.labels = mp.curg.labels
5300 }
5301 if goroutineProfile.active {
5302
5303
5304
5305
5306
5307 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5308 }
5309 }
5310
5311 newg.trackingSeq = uint8(cheaprand())
5312 if newg.trackingSeq%gTrackingPeriod == 0 {
5313 newg.tracking = true
5314 }
5315 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5316
5317
5318
5319 trace := traceAcquire()
5320 var status uint32 = _Grunnable
5321 if parked {
5322 status = _Gwaiting
5323 newg.waitreason = waitreason
5324 }
5325 if pp.goidcache == pp.goidcacheend {
5326
5327
5328
5329 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5330 pp.goidcache -= _GoidCacheBatch - 1
5331 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5332 }
5333 newg.goid = pp.goidcache
5334 casgstatus(newg, _Gdead, status)
5335 pp.goidcache++
5336 newg.trace.reset()
5337 if trace.ok() {
5338 trace.GoCreate(newg, newg.startpc, parked)
5339 traceRelease(trace)
5340 }
5341
5342
5343 newg.fipsOnlyBypass = callergp.fipsOnlyBypass
5344
5345
5346 if raceenabled {
5347 newg.racectx = racegostart(callerpc)
5348 newg.raceignore = 0
5349 if newg.labels != nil {
5350
5351
5352 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5353 }
5354 }
5355 pp.goroutinesCreated++
5356 releasem(mp)
5357
5358 return newg
5359 }
5360
5361
5362
5363
5364 func saveAncestors(callergp *g) *[]ancestorInfo {
5365
5366 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5367 return nil
5368 }
5369 var callerAncestors []ancestorInfo
5370 if callergp.ancestors != nil {
5371 callerAncestors = *callergp.ancestors
5372 }
5373 n := int32(len(callerAncestors)) + 1
5374 if n > debug.tracebackancestors {
5375 n = debug.tracebackancestors
5376 }
5377 ancestors := make([]ancestorInfo, n)
5378 copy(ancestors[1:], callerAncestors)
5379
5380 var pcs [tracebackInnerFrames]uintptr
5381 npcs := gcallers(callergp, 0, pcs[:])
5382 ipcs := make([]uintptr, npcs)
5383 copy(ipcs, pcs[:])
5384 ancestors[0] = ancestorInfo{
5385 pcs: ipcs,
5386 goid: callergp.goid,
5387 gopc: callergp.gopc,
5388 }
5389
5390 ancestorsp := new([]ancestorInfo)
5391 *ancestorsp = ancestors
5392 return ancestorsp
5393 }
5394
5395
5396
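// gfput puts a dead goroutine on the per-P free list, freeing its stack
// if it is not the standard starting size, and spills a batch to the
// global free list when the local list grows long.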
5397 func gfput(pp *p, gp *g) {
5398 if readgstatus(gp) != _Gdead {
5399 throw("gfput: bad status (not Gdead)")
5400 }
5401
5402 stksize := gp.stack.hi - gp.stack.lo
5403
5404 if stksize != uintptr(startingStackSize) {
5405
5406 stackfree(gp.stack)
5407 gp.stack.lo = 0
5408 gp.stack.hi = 0
5409 gp.stackguard0 = 0
5410 if valgrindenabled {
5411 valgrindDeregisterStack(gp.valgrindStackID)
5412 gp.valgrindStackID = 0
5413 }
5414 }
5415
5416 pp.gFree.push(gp)
5417 if pp.gFree.size >= 64 {
5418 var (
5419 stackQ gQueue
5420 noStackQ gQueue
5421 )
5422 for pp.gFree.size >= 32 {
5423 gp := pp.gFree.pop()
5424 if gp.stack.lo == 0 {
5425 noStackQ.push(gp)
5426 } else {
5427 stackQ.push(gp)
5428 }
5429 }
5430 lock(&sched.gFree.lock)
5431 sched.gFree.noStack.pushAll(noStackQ)
5432 sched.gFree.stack.pushAll(stackQ)
5433 unlock(&sched.gFree.lock)
5434 }
5435 }
5436
5437
5438
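// gfget takes a goroutine off the per-P free list, refilling from the
// global free list if the local one is empty, and allocates a fresh
// stack if the reused g no longer has one.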
5439 func gfget(pp *p) *g {
5440 retry:
5441 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5442 lock(&sched.gFree.lock)
5443
5444 for pp.gFree.size < 32 {
5445
5446 gp := sched.gFree.stack.pop()
5447 if gp == nil {
5448 gp = sched.gFree.noStack.pop()
5449 if gp == nil {
5450 break
5451 }
5452 }
5453 pp.gFree.push(gp)
5454 }
5455 unlock(&sched.gFree.lock)
5456 goto retry
5457 }
5458 gp := pp.gFree.pop()
5459 if gp == nil {
5460 return nil
5461 }
5462 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5463
5464
5465
5466 systemstack(func() {
5467 stackfree(gp.stack)
5468 gp.stack.lo = 0
5469 gp.stack.hi = 0
5470 gp.stackguard0 = 0
5471 if valgrindenabled {
5472 valgrindDeregisterStack(gp.valgrindStackID)
5473 gp.valgrindStackID = 0
5474 }
5475 })
5476 }
5477 if gp.stack.lo == 0 {
5478
5479 systemstack(func() {
5480 gp.stack = stackalloc(startingStackSize)
5481 if valgrindenabled {
5482 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5483 }
5484 })
5485 gp.stackguard0 = gp.stack.lo + stackGuard
5486 } else {
5487 if raceenabled {
5488 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5489 }
5490 if msanenabled {
5491 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5492 }
5493 if asanenabled {
5494 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5495 }
5496 }
5497 return gp
5498 }
5499
5500
5501 func gfpurge(pp *p) {
5502 var (
5503 stackQ gQueue
5504 noStackQ gQueue
5505 )
5506 for !pp.gFree.empty() {
5507 gp := pp.gFree.pop()
5508 if gp.stack.lo == 0 {
5509 noStackQ.push(gp)
5510 } else {
5511 stackQ.push(gp)
5512 }
5513 }
5514 lock(&sched.gFree.lock)
5515 sched.gFree.noStack.pushAll(noStackQ)
5516 sched.gFree.stack.pushAll(stackQ)
5517 unlock(&sched.gFree.lock)
5518 }
5519
5520
5521 func Breakpoint() {
5522 breakpoint()
5523 }
5524
5525
5526
5527
5528
5529
5530 func dolockOSThread() {
5531 if GOARCH == "wasm" {
5532 return
5533 }
5534 gp := getg()
5535 gp.m.lockedg.set(gp)
5536 gp.lockedm.set(gp.m)
5537 }
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552
5553
5554
5555 func LockOSThread() {
5556 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5557
5558
5559
5560 startTemplateThread()
5561 }
5562 gp := getg()
5563 gp.m.lockedExt++
5564 if gp.m.lockedExt == 0 {
5565 gp.m.lockedExt--
5566 panic("LockOSThread nesting overflow")
5567 }
5568 dolockOSThread()
5569 }
5570
5571
5572 func lockOSThread() {
5573 getg().m.lockedInt++
5574 dolockOSThread()
5575 }
5576
5577
5578
5579
5580
5581
5582 func dounlockOSThread() {
5583 if GOARCH == "wasm" {
5584 return
5585 }
5586 gp := getg()
5587 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5588 return
5589 }
5590 gp.m.lockedg = 0
5591 gp.lockedm = 0
5592 }
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608 func UnlockOSThread() {
5609 gp := getg()
5610 if gp.m.lockedExt == 0 {
5611 return
5612 }
5613 gp.m.lockedExt--
5614 dounlockOSThread()
5615 }
5616
5617
5618 func unlockOSThread() {
5619 gp := getg()
5620 if gp.m.lockedInt == 0 {
5621 systemstack(badunlockosthread)
5622 }
5623 gp.m.lockedInt--
5624 dounlockOSThread()
5625 }
5626
5627 func badunlockosthread() {
5628 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5629 }
5630
5631 func gcount(includeSys bool) int32 {
5632 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5633 if !includeSys {
5634 n -= sched.ngsys.Load()
5635 }
5636 for _, pp := range allp {
5637 n -= pp.gFree.size
5638 }
5639
5640
5641
5642 if n < 1 {
5643 n = 1
5644 }
5645 return n
5646 }
5647
5648
5649
5650
5651
5652 func goroutineleakcount() int {
5653 return work.goroutineLeak.count
5654 }
5655
5656 func mcount() int32 {
5657 return int32(sched.mnext - sched.nmfreed)
5658 }
5659
5660 var prof struct {
5661 signalLock atomic.Uint32
5662
5663
5664
5665 hz atomic.Int32
5666 }
5667
5668 func _System() { _System() }
5669 func _ExternalCode() { _ExternalCode() }
5670 func _LostExternalCode() { _LostExternalCode() }
5671 func _GC() { _GC() }
5672 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5673 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5674 func _VDSO() { _VDSO() }
5675
5676
5677
5678
5679
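// sigprof records a CPU profile sample for the code interrupted by a
// SIGPROF signal. It runs in signal handler context, so it unwinds from
// the most reliable state available (cgo, libcall, or VDSO call sites
// when the normal stack is not usable) and falls back to synthetic
// frames if nothing can be recovered.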
5680 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5681 if prof.hz.Load() == 0 {
5682 return
5683 }
5684
5685
5686
5687
5688 if mp != nil && mp.profilehz == 0 {
5689 return
5690 }
5691
5692
5693
5694
5695
5696
5697
5698 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5699 if f := findfunc(pc); f.valid() {
5700 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5701 cpuprof.lostAtomic++
5702 return
5703 }
5704 }
5705 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5706
5707
5708
5709 cpuprof.lostAtomic++
5710 return
5711 }
5712 }
5713
5714
5715
5716
5717
5718
5719
5720 getg().m.mallocing++
5721
5722 var u unwinder
5723 var stk [maxCPUProfStack]uintptr
5724 n := 0
5725 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5726 cgoOff := 0
5727
5728
5729
5730
5731
5732 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5733 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5734 cgoOff++
5735 }
5736 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5737 mp.cgoCallers[0] = 0
5738 }
5739
5740
5741 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5742 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5743
5744
5745 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5746 } else if mp != nil && mp.vdsoSP != 0 {
5747
5748
5749 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5750 } else {
5751 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5752 }
5753 n += tracebackPCs(&u, 0, stk[n:])
5754
5755 if n <= 0 {
5756
5757
5758 n = 2
5759 if inVDSOPage(pc) {
5760 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5761 } else if pc > firstmoduledata.etext {
5762
5763 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5764 }
5765 stk[0] = pc
5766 if mp.preemptoff != "" {
5767 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5768 } else {
5769 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5770 }
5771 }
5772
5773 if prof.hz.Load() != 0 {
5774
5775
5776
5777 var tagPtr *unsafe.Pointer
5778 if gp != nil && gp.m != nil && gp.m.curg != nil {
5779 tagPtr = &gp.m.curg.labels
5780 }
5781 cpuprof.add(tagPtr, stk[:n])
5782
5783 gprof := gp
5784 var mp *m
5785 var pp *p
5786 if gp != nil && gp.m != nil {
5787 if gp.m.curg != nil {
5788 gprof = gp.m.curg
5789 }
5790 mp = gp.m
5791 pp = gp.m.p.ptr()
5792 }
5793 traceCPUSample(gprof, mp, pp, stk[:n])
5794 }
5795 getg().m.mallocing--
5796 }
5797
5798
5799
5800 func setcpuprofilerate(hz int32) {
5801
5802 if hz < 0 {
5803 hz = 0
5804 }
5805
5806
5807
5808 gp := getg()
5809 gp.m.locks++
5810
5811
5812
5813
5814 setThreadCPUProfiler(0)
5815
5816 for !prof.signalLock.CompareAndSwap(0, 1) {
5817 osyield()
5818 }
5819 if prof.hz.Load() != hz {
5820 setProcessCPUProfiler(hz)
5821 prof.hz.Store(hz)
5822 }
5823 prof.signalLock.Store(0)
5824
5825 lock(&sched.lock)
5826 sched.profilehz = hz
5827 unlock(&sched.lock)
5828
5829 if hz != 0 {
5830 setThreadCPUProfiler(hz)
5831 }
5832
5833 gp.m.locks--
5834 }
5835
5836
5837
5838 func (pp *p) init(id int32) {
5839 pp.id = id
5840 pp.gcw.id = id
5841 pp.status = _Pgcstop
5842 pp.sudogcache = pp.sudogbuf[:0]
5843 pp.deferpool = pp.deferpoolbuf[:0]
5844 pp.wbBuf.reset()
5845 if pp.mcache == nil {
5846 if id == 0 {
5847 if mcache0 == nil {
5848 throw("missing mcache?")
5849 }
5850
5851
5852 pp.mcache = mcache0
5853 } else {
5854 pp.mcache = allocmcache()
5855 }
5856 }
5857 if raceenabled && pp.raceprocctx == 0 {
5858 if id == 0 {
5859 pp.raceprocctx = raceprocctx0
5860 raceprocctx0 = 0
5861 } else {
5862 pp.raceprocctx = raceproccreate()
5863 }
5864 }
5865 lockInit(&pp.timers.mu, lockRankTimers)
5866
5867
5868
5869 timerpMask.set(id)
5870
5871
5872 idlepMask.clear(id)
5873 }
5874
5875
5876
5877
5878
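// destroy releases all resources associated with pp (run queue, timers,
// caches, and free lists) and transitions it to _Pdead. sched.lock must
// be held and the world must be stopped.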
5879 func (pp *p) destroy() {
5880 assertLockHeld(&sched.lock)
5881 assertWorldStopped()
5882
5883
5884 for pp.runqhead != pp.runqtail {
5885
5886 pp.runqtail--
5887 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5888
5889 globrunqputhead(gp)
5890 }
5891 if pp.runnext != 0 {
5892 globrunqputhead(pp.runnext.ptr())
5893 pp.runnext = 0
5894 }
5895
5896
5897 getg().m.p.ptr().timers.take(&pp.timers)
5898
5899
5900
5901 if phase := gcphase; phase != _GCoff {
5902 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5903 throw("P destroyed while GC is running")
5904 }
5905
5906 pp.gcw.spanq.destroy()
5907
5908 clear(pp.sudogbuf[:])
5909 pp.sudogcache = pp.sudogbuf[:0]
5910 pp.pinnerCache = nil
5911 clear(pp.deferpoolbuf[:])
5912 pp.deferpool = pp.deferpoolbuf[:0]
5913 systemstack(func() {
5914 for i := 0; i < pp.mspancache.len; i++ {
5915
5916 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5917 }
5918 pp.mspancache.len = 0
5919 lock(&mheap_.lock)
5920 pp.pcache.flush(&mheap_.pages)
5921 unlock(&mheap_.lock)
5922 })
5923 freemcache(pp.mcache)
5924 pp.mcache = nil
5925 gfpurge(pp)
5926 if raceenabled {
5927 if pp.timers.raceCtx != 0 {
5928
5929
5930
5931
5932
5933 mp := getg().m
5934 phold := mp.p.ptr()
5935 mp.p.set(pp)
5936
5937 racectxend(pp.timers.raceCtx)
5938 pp.timers.raceCtx = 0
5939
5940 mp.p.set(phold)
5941 }
5942 raceprocdestroy(pp.raceprocctx)
5943 pp.raceprocctx = 0
5944 }
5945 pp.gcAssistTime = 0
5946 gcCleanups.queued += pp.cleanupsQueued
5947 pp.cleanupsQueued = 0
5948 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5949 pp.goroutinesCreated = 0
5950 pp.xRegs.free()
5951 pp.status = _Pdead
5952 }
5953
5954
5955
5956
5957
5958
5959
5960
5961
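// procresize changes the number of Ps to nprocs. sched.lock must be
// held and the world must be stopped. It returns the list of Ps with
// local work; the caller is responsible for scheduling them.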
5962 func procresize(nprocs int32) *p {
5963 assertLockHeld(&sched.lock)
5964 assertWorldStopped()
5965
5966 old := gomaxprocs
5967 if old < 0 || nprocs <= 0 {
5968 throw("procresize: invalid arg")
5969 }
5970 trace := traceAcquire()
5971 if trace.ok() {
5972 trace.Gomaxprocs(nprocs)
5973 traceRelease(trace)
5974 }
5975
5976
5977 now := nanotime()
5978 if sched.procresizetime != 0 {
5979 sched.totaltime += int64(old) * (now - sched.procresizetime)
5980 }
5981 sched.procresizetime = now
5982
5983
5984 if nprocs > int32(len(allp)) {
5985
5986
5987 lock(&allpLock)
5988 if nprocs <= int32(cap(allp)) {
5989 allp = allp[:nprocs]
5990 } else {
5991 nallp := make([]*p, nprocs)
5992
5993
5994 copy(nallp, allp[:cap(allp)])
5995 allp = nallp
5996 }
5997
5998 idlepMask = idlepMask.resize(nprocs)
5999 timerpMask = timerpMask.resize(nprocs)
6000 work.spanqMask = work.spanqMask.resize(nprocs)
6001 unlock(&allpLock)
6002 }
6003
6004
6005 for i := old; i < nprocs; i++ {
6006 pp := allp[i]
6007 if pp == nil {
6008 pp = new(p)
6009 }
6010 pp.init(i)
6011 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
6012 }
6013
6014 gp := getg()
6015 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
6016
6017 gp.m.p.ptr().status = _Prunning
6018 gp.m.p.ptr().mcache.prepareForSweep()
6019 } else {
6020
6021
6022
6023
6024
6025 if gp.m.p != 0 {
6026 trace := traceAcquire()
6027 if trace.ok() {
6028
6029
6030
6031 trace.GoSched()
6032 trace.ProcStop(gp.m.p.ptr())
6033 traceRelease(trace)
6034 }
6035 gp.m.p.ptr().m = 0
6036 }
6037 gp.m.p = 0
6038 pp := allp[0]
6039 pp.m = 0
6040 pp.status = _Pidle
6041 acquirep(pp)
6042 trace := traceAcquire()
6043 if trace.ok() {
6044 trace.GoStart()
6045 traceRelease(trace)
6046 }
6047 }
6048
6049
6050 mcache0 = nil
6051
6052
6053 for i := nprocs; i < old; i++ {
6054 pp := allp[i]
6055 pp.destroy()
6056
6057 }
6058
6059
6060 if int32(len(allp)) != nprocs {
6061 lock(&allpLock)
6062 allp = allp[:nprocs]
6063 idlepMask = idlepMask.resize(nprocs)
6064 timerpMask = timerpMask.resize(nprocs)
6065 work.spanqMask = work.spanqMask.resize(nprocs)
6066 unlock(&allpLock)
6067 }
6068
6069
6070 var runnablePs *p
6071 var runnablePsNeedM *p
6072 var idlePs *p
6073 for i := nprocs - 1; i >= 0; i-- {
6074 pp := allp[i]
6075 if gp.m.p.ptr() == pp {
6076 continue
6077 }
6078 pp.status = _Pidle
6079 if runqempty(pp) {
6080 pp.link.set(idlePs)
6081 idlePs = pp
6082 continue
6083 }
6084
6085
6086
6087
6088
6089
6090
6091
6092 var mp *m
6093 if oldm := pp.oldm.get(); oldm != nil {
6094
6095 mp = mgetSpecific(oldm)
6096 }
6097 if mp == nil {
6098
6099 pp.link.set(runnablePsNeedM)
6100 runnablePsNeedM = pp
6101 continue
6102 }
6103 pp.m.set(mp)
6104 pp.link.set(runnablePs)
6105 runnablePs = pp
6106 }
6107
6108
6109 for runnablePsNeedM != nil {
6110 pp := runnablePsNeedM
6111 runnablePsNeedM = pp.link.ptr()
6112
6113 mp := mget()
6114 pp.m.set(mp)
6115 pp.link.set(runnablePs)
6116 runnablePs = pp
6117 }
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128
6129
6130
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143 if gcBlackenEnabled != 0 {
6144 for idlePs != nil {
6145 pp := idlePs
6146
6147 ok, _ := gcController.assignWaitingGCWorker(pp, now)
6148 if !ok {
6149
6150 break
6151 }
6152
6153
6154
6155
6156
6157
6158
6159
6160 idlePs = pp.link.ptr()
6161 mp := mget()
6162 pp.m.set(mp)
6163 pp.link.set(runnablePs)
6164 runnablePs = pp
6165 }
6166 }
6167
6168
6169 for idlePs != nil {
6170 pp := idlePs
6171 idlePs = pp.link.ptr()
6172 pidleput(pp, now)
6173 }
6174
6175 stealOrder.reset(uint32(nprocs))
6176 var int32p *int32 = &gomaxprocs
6177 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6178 if old != nprocs {
6179
6180 gcCPULimiter.resetCapacity(now, nprocs)
6181 }
6182 return runnablePs
6183 }
6184
6185
6186
6187
6188
6189
6190
6191 func acquirep(pp *p) {
6192
6193 acquirepNoTrace(pp)
6194
6195
6196 trace := traceAcquire()
6197 if trace.ok() {
6198 trace.ProcStart()
6199 traceRelease(trace)
6200 }
6201 }
6202
6203
6204
6205
6206 func acquirepNoTrace(pp *p) {
6207
6208 wirep(pp)
6209
6210
6211
6212
6213
6214
6215 pp.oldm = pp.m.ptr().self
6216
6217
6218
6219 pp.mcache.prepareForSweep()
6220 }
6221
6222
6223
6224
6225
6226
6227
6228 func wirep(pp *p) {
6229 gp := getg()
6230
6231 if gp.m.p != 0 {
6232
6233
6234 systemstack(func() {
6235 throw("wirep: already in go")
6236 })
6237 }
6238 if pp.m != 0 || pp.status != _Pidle {
6239
6240
6241 systemstack(func() {
6242 id := int64(0)
6243 if pp.m != 0 {
6244 id = pp.m.ptr().id
6245 }
6246 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6247 throw("wirep: invalid p state")
6248 })
6249 }
6250 gp.m.p.set(pp)
6251 pp.m.set(gp.m)
6252 pp.status = _Prunning
6253 }
6254
6255
6256 func releasep() *p {
6257 trace := traceAcquire()
6258 if trace.ok() {
6259 trace.ProcStop(getg().m.p.ptr())
6260 traceRelease(trace)
6261 }
6262 return releasepNoTrace()
6263 }
6264
6265
6266 func releasepNoTrace() *p {
6267 gp := getg()
6268
6269 if gp.m.p == 0 {
6270 throw("releasep: invalid arg")
6271 }
6272 pp := gp.m.p.ptr()
6273 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6274 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6275 throw("releasep: invalid p state")
6276 }
6277
6278
6279 gcController.releaseNextGCMarkWorker(pp)
6280
6281 gp.m.p = 0
6282 pp.m = 0
6283 pp.status = _Pidle
6284 return pp
6285 }
6286
6287 func incidlelocked(v int32) {
6288 lock(&sched.lock)
6289 sched.nmidlelocked += v
6290 if v > 0 {
6291 checkdead()
6292 }
6293 unlock(&sched.lock)
6294 }
6295
6296
6297
6298
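// checkdead checks for a deadlock: no running Ms, no runnable
// goroutines, and nothing left to wake them other than timers. It
// throws or calls fatal if a deadlock is detected. sched.lock must be
// held.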
6299 func checkdead() {
6300 assertLockHeld(&sched.lock)
6301
6302
6303
6304
6305
6306
6307 if (islibrary || isarchive) && GOARCH != "wasm" {
6308 return
6309 }
6310
6311
6312
6313
6314
6315 if panicking.Load() > 0 {
6316 return
6317 }
6318
6319
6320
6321
6322
6323 var run0 int32
6324 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6325 run0 = 1
6326 }
6327
6328 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6329 if run > run0 {
6330 return
6331 }
6332 if run < 0 {
6333 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6334 unlock(&sched.lock)
6335 throw("checkdead: inconsistent counts")
6336 }
6337
6338 grunning := 0
6339 forEachG(func(gp *g) {
6340 if isSystemGoroutine(gp, false) {
6341 return
6342 }
6343 s := readgstatus(gp)
6344 switch s &^ _Gscan {
6345 case _Gwaiting,
6346 _Gpreempted:
6347 grunning++
6348 case _Grunnable,
6349 _Grunning,
6350 _Gsyscall:
6351 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6352 unlock(&sched.lock)
6353 throw("checkdead: runnable g")
6354 }
6355 })
6356 if grunning == 0 {
6357 unlock(&sched.lock)
6358 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6359 }
6360
6361
6362 if faketime != 0 {
6363 if when := timeSleepUntil(); when < maxWhen {
6364 faketime = when
6365
6366
6367 pp, _ := pidleget(faketime)
6368 if pp == nil {
6369
6370
6371 unlock(&sched.lock)
6372 throw("checkdead: no p for timer")
6373 }
6374 mp := mget()
6375 if mp == nil {
6376
6377
6378 unlock(&sched.lock)
6379 throw("checkdead: no m for timer")
6380 }
6381
6382
6383
6384 sched.nmspinning.Add(1)
6385 mp.spinning = true
6386 mp.nextp.set(pp)
6387 notewakeup(&mp.park)
6388 return
6389 }
6390 }
6391
6392
6393 for _, pp := range allp {
6394 if len(pp.timers.heap) > 0 {
6395 return
6396 }
6397 }
6398
6399 unlock(&sched.lock)
6400 fatal("all goroutines are asleep - deadlock!")
6401 }
6402
6403
6404
6405
6406
6407
6408 var forcegcperiod int64 = 2 * 60 * 1e9
6409
6410
6411
6412
6413 const haveSysmon = GOARCH != "wasm"
6414
6415
6416
6417
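// sysmon is the system monitor: it runs on a dedicated M without a P,
// so write barriers are not allowed. It periodically retakes Ps blocked
// in syscalls or running too long, polls the network if no one else
// has, and drives housekeeping such as forced GC, scavenger wakeups,
// GOMAXPROCS updates, and scheduler traces.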
6418 func sysmon() {
6419 lock(&sched.lock)
6420 sched.nmsys++
6421 checkdead()
6422 unlock(&sched.lock)
6423
6424 lastgomaxprocs := int64(0)
6425 lasttrace := int64(0)
6426 idle := 0
6427 delay := uint32(0)
6428
6429 for {
6430 if idle == 0 {
6431 delay = 20
6432 } else if idle > 50 {
6433 delay *= 2
6434 }
6435 if delay > 10*1000 {
6436 delay = 10 * 1000
6437 }
6438 usleep(delay)
6439
6440
6441
6442
6443
6444
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455 now := nanotime()
6456 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6457 lock(&sched.lock)
6458 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6459 syscallWake := false
6460 next := timeSleepUntil()
6461 if next > now {
6462 sched.sysmonwait.Store(true)
6463 unlock(&sched.lock)
6464
6465
6466 sleep := forcegcperiod / 2
6467 if next-now < sleep {
6468 sleep = next - now
6469 }
6470 shouldRelax := sleep >= osRelaxMinNS
6471 if shouldRelax {
6472 osRelax(true)
6473 }
6474 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6475 if shouldRelax {
6476 osRelax(false)
6477 }
6478 lock(&sched.lock)
6479 sched.sysmonwait.Store(false)
6480 noteclear(&sched.sysmonnote)
6481 }
6482 if syscallWake {
6483 idle = 0
6484 delay = 20
6485 }
6486 }
6487 unlock(&sched.lock)
6488 }
6489
6490 lock(&sched.sysmonlock)
6491
6492
6493 now = nanotime()
6494
6495
6496 if *cgo_yield != nil {
6497 asmcgocall(*cgo_yield, nil)
6498 }
6499
6500 lastpoll := sched.lastpoll.Load()
6501 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6502 sched.lastpoll.CompareAndSwap(lastpoll, now)
6503 list, delta := netpoll(0)
6504 if !list.empty() {
6505
6506
6507
6508
6509
6510
6511
6512 incidlelocked(-1)
6513 injectglist(&list)
6514 incidlelocked(1)
6515 netpollAdjustWaiters(delta)
6516 }
6517 }
6518
6519 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6520 sysmonUpdateGOMAXPROCS()
6521 lastgomaxprocs = now
6522 }
6523 if scavenger.sysmonWake.Load() != 0 {
6524
6525 scavenger.wake()
6526 }
6527
6528
6529 if retake(now) != 0 {
6530 idle = 0
6531 } else {
6532 idle++
6533 }
6534
6535 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6536 lock(&forcegc.lock)
6537 forcegc.idle.Store(false)
6538 var list gList
6539 list.push(forcegc.g)
6540 injectglist(&list)
6541 unlock(&forcegc.lock)
6542 }
6543 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6544 lasttrace = now
6545 schedtrace(debug.scheddetail > 0)
6546 }
6547 unlock(&sched.sysmonlock)
6548 }
6549 }
6550
6551 type sysmontick struct {
6552 schedtick uint32
6553 syscalltick uint32
6554 schedwhen int64
6555 syscallwhen int64
6556 }
6557
6558
6559
6560 const forcePreemptNS = 10 * 1000 * 1000
6561
6562 func retake(now int64) uint32 {
6563 n := 0
6564
6565
6566 lock(&allpLock)
6567
6568
6569
6570 for i := 0; i < len(allp); i++ {
6571
6572
6573
6574
6575
6576
6577
6578
6579 pp := allp[i]
6580 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6581
6582
6583 continue
6584 }
6585 pd := &pp.sysmontick
6586 sysretake := false
6587
6588
6589
6590
6591
6592 schedt := int64(pp.schedtick)
6593 if int64(pd.schedtick) != schedt {
6594 pd.schedtick = uint32(schedt)
6595 pd.schedwhen = now
6596 } else if pd.schedwhen+forcePreemptNS <= now {
6597 preemptone(pp)
6598
6599
6600
6601
6602 sysretake = true
6603 }
6604
6605
6606 unlock(&allpLock)
6607
6608
6609
6610
6611
6612
6613
6614
6615 incidlelocked(-1)
6616
6617
6618 thread, ok := setBlockOnExitSyscall(pp)
6619 if !ok {
6620
6621 goto done
6622 }
6623
6624
6625 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6626 pd.syscalltick = uint32(syst)
6627 pd.syscallwhen = now
6628 thread.resume()
6629 goto done
6630 }
6631
6632
6633
6634
6635 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6636 thread.resume()
6637 goto done
6638 }
6639
6640
6641
6642 thread.takeP()
6643 thread.resume()
6644 n++
6645
6646
6647 handoffp(pp)
6648
6649
6650
6651 done:
6652 incidlelocked(1)
6653 lock(&allpLock)
6654 }
6655 unlock(&allpLock)
6656 return uint32(n)
6657 }
6658
6659
6660
6661 type syscallingThread struct {
6662 gp *g
6663 mp *m
6664 pp *p
6665 status uint32
6666 }
6667
6668
6669
6670
6671
6672
6673
6674
6675
6676
6677
6678
6679
6680
6681
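// setBlockOnExitSyscall tries to pin the goroutine currently in a
// syscall on pp's M by setting the _Gscan bit on its status. While the
// bit is held the goroutine cannot complete exitsyscall, so the caller
// may safely take its P (takeP) or stop it for GC (gcstopP) before
// releasing the goroutine with resume. Reports whether the pin
// succeeded.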
6682 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6683 if pp.status != _Prunning {
6684 return syscallingThread{}, false
6685 }
6686
6687
6688
6689
6690
6691
6692
6693
6694
6695
6696
6697 mp := pp.m.ptr()
6698 if mp == nil {
6699
6700 return syscallingThread{}, false
6701 }
6702 gp := mp.curg
6703 if gp == nil {
6704
6705 return syscallingThread{}, false
6706 }
6707 status := readgstatus(gp) &^ _Gscan
6708
6709
6710
6711
6712 if status != _Gsyscall && status != _Gdeadextra {
6713
6714 return syscallingThread{}, false
6715 }
6716 if !castogscanstatus(gp, status, status|_Gscan) {
6717
6718 return syscallingThread{}, false
6719 }
6720 if gp.m != mp || gp.m.p.ptr() != pp {
6721
6722 casfrom_Gscanstatus(gp, status|_Gscan, status)
6723 return syscallingThread{}, false
6724 }
6725 return syscallingThread{gp, mp, pp, status}, true
6726 }
6727
6728
6729
6730
6731
6732 func (s syscallingThread) gcstopP() {
6733 assertLockHeld(&sched.lock)
6734
6735 s.releaseP(_Pgcstop)
6736 s.pp.gcStopTime = nanotime()
6737 sched.stopwait--
6738 }
6739
6740
6741
6742 func (s syscallingThread) takeP() {
6743 s.releaseP(_Pidle)
6744 }
6745
6746
6747
6748
6749 func (s syscallingThread) releaseP(state uint32) {
6750 if state != _Pidle && state != _Pgcstop {
6751 throw("attempted to release P into a bad state")
6752 }
6753 trace := traceAcquire()
6754 s.pp.m = 0
6755 s.mp.p = 0
6756 atomic.Store(&s.pp.status, state)
6757 if trace.ok() {
6758 trace.ProcSteal(s.pp)
6759 traceRelease(trace)
6760 }
6761 sched.nGsyscallNoP.Add(1)
6762 s.pp.syscalltick++
6763 }
6764
6765
6766 func (s syscallingThread) resume() {
6767 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6768 }
6769
6770
6771
6772
6773
6774
6775 func preemptall() bool {
6776 res := false
6777 for _, pp := range allp {
6778 if pp.status != _Prunning {
6779 continue
6780 }
6781 if preemptone(pp) {
6782 res = true
6783 }
6784 }
6785 return res
6786 }
6787
6788
6789
6790
6791
6792
6793
6794
6795
6796
6797
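// preemptone requests that the goroutine running on pp stop at the next
// opportunity. It is purely best-effort: it may fail to inform the
// goroutine, or inform the wrong one. Reports whether a preemption
// request was issued.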
6798 func preemptone(pp *p) bool {
6799 mp := pp.m.ptr()
6800 if mp == nil || mp == getg().m {
6801 return false
6802 }
6803 gp := mp.curg
6804 if gp == nil || gp == mp.g0 {
6805 return false
6806 }
6807 if readgstatus(gp)&^_Gscan == _Gsyscall {
6808
6809 return false
6810 }
6811
6812 gp.preempt = true
6813
6814
6815
6816
6817
6818 gp.stackguard0 = stackPreempt
6819
6820
6821 if preemptMSupported && debug.asyncpreemptoff == 0 {
6822 pp.preempt = true
6823 preemptM(mp)
6824 }
6825
6826 return true
6827 }
6828
6829 var starttime int64
6830
6831 func schedtrace(detailed bool) {
6832 now := nanotime()
6833 if starttime == 0 {
6834 starttime = now
6835 }
6836
6837 lock(&sched.lock)
6838 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6839 if detailed {
6840 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6841 }
6842
6843 // Even with sched.lock held, most of the per-P, per-M, and per-G data
6844 // read below can change concurrently, so treat the values as best-effort.
6845 for i, pp := range allp {
6846 h := atomic.Load(&pp.runqhead)
6847 t := atomic.Load(&pp.runqtail)
6848 if detailed {
6849 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6850 mp := pp.m.ptr()
6851 if mp != nil {
6852 print(mp.id)
6853 } else {
6854 print("nil")
6855 }
6856 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6857 } else {
6858
6859
6860 print(" ")
6861 if i == 0 {
6862 print("[ ")
6863 }
6864 print(t - h)
6865 if i == len(allp)-1 {
6866 print(" ]")
6867 }
6868 }
6869 }
6870
6871 if !detailed {
6872
6873 print(" schedticks=[ ")
6874 for _, pp := range allp {
6875 print(pp.schedtick)
6876 print(" ")
6877 }
6878 print("]\n")
6879 }
6880
6881 if !detailed {
6882 unlock(&sched.lock)
6883 return
6884 }
6885
6886 for mp := allm; mp != nil; mp = mp.alllink {
6887 pp := mp.p.ptr()
6888 print(" M", mp.id, ": p=")
6889 if pp != nil {
6890 print(pp.id)
6891 } else {
6892 print("nil")
6893 }
6894 print(" curg=")
6895 if mp.curg != nil {
6896 print(mp.curg.goid)
6897 } else {
6898 print("nil")
6899 }
6900 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6901 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6902 print(lockedg.goid)
6903 } else {
6904 print("nil")
6905 }
6906 print("\n")
6907 }
6908
6909 forEachG(func(gp *g) {
6910 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6911 if gp.m != nil {
6912 print(gp.m.id)
6913 } else {
6914 print("nil")
6915 }
6916 print(" lockedm=")
6917 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6918 print(lockedm.id)
6919 } else {
6920 print("nil")
6921 }
6922 print("\n")
6923 })
6924 unlock(&sched.lock)
6925 }
6926 // updateMaxProcsGState is the state shared between sysmon and the goroutine that applies automatic GOMAXPROCS updates.
6927 type updateMaxProcsGState struct {
6928 lock mutex
6929 g *g
6930 idle atomic.Bool
6931 // procs is the GOMAXPROCS value to switch to; it is written by sysmon
6932 // before clearing idle and waking g.
6933 procs int32
6934 }
6935
6936 var (
6937
6938 // updatemaxprocs counts use of the non-default GODEBUG=updatemaxprocs setting.
6939 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6940
6941 // updateMaxProcsG is the goroutine (and pending value) used to apply
6942 // automatic GOMAXPROCS updates requested by sysmon.
6943 updateMaxProcsG updateMaxProcsGState
6944
6945
6946
6947
6948
6949
6950
6951
6952
6953
6954
6955
6956
6957
6958
6959
6960
6961
6962
6963
6964
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974
6975
6976
6977
6978
6979
6980
6981
6982
6983
6984
6985
6986
6987
6988
6989
6990 // computeMaxProcsLock serializes computation of the default GOMAXPROCS
6991 // value (see sysmonUpdateGOMAXPROCS).
6992 computeMaxProcsLock mutex
6993 )
6994
6995 // defaultGOMAXPROCSUpdateEnable starts the background goroutine that keeps
6996 // GOMAXPROCS in sync with the default value, unless the user disabled
6997 // automatic updates with GODEBUG=updatemaxprocs=0.
6998 func defaultGOMAXPROCSUpdateEnable() {
6999 if debug.updatemaxprocs == 0 {
7000
7001
7002
7003
7004
7005
7006
7007
7008
7009
7010
7011 updatemaxprocs.IncNonDefault()
7012 return
7013 }
7014
7015 go updateMaxProcsGoroutine()
7016 }
7017 // updateMaxProcsGoroutine parks until sysmon notices that the default GOMAXPROCS changed, then applies the new value with the world stopped.
7018 func updateMaxProcsGoroutine() {
7019 updateMaxProcsG.g = getg()
7020 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
7021 for {
7022 lock(&updateMaxProcsG.lock)
7023 if updateMaxProcsG.idle.Load() {
7024 throw("updateMaxProcsGoroutine: phase error")
7025 }
7026 updateMaxProcsG.idle.Store(true)
7027 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
7028
7029
7030 stw := stopTheWorldGC(stwGOMAXPROCS)
7031 // If the user called runtime.GOMAXPROCS in the meantime, their value
7032 // wins; stop doing automatic updates and exit this goroutine.
7033 lock(&sched.lock)
7034 custom := sched.customGOMAXPROCS
7035 unlock(&sched.lock)
7036 if custom {
7037 startTheWorldGC(stw)
7038 return
7039 }
7040
7041
7042
7043
7044
7045 newprocs = updateMaxProcsG.procs
7046 lock(&sched.lock)
7047 sched.customGOMAXPROCS = false
7048 unlock(&sched.lock)
7049
7050 startTheWorldGC(stw)
7051 }
7052 }
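// The handshake with sysmon is phased on updateMaxProcsG.idle: the goroutine
// marks itself idle and parks; sysmonUpdateGOMAXPROCS (below) stores the new
// target in updateMaxProcsG.procs, clears idle, and readies the goroutine;
// the goroutine then stops the world, installs newprocs, and loops around to
// park again.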
7053 // sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS and, if it changed and no custom value is set, wakes updateMaxProcsGoroutine to apply it.
7054 func sysmonUpdateGOMAXPROCS() {
7055
7056 lock(&computeMaxProcsLock)
7057
7058
7059 lock(&sched.lock)
7060 custom := sched.customGOMAXPROCS
7061 curr := gomaxprocs
7062 unlock(&sched.lock)
7063 if custom {
7064 unlock(&computeMaxProcsLock)
7065 return
7066 }
7067
7068
7069 procs := defaultGOMAXPROCS(0)
7070 unlock(&computeMaxProcsLock)
7071 if procs == curr {
7072
7073 return
7074 }
7075
7076
7077
7078
7079 if updateMaxProcsG.idle.Load() {
7080 lock(&updateMaxProcsG.lock)
7081 updateMaxProcsG.procs = procs
7082 updateMaxProcsG.idle.Store(false)
7083 var list gList
7084 list.push(updateMaxProcsG.g)
7085 injectglist(&list)
7086 unlock(&updateMaxProcsG.lock)
7087 }
7088 }
7089
7090 // schedEnableUser enables or disables the scheduling of user goroutines.
7091 // It does not stop already running user goroutines, so the caller should
7092 // first stop the world when disabling. When re-enabling, any user
7093 // goroutines parked on sched.disable.runnable are moved to the global run
7094 // queue and idle Ps are kicked to run them.
7095 func schedEnableUser(enable bool) {
7096 lock(&sched.lock)
7097 if sched.disable.user == !enable {
7098 unlock(&sched.lock)
7099 return
7100 }
7101 sched.disable.user = !enable
7102 if enable {
7103 n := sched.disable.runnable.size
7104 globrunqputbatch(&sched.disable.runnable)
7105 unlock(&sched.lock)
7106 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7107 startm(nil, false, false)
7108 }
7109 } else {
7110 unlock(&sched.lock)
7111 }
7112 }
7113
7114
7115 // schedEnabled reports whether gp should be scheduled: when user
7116 // goroutine scheduling is disabled, only system goroutines may run.
7117 // sched.lock must be held.
7118 func schedEnabled(gp *g) bool {
7119 assertLockHeld(&sched.lock)
7120
7121 if sched.disable.user {
7122 return isSystemGoroutine(gp, true)
7123 }
7124 return true
7125 }
7126
7127
7128
7129 // mput puts mp on the idle M list and then runs checkdead, since one more
7130 // idle M may mean every remaining goroutine is now stuck.
7131 // sched.lock must be held.
7132 func mput(mp *m) {
7133 assertLockHeld(&sched.lock)
7134
7135 sched.midle.push(unsafe.Pointer(mp))
7136 sched.nmidle++
7137 checkdead()
7138 }
7139
7140
7141
7142
7143 // mget tries to take an M off the idle list, returning nil if the list is
7144 // empty. sched.lock must be held.
7145 func mget() *m {
7146 assertLockHeld(&sched.lock)
7147
7148 mp := (*m)(sched.midle.pop())
7149 if mp != nil {
7150 sched.nmidle--
7151 }
7152 return mp
7153 }
7154
7155
7156
7157
7158
7159
7160 // mgetSpecific removes mp from the idle M list and returns it, or returns
7161 // nil if mp is not on the list. sched.lock must be held.
7162 func mgetSpecific(mp *m) *m {
7163 assertLockHeld(&sched.lock)
7164
7165 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7166
7167 return nil
7168 }
7169
7170 sched.midle.remove(unsafe.Pointer(mp))
7171 sched.nmidle--
7172
7173 return mp
7174 }
7175
7176
7177
7178
7179 // globrunqput puts gp on the tail of the global runnable queue.
7180 // sched.lock must be held.
7181 func globrunqput(gp *g) {
7182 assertLockHeld(&sched.lock)
7183
7184 sched.runq.pushBack(gp)
7185 }
7186
7187
7188
7189
7190 // globrunqputhead puts gp at the head of the global runnable queue.
7191 // sched.lock must be held.
7192 func globrunqputhead(gp *g) {
7193 assertLockHeld(&sched.lock)
7194
7195 sched.runq.push(gp)
7196 }
7197
7198
7199
7200
7201 // globrunqputbatch appends a whole batch of goroutines to the global
7202 // runnable queue and clears *batch.
7203 // sched.lock must be held.
7204 func globrunqputbatch(batch *gQueue) {
7205 assertLockHeld(&sched.lock)
7206
7207 sched.runq.pushBackAll(*batch)
7208 *batch = gQueue{}
7209 }
7210
7211 // globrunqget takes one goroutine off the head of the global runnable
7212 // queue, or returns nil if it is empty. sched.lock must be held.
7213 func globrunqget() *g {
7214 assertLockHeld(&sched.lock)
7215
7216 if sched.runq.size == 0 {
7217 return nil
7218 }
7219
7220 return sched.runq.pop()
7221 }
7222
7223 // globrunqgetbatch takes up to n goroutines from the global runnable queue:
7224 // one is returned directly and the rest in q. sched.lock must be held.
7225 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7226 assertLockHeld(&sched.lock)
7227
7228 if sched.runq.size == 0 {
7229 return
7230 }
7231
7232 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7233
7234 gp = sched.runq.pop()
7235 n--
7236
7237 for ; n > 0; n-- {
7238 gp1 := sched.runq.pop()
7239 q.pushBack(gp1)
7240 }
7241 return
7242 }
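// The batch size is capped at size/gomaxprocs+1 so that no single P can
// drain the global queue. For example, with gomaxprocs = 4 and 10 queued
// goroutines, a caller asking for 8 receives min(8, 10, 10/4+1) = 3: one
// goroutine returned directly and two more in q.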
7243
7244 // pMask is an atomic bitmask with one bit per P, indexed by P id.
7245 type pMask []uint32
7246
7247
7248 func (p pMask) read(id uint32) bool {
7249 word := id / 32
7250 mask := uint32(1) << (id % 32)
7251 return (atomic.Load(&p[word]) & mask) != 0
7252 }
7253
7254
7255 func (p pMask) set(id int32) {
7256 word := id / 32
7257 mask := uint32(1) << (id % 32)
7258 atomic.Or(&p[word], mask)
7259 }
7260
7261
7262 func (p pMask) clear(id int32) {
7263 word := id / 32
7264 mask := uint32(1) << (id % 32)
7265 atomic.And(&p[word], ^mask)
7266 }
7267
7268
7269 func (p pMask) any() bool {
7270 for i := range p {
7271 if atomic.Load(&p[i]) != 0 {
7272 return true
7273 }
7274 }
7275 return false
7276 }
7277
7278
7279
7280 // resize returns a mask large enough for nprocs Ps, reusing the existing
7281 // storage when it already has enough capacity.
7282 func (p pMask) resize(nprocs int32) pMask {
7283 maskWords := (nprocs + 31) / 32
7284
7285 if maskWords <= int32(cap(p)) {
7286 return p[:maskWords]
7287 }
7288 newMask := make([]uint32, maskWords)
7289
7290 copy(newMask, p)
7291 return newMask
7292 }
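// Worked example of the pMask layout: P id 37 lives in word 37/32 = 1 at bit
// 37%32 = 5, so read(37) tests bit 5 of p[1], set(37) ORs in 1<<5, and
// clear(37) ANDs it back out. resize keeps (nprocs+31)/32 words, one bit per
// possible P.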
7293
7294
7295
7296
7297
7298
7299
7300
7301 // pidleput puts pp on the idle P list. now should be a recent nanotime()
7302 // result, or zero, in which case the current time is fetched; the time
7303 // used is returned. pp must have an empty run queue.
7304 // sched.lock must be held.
7305 func pidleput(pp *p, now int64) int64 {
7306 assertLockHeld(&sched.lock)
7307
7308 if !runqempty(pp) {
7309 throw("pidleput: P has non-empty run queue")
7310 }
7311 if now == 0 {
7312 now = nanotime()
7313 }
7314 if pp.timers.len.Load() == 0 {
7315 timerpMask.clear(pp.id)
7316 }
7317 idlepMask.set(pp.id)
7318 pp.link = sched.pidle
7319 sched.pidle.set(pp)
7320 sched.npidle.Add(1)
7321 if !pp.limiterEvent.start(limiterEventIdle, now) {
7322 throw("must be able to track idle limiter event")
7323 }
7324 return now
7325 }
7326
7327
7328
7329
7330 // pidleget tries to take a P off the idle P list, returning nil if the
7331 // list is empty. now should be a recent nanotime() result or zero; the
7332 // time used is returned alongside the P.
7333 // sched.lock must be held.
7334 func pidleget(now int64) (*p, int64) {
7335 assertLockHeld(&sched.lock)
7336
7337 pp := sched.pidle.ptr()
7338 if pp != nil {
7339
7340 if now == 0 {
7341 now = nanotime()
7342 }
7343 timerpMask.set(pp.id)
7344 idlepMask.clear(pp.id)
7345 sched.pidle = pp.link
7346 sched.npidle.Add(-1)
7347 pp.limiterEvent.stop(limiterEventIdle, now)
7348 }
7349 return pp, now
7350 }
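// pidleput and pidleget keep the P masks consistent with the idle list: an
// idle P is marked in idlepMask, and it is dropped from timerpMask only when
// its timer heap is empty, so that running Ps keep checking timers the idle
// P still owns.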
7351
7352
7353
7354
7355
7356
7357
7358 // pidlegetSpinning is pidleget for callers that have found work and need a
7359 // P to spin with. If no P is available it sets sched.needspinning so that
7360 // an M about to release its P knows there is pending work.
7361 // sched.lock must be held.
7362 func pidlegetSpinning(now int64) (*p, int64) {
7363 assertLockHeld(&sched.lock)
7364
7365 pp, now := pidleget(now)
7366 if pp == nil {
7367
7368
7369
7370 sched.needspinning.Store(1)
7371 return nil, now
7372 }
7373
7374 return pp, now
7375 }
7376
7377 // runqempty reports whether pp has no goroutines in its local run queue or
7378 // in runnext. It never returns true spuriously.
7379 func runqempty(pp *p) bool {
7380 // Defend against the race where runqput kicks the goroutine in runnext
7381 // onto the tail of the queue: observing head == tail from before the move
7382 // and runnext == 0 from after it would wrongly report the P as empty, so
7383 // keep re-reading until the tail is stable across the reads.
7384 for {
7385 head := atomic.Load(&pp.runqhead)
7386 tail := atomic.Load(&pp.runqtail)
7387 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7388 if tail == atomic.Load(&pp.runqtail) {
7389 return head == tail && runnext == 0
7390 }
7391 }
7392 }
7393
7394
7395
7396
7397
7398
7399
7400 // randomizeScheduler perturbs scheduling order in race builds to shake out
7401 // code that depends on a particular ordering (for example, code that always
7402 // expects a new goroutine to land in runnext).
7403 const randomizeScheduler = raceenabled
7404
7405 // runqput tries to put gp on the local runnable queue.
7406 // If next is false, runqput adds gp to the tail of the queue.
7407 // If next is true, runqput puts gp in the pp.runnext slot, demoting any
7408 // previous occupant to the tail. If the queue is full, the overflow goes
7409 // to the global queue. Executed only by the owner P.
7410 func runqput(pp *p, gp *g, next bool) {
7411 if !haveSysmon && next {
7412
7413
7414
7415
7416
7417
7418
7419
7420 next = false
7421 }
7422 if randomizeScheduler && next && randn(2) == 0 {
7423 next = false
7424 }
7425
7426 if next {
7427 retryNext:
7428 oldnext := pp.runnext
7429 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7430 goto retryNext
7431 }
7432 if oldnext == 0 {
7433 return
7434 }
7435
7436 gp = oldnext.ptr()
7437 }
7438
7439 retry:
7440 h := atomic.LoadAcq(&pp.runqhead)
7441 t := pp.runqtail
7442 if t-h < uint32(len(pp.runq)) {
7443 pp.runq[t%uint32(len(pp.runq))].set(gp)
7444 atomic.StoreRel(&pp.runqtail, t+1)
7445 return
7446 }
7447 if runqputslow(pp, gp, h, t) {
7448 return
7449 }
7450
7451 goto retry
7452 }
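// For example, runqput(pp, gp, true) makes gp the new pp.runnext; any
// goroutine that already occupied runnext is demoted to the tail of the
// local queue, or to the global queue via runqputslow if the local queue is
// full.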
7453
7454 // runqputslow moves the first half of pp's local queue, plus gp, to the
7455 // global runnable queue, because the local queue is full.
7456 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7457 var batch [len(pp.runq)/2 + 1]*g
7458
7459
7460 n := t - h
7461 n = n / 2
7462 if n != uint32(len(pp.runq)/2) {
7463 throw("runqputslow: queue is not full")
7464 }
7465 for i := uint32(0); i < n; i++ {
7466 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7467 }
7468 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7469 return false
7470 }
7471 batch[n] = gp
7472
7473 if randomizeScheduler {
7474 for i := uint32(1); i <= n; i++ {
7475 j := cheaprandn(i + 1)
7476 batch[i], batch[j] = batch[j], batch[i]
7477 }
7478 }
7479
7480
7481 for i := uint32(0); i < n; i++ {
7482 batch[i].schedlink.set(batch[i+1])
7483 }
7484
7485 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7486
7487
7488 lock(&sched.lock)
7489 globrunqputbatch(&q)
7490 unlock(&sched.lock)
7491 return true
7492 }
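// pp.runq holds 256 entries (see the batch buffer type in runqgrab below),
// so a full queue makes runqputslow transfer 128 queued goroutines plus the
// incoming one, 129 in total, to the global queue under a single sched.lock
// acquisition, pre-linked through schedlink as one batch.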
7493
7494 // runqputbatch tries to put all the goroutines in q on the local runnable
7495 // queue. Any that do not fit are left in q for the caller to deal with.
7496 // Executed only by the owner P.
7497 func runqputbatch(pp *p, q *gQueue) {
7498 if q.empty() {
7499 return
7500 }
7501 h := atomic.LoadAcq(&pp.runqhead)
7502 t := pp.runqtail
7503 n := uint32(0)
7504 for !q.empty() && t-h < uint32(len(pp.runq)) {
7505 gp := q.pop()
7506 pp.runq[t%uint32(len(pp.runq))].set(gp)
7507 t++
7508 n++
7509 }
7510
7511 if randomizeScheduler {
7512 off := func(o uint32) uint32 {
7513 return (pp.runqtail + o) % uint32(len(pp.runq))
7514 }
7515 for i := uint32(1); i < n; i++ {
7516 j := cheaprandn(i + 1)
7517 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7518 }
7519 }
7520
7521 atomic.StoreRel(&pp.runqtail, t)
7522
7523 return
7524 }
7525
7526 // runqget gets a goroutine from the local runnable queue, preferring
7527 // runnext. If inheritTime is true the goroutine should inherit the
7528 // remaining time in the current time slice; otherwise it starts a new one.
7529 // Executed only by the owner P.
7530 func runqget(pp *p) (gp *g, inheritTime bool) {
7531
7532 next := pp.runnext
7533 // Only the owner P can install a new runnext, so if the CAS fails the
7534 // goroutine was stolen by another P and there is no point retrying;
7535 // fall through to the queue.
7536 if next != 0 && pp.runnext.cas(next, 0) {
7537 return next.ptr(), true
7538 }
7539
7540 for {
7541 h := atomic.LoadAcq(&pp.runqhead)
7542 t := pp.runqtail
7543 if t == h {
7544 return nil, false
7545 }
7546 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7547 if atomic.CasRel(&pp.runqhead, h, h+1) {
7548 return gp, false
7549 }
7550 }
7551 }
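// A goroutine taken from runnext is returned with inheritTime set: it runs
// on the remainder of the current time slice instead of a fresh one, which
// keeps a pair of goroutines that repeatedly ready each other from resetting
// the slice forever and starving the rest of the local queue.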
7552
7553 // runqdrain removes all goroutines from pp's local runnable queue (and
7554 // runnext) and returns them as a queue. Executed only by the owner P.
7555 func runqdrain(pp *p) (drainQ gQueue) {
7556 oldNext := pp.runnext
7557 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7558 drainQ.pushBack(oldNext.ptr())
7559 }
7560
7561 retry:
7562 h := atomic.LoadAcq(&pp.runqhead)
7563 t := pp.runqtail
7564 qn := t - h
7565 if qn == 0 {
7566 return
7567 }
7568 if qn > uint32(len(pp.runq)) {
7569 goto retry
7570 }
7571
7572 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7573 goto retry
7574 }
7575
7576
7577
7578
7579
7580
7581
7582
7583 for i := uint32(0); i < qn; i++ {
7584 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7585 drainQ.pushBack(gp)
7586 }
7587 return
7588 }
7589
7590 // runqgrab grabs a batch of goroutines from pp's local runnable queue into
7591 // batch, which is treated as a ring buffer starting at batchHead. It
7592 // returns the number of goroutines grabbed.
7593 // Can be executed by any P.
7594 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7595 for {
7596 h := atomic.LoadAcq(&pp.runqhead)
7597 t := atomic.LoadAcq(&pp.runqtail)
7598 n := t - h
7599 n = n - n/2
7600 if n == 0 {
7601 if stealRunNextG {
7602
7603 if next := pp.runnext; next != 0 {
7604 if pp.status == _Prunning {
7605 if mp := pp.m.ptr(); mp != nil {
7606 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
7607
7608
7609
7610
7611
7612
7613
7614
7615
7616
7617
7618
7619
7620 // The P is running and its current goroutine is not in a syscall, so it
7621 // is probably just about to schedule the goroutine in runnext (the common
7622 // case is a goroutine that readied another and is about to block). Back
7623 // off briefly instead of stealing runnext immediately: taking it in this
7624 // window would just bounce goroutines between Ps and defeat the purpose
7625 // of the runnext slot.
7626 if !osHasLowResTimer {
7627 usleep(3)
7628 } else {
7629 // On OSes with a low-resolution sleep timer, usleep(3) would sleep for
7630 // far longer than 3 microseconds, so just yield the OS thread instead
7631 // of sleeping.
7632 osyield()
7633 }
7634 }
7635 }
7636 }
7637 if !pp.runnext.cas(next, 0) {
7638 continue
7639 }
7640 batch[batchHead%uint32(len(batch))] = next
7641 return 1
7642 }
7643 }
7644 return 0
7645 }
7646 if n > uint32(len(pp.runq)/2) {
7647 continue
7648 }
7649 for i := uint32(0); i < n; i++ {
7650 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7651 batch[(batchHead+i)%uint32(len(batch))] = g
7652 }
7653 if atomic.CasRel(&pp.runqhead, h, h+n) {
7654 return n
7655 }
7656 }
7657 }
7658
7659 // runqsteal steals roughly half of the goroutines in p2's local runnable
7660 // queue and puts them onto pp's local runnable queue, returning one of the
7661 // stolen goroutines (or nil if p2 had nothing to steal).
7662 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7663 t := pp.runqtail
7664 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7665 if n == 0 {
7666 return nil
7667 }
7668 n--
7669 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7670 if n == 0 {
7671 return gp
7672 }
7673 h := atomic.LoadAcq(&pp.runqhead)
7674 if t-h+n >= uint32(len(pp.runq)) {
7675 throw("runqsteal: runq overflow")
7676 }
7677 atomic.StoreRel(&pp.runqtail, t+n)
7678 return gp
7679 }
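// The steal copies directly into the thief's array: runqgrab writes the
// stolen goroutines into pp.runq starting at index pp.runqtail, runqsteal
// publishes all but the last by advancing runqtail, and the last grabbed
// goroutine is returned for the stealing M to run immediately.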
7680
7681 // A gQueue is a queue of Gs linked through g.schedlink. A G can be on
7682 // only one gQueue or gList at a time.
7683 type gQueue struct {
7684 head guintptr
7685 tail guintptr
7686 size int32
7687 }
7688
7689
7690 func (q *gQueue) empty() bool {
7691 return q.head == 0
7692 }
7693
7694
7695 func (q *gQueue) push(gp *g) {
7696 gp.schedlink = q.head
7697 q.head.set(gp)
7698 if q.tail == 0 {
7699 q.tail.set(gp)
7700 }
7701 q.size++
7702 }
7703
7704
7705 func (q *gQueue) pushBack(gp *g) {
7706 gp.schedlink = 0
7707 if q.tail != 0 {
7708 q.tail.ptr().schedlink.set(gp)
7709 } else {
7710 q.head.set(gp)
7711 }
7712 q.tail.set(gp)
7713 q.size++
7714 }
7715
7716
7717
7718 func (q *gQueue) pushBackAll(q2 gQueue) {
7719 if q2.tail == 0 {
7720 return
7721 }
7722 q2.tail.ptr().schedlink = 0
7723 if q.tail != 0 {
7724 q.tail.ptr().schedlink = q2.head
7725 } else {
7726 q.head = q2.head
7727 }
7728 q.tail = q2.tail
7729 q.size += q2.size
7730 }
7731
7732
7733
7734 func (q *gQueue) pop() *g {
7735 gp := q.head.ptr()
7736 if gp != nil {
7737 q.head = gp.schedlink
7738 if q.head == 0 {
7739 q.tail = 0
7740 }
7741 q.size--
7742 }
7743 return gp
7744 }
7745
7746
7747 func (q *gQueue) popList() gList {
7748 stack := gList{q.head, q.size}
7749 *q = gQueue{}
7750 return stack
7751 }
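// gQueue is an intrusive FIFO: push, pushBack, and pushBackAll splice
// goroutines in through their schedlink fields, so enqueueing never
// allocates. A minimal sketch of moving a P's local work to the global
// queue (assuming pp is the current M's P):
//
//	q := runqdrain(pp)
//	lock(&sched.lock)
//	globrunqputbatch(&q)
//	unlock(&sched.lock)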
7752
7753 // A gList is a list of Gs linked through g.schedlink. A G can be on only
7754 // one gQueue or gList at a time.
7755 type gList struct {
7756 head guintptr
7757 size int32
7758 }
7759
7760
7761 func (l *gList) empty() bool {
7762 return l.head == 0
7763 }
7764
7765
7766 func (l *gList) push(gp *g) {
7767 gp.schedlink = l.head
7768 l.head.set(gp)
7769 l.size++
7770 }
7771
7772
7773 func (l *gList) pushAll(q gQueue) {
7774 if !q.empty() {
7775 q.tail.ptr().schedlink = l.head
7776 l.head = q.head
7777 l.size += q.size
7778 }
7779 }
7780
7781
7782 func (l *gList) pop() *g {
7783 gp := l.head.ptr()
7784 if gp != nil {
7785 l.head = gp.schedlink
7786 l.size--
7787 }
7788 return gp
7789 }
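// gList is the stack-shaped counterpart of gQueue: push and pop operate on
// the head only, and pushAll splices an entire gQueue onto the front in
// O(1). sysmonUpdateGOMAXPROCS above, for example, builds a one-element
// gList and hands it to injectglist to ready the update goroutine.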
7790 // setMaxThreads implements runtime/debug.SetMaxThreads: it returns the old
7791 // limit and installs the new one, clamped to the int32 range.
7792 func setMaxThreads(in int) (out int) {
7793 lock(&sched.lock)
7794 out = int(sched.maxmcount)
7795 if in > 0x7fffffff {
7796 sched.maxmcount = 0x7fffffff
7797 } else {
7798 sched.maxmcount = int32(in)
7799 }
7800 checkmcount()
7801 unlock(&sched.lock)
7802 return
7803 }
7804
7805
7806
7807
7808
7809
7810
7811
7812
7813
7814 // procPin disables preemption for the current goroutine (by incrementing
7815 // m.locks), effectively pinning it to its current P, and returns that P's
7816 // id. It is exposed to sync and sync/atomic.
7817 func procPin() int {
7818 gp := getg()
7819 mp := gp.m
7820
7821 mp.locks++
7822 return int(mp.p.ptr().id)
7823 }
7824
7825
7826
7827
7828
7829
7830
7831
7832
7833
7834
7835 // procUnpin undoes procPin, re-enabling preemption for the current
7836 // goroutine. It is exposed to sync and sync/atomic.
7837 func procUnpin() {
7838 gp := getg()
7839 gp.m.locks--
7840 }
7841
7842
7843
7844 func sync_runtime_procPin() int {
7845 return procPin()
7846 }
7847
7848
7849
7850 func sync_runtime_procUnpin() {
7851 procUnpin()
7852 }
7853
7854
7855
7856 func sync_atomic_runtime_procPin() int {
7857 return procPin()
7858 }
7859
7860
7861
7862 func sync_atomic_runtime_procUnpin() {
7863 procUnpin()
7864 }
7865
7866
7867
7868 // internal_sync_runtime_canSpin reports whether spinning in a sync.Mutex
7869 // acquisition attempt i makes sense right now; see the conditions below.
7870 func internal_sync_runtime_canSpin(i int) bool {
7871 // sync.Mutex is cooperative, so be conservative: spin only a few times,
7872 // only on a multicore machine with GOMAXPROCS > 1, only if at least one
7873 // other P is running real work, and only if the local run queue is
7874 // empty, so that spinning never delays goroutines that are already
7875 // runnable.
7876 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7877 return false
7878 }
7879 if p := getg().m.p.ptr(); !runqempty(p) {
7880 return false
7881 }
7882 return true
7883 }
7884
7885 // internal_sync_runtime_doSpin performs the actual busy-wait by calling
7886 // procyield(active_spin_cnt).
7887 func internal_sync_runtime_doSpin() {
7888 procyield(active_spin_cnt)
7889 }
7890
7891
7892
7893
7894
7895
7896
7897
7898
7899
7900
7901
7902 // sync_runtime_canSpin is retained, with this exact signature, because
7903 // widely used external packages reach into the runtime via linkname to
7904 // call it; it simply forwards to internal_sync_runtime_canSpin.
7905 func sync_runtime_canSpin(i int) bool {
7906 return internal_sync_runtime_canSpin(i)
7907 }
7908
7909
7910
7911
7912
7913
7914
7915
7916
7917
7918
7919 // sync_runtime_doSpin is likewise retained for external linkname users and
7920 // forwards to internal_sync_runtime_doSpin.
7921 func sync_runtime_doSpin() {
7922 internal_sync_runtime_doSpin()
7923 }
7924
7925 var stealOrder randomOrder
7926
7927 // randomOrder/randomEnum are helper types for randomized work stealing.
7928 // They let each M enumerate all Ps in a different pseudo-random order
7929 // without repetition, based on the fact that stepping by an increment
7930 // coprime to the P count visits every index exactly once.
7931 type randomOrder struct {
7932 count uint32
7933 coprimes []uint32
7934 }
7935
7936 type randomEnum struct {
7937 i uint32
7938 count uint32
7939 pos uint32
7940 inc uint32
7941 }
7942
7943 func (ord *randomOrder) reset(count uint32) {
7944 ord.count = count
7945 ord.coprimes = ord.coprimes[:0]
7946 for i := uint32(1); i <= count; i++ {
7947 if gcd(i, count) == 1 {
7948 ord.coprimes = append(ord.coprimes, i)
7949 }
7950 }
7951 }
7952
7953 func (ord *randomOrder) start(i uint32) randomEnum {
7954 return randomEnum{
7955 count: ord.count,
7956 pos: i % ord.count,
7957 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7958 }
7959 }
7960
7961 func (enum *randomEnum) done() bool {
7962 return enum.i == enum.count
7963 }
7964
7965 func (enum *randomEnum) next() {
7966 enum.i++
7967 enum.pos = (enum.pos + enum.inc) % enum.count
7968 }
7969
7970 func (enum *randomEnum) position() uint32 {
7971 return enum.pos
7972 }
7973
7974 func gcd(a, b uint32) uint32 {
7975 for b != 0 {
7976 a, b = b, a%b
7977 }
7978 return a
7979 }
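// Worked example: with count = 4 the coprimes are 1 and 3. An enumeration
// started at pos = 2 with inc = 3 visits P indexes 2, 1, 0, 3 and then
// reports done, so every P is visited exactly once, and different callers
// tend to probe the Ps in different orders.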
7980
7981 // An initTask represents a set of package init functions emitted by the
7982 // linker: a state word, a count, and then the functions themselves.
7983 type initTask struct {
7984 state uint32
7985 nfns uint32
7986 // followed by nfns function PCs, one uintptr each
7987 }
7988
7989 // inittrace collects per-package init timing and allocation statistics
7990 // when init tracing (GODEBUG=inittrace=1) is active.
7991 var inittrace tracestat
7992
7993 type tracestat struct {
7994 active bool
7995 id uint64
7996 allocs uint64
7997 bytes uint64
7998 }
7999 // doInit runs the given init tasks in order.
8000 func doInit(ts []*initTask) {
8001 for _, t := range ts {
8002 doInit1(t)
8003 }
8004 }
8005
8006 func doInit1(t *initTask) {
8007 switch t.state {
8008 case 2:
8009 return
8010 case 1:
8011 throw("recursive call during initialization - linker skew")
8012 default:
8013 t.state = 1
8014
8015 var (
8016 start int64
8017 before tracestat
8018 )
8019
8020 if inittrace.active {
8021 start = nanotime()
8022
8023 before = inittrace
8024 }
8025
8026 if t.nfns == 0 {
8027 // Tasks with no functions should have been pruned by the linker.
8028 throw("inittask with no functions")
8029 }
8030
8031 firstFunc := add(unsafe.Pointer(t), 8)
8032 for i := uint32(0); i < t.nfns; i++ {
8033 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
8034 f := *(*func())(unsafe.Pointer(&p))
8035 f()
8036 }
8037
8038 if inittrace.active {
8039 end := nanotime()
8040
8041 after := inittrace
8042
8043 f := *(*func())(unsafe.Pointer(&firstFunc))
8044 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
8045
8046 var sbuf [24]byte
8047 print("init ", pkg, " @")
8048 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
8049 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
8050 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
8051 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
8052 print("\n")
8053 }
8054
8055 t.state = 2
8056 }
8057 }
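// An initTask is laid out by the linker as the two uint32 fields above
// followed immediately by nfns function pointers, which is why doInit1 finds
// the first function at offset 8 and advances by goarch.PtrSize for each
// subsequent one.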
8058