Source file
src/runtime/runtime1.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "internal/goarch"
10 "internal/runtime/atomic"
11 "unsafe"
12 )
13
14
15
16
17
18
// Bit layout of the cached GOTRACEBACK setting: the two low bits are
// the crash and all flags, and the remaining bits (>> tracebackShift)
// hold the numeric traceback level.
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

// traceback_cache is the current setting, read by gotraceback and
// written by setTraceback; it defaults to level 2 with no flags.
// traceback_env preserves the bits that came from the environment
// (see parsedebugvars); setTraceback always ORs it back in.
var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
27
28
29
30
31
32
33
34
35
36
// gotraceback returns the current traceback settings.
//
// level is the numeric traceback level (from the per-M override in
// gp.m.traceback if set, otherwise from traceback_cache).
// all reports whether all goroutines' stacks should be printed.
// crash reports whether the crash flag is set.
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	// A throw at or above throwTypeUser always shows all goroutines.
	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		// Per-M override takes precedence over the cached setting.
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Runtime-internal throws force level 2 regardless of the
		// environment setting.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
53
// C-style argument count and vector recorded by args at startup;
// on Unix the environment strings follow argv (see goenvs_unix).
var (
	argc int32
	argv **byte
)
58
59
60
61
// argv_index returns the pointer stored in slot i of the C-style
// argument vector argv (an array of *byte).
func argv_index(argv **byte, i int32) *byte {
	slot := unsafe.Add(unsafe.Pointer(argv), uintptr(i)*unsafe.Sizeof(argv))
	return *(**byte)(slot)
}
65
// args records the C argc/argv passed at process startup and then
// hands them to the OS-specific sysargs hook.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
71
72 func goargs() {
73 if GOOS == "windows" {
74 return
75 }
76 argslice = make([]string, argc)
77 for i := int32(0); i < argc; i++ {
78 argslice[i] = gostringnocopy(argv_index(argv, i))
79 }
80 }
81
82 func goenvs_unix() {
83
84
85
86 n := int32(0)
87 for argv_index(argv, argc+1+n) != nil {
88 n++
89 }
90
91 envs = make([]string, n)
92 for i := int32(0); i < n; i++ {
93 envs[i] = gostring(argv_index(argv, argc+1+i))
94 }
95 }
96
// environ returns the runtime's copy of the process environment
// (populated by goenvs_unix on Unix systems).
func environ() []string {
	return envs
}
100
101
102
103 var test_z64, test_x64 uint64
104
// testAtomic64 exercises the 64-bit atomic primitives (Cas64,
// Load64, Store64, Xadd64, Xchg64) and throws if any misbehaves.
// Values above 1<<32 catch implementations that truncate to 32 bits.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Old value (0) does not match *z (42): Cas64 must fail.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// Now the old value matches: Cas64 must succeed and store 1.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	// Xadd64 returns the new value.
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old value.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
141
142 func check() {
143 var (
144 a int8
145 b uint8
146 c int16
147 d uint16
148 e int32
149 f uint32
150 g int64
151 h uint64
152 i, i1 float32
153 j, j1 float64
154 k unsafe.Pointer
155 l *uint16
156 m [4]byte
157 )
158 type x1t struct {
159 x uint8
160 }
161 type y1t struct {
162 x1 x1t
163 y uint8
164 }
165 var x1 x1t
166 var y1 y1t
167
168 if unsafe.Sizeof(a) != 1 {
169 throw("bad a")
170 }
171 if unsafe.Sizeof(b) != 1 {
172 throw("bad b")
173 }
174 if unsafe.Sizeof(c) != 2 {
175 throw("bad c")
176 }
177 if unsafe.Sizeof(d) != 2 {
178 throw("bad d")
179 }
180 if unsafe.Sizeof(e) != 4 {
181 throw("bad e")
182 }
183 if unsafe.Sizeof(f) != 4 {
184 throw("bad f")
185 }
186 if unsafe.Sizeof(g) != 8 {
187 throw("bad g")
188 }
189 if unsafe.Sizeof(h) != 8 {
190 throw("bad h")
191 }
192 if unsafe.Sizeof(i) != 4 {
193 throw("bad i")
194 }
195 if unsafe.Sizeof(j) != 8 {
196 throw("bad j")
197 }
198 if unsafe.Sizeof(k) != goarch.PtrSize {
199 throw("bad k")
200 }
201 if unsafe.Sizeof(l) != goarch.PtrSize {
202 throw("bad l")
203 }
204 if unsafe.Sizeof(x1) != 1 {
205 throw("bad unsafe.Sizeof x1")
206 }
207 if unsafe.Offsetof(y1.y) != 1 {
208 throw("bad offsetof y1.y")
209 }
210 if unsafe.Sizeof(y1) != 2 {
211 throw("bad unsafe.Sizeof y1")
212 }
213
214 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
215 throw("bad timediv")
216 }
217
218 var z uint32
219 z = 1
220 if !atomic.Cas(&z, 1, 2) {
221 throw("cas1")
222 }
223 if z != 2 {
224 throw("cas2")
225 }
226
227 z = 4
228 if atomic.Cas(&z, 5, 6) {
229 throw("cas3")
230 }
231 if z != 4 {
232 throw("cas4")
233 }
234
235 z = 0xffffffff
236 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
237 throw("cas5")
238 }
239 if z != 0xfffffffe {
240 throw("cas6")
241 }
242
243 m = [4]byte{1, 1, 1, 1}
244 atomic.Or8(&m[1], 0xf0)
245 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
246 throw("atomicor8")
247 }
248
249 m = [4]byte{0xff, 0xff, 0xff, 0xff}
250 atomic.And8(&m[1], 0x1)
251 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
252 throw("atomicand8")
253 }
254
255 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
256 if j == j {
257 throw("float64nan")
258 }
259 if !(j != j) {
260 throw("float64nan1")
261 }
262
263 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
264 if j == j1 {
265 throw("float64nan2")
266 }
267 if !(j != j1) {
268 throw("float64nan3")
269 }
270
271 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
272 if i == i {
273 throw("float32nan")
274 }
275 if i == i {
276 throw("float32nan1")
277 }
278
279 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
280 if i == i1 {
281 throw("float32nan2")
282 }
283 if i == i1 {
284 throw("float32nan3")
285 }
286
287 testAtomic64()
288
289 if fixedStack != round2(fixedStack) {
290 throw("FixedStack is not power-of-2")
291 }
292
293 if !checkASM() {
294 throw("assembly checks failed")
295 }
296 }
297
// dbgVar describes one GODEBUG setting: its name, where the parsed
// value is stored, and its default. Exactly one of value/atomic is
// expected to be non-nil (see parsegodebug): value fields are written
// only at startup, atomic fields may be updated while running.
type dbgVar struct {
	name   string
	value  *int32        // written only at startup (parsegodebug with seen == nil)
	atomic *atomic.Int32 // may be updated after startup (reparsedebugvars)
	def    int32         // default, applied by parsedebugvars when non-zero
}
304
305
306
307
308
// debug holds the parsed GODEBUG settings. Plain int32 fields are
// set once by parsedebugvars at startup; the atomic.Int32 fields may
// also be updated later by reparsedebugvars.
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	decoratemappings         int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32
	runtimeContentionStacks  atomic.Int32
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32
	dataindependenttiming    int32

	// malloc is a combined flag: parsedebugvars sets it when either
	// inittrace or sbrk is non-zero.
	malloc    bool
	inittrace int32
	sbrk      int32

	// These settings can change while the program runs, so they are
	// atomic (see reparsedebugvars, which resets unmentioned ones to 0).
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	asynctimerchan atomic.Int32
}
365
// dbgvars maps GODEBUG key names to the debug struct fields they
// control, plus any non-zero default. parsedebugvars applies the
// defaults and then parsegodebug fills in values from the environment.
var dbgvars = []*dbgVar{
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "dataindependenttiming", value: &debug.dataindependenttiming},
	{name: "decoratemappings", value: &debug.decoratemappings, def: 1},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "inittrace", value: &debug.inittrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
	{name: "traceallocfree", atomic: &debug.traceallocfree},
	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
}
399
// parsedebugvars initializes the debug settings from built-in
// defaults and the GODEBUG environment variable. Called once during
// runtime startup.
func parsedebugvars() {
	// Hard-coded defaults.
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1
	if GOOS == "linux" {
		// NOTE(review): madvdontneed defaults to 1 on Linux; the
		// upstream rationale (MADV_DONTNEED vs MADV_FREE behavior)
		// is not visible in this view — confirm before relying on it.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	godebug := gogetenv("GODEBUG")

	// Publish the raw $GODEBUG value via godebugEnv.
	p := new(string)
	*p = godebug
	godebugEnv.Store(p)

	// Apply per-variable defaults from the dbgvars table.
	for _, v := range dbgvars {
		if v.def != 0 {
			// Each entry is expected to have value or atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// Apply toolchain-provided defaults first ...
	parsegodebug(godebugDefault, nil)

	// ... then the environment, so $GODEBUG wins.
	parsegodebug(godebug, nil)

	debug.malloc = (debug.inittrace | debug.sbrk) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	// gccheckmark mode disables asynchronous preemption.
	// NOTE(review): the original explanatory comment was stripped
	// from this view; confirm the rationale upstream.
	if debug.gccheckmark > 0 {
		debug.asyncpreemptoff = 1
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
465
466
467
468 func reparsedebugvars(env string) {
469 seen := make(map[string]bool)
470
471 parsegodebug(env, seen)
472
473 parsegodebug(godebugDefault, seen)
474
475 for _, v := range dbgvars {
476 if v.atomic != nil && !seen[v.name] {
477 v.atomic.Store(0)
478 }
479 }
480 }
481
482
483
484
485
486
487
488
489
490
491
// parsegodebug parses a comma-separated key=value GODEBUG string and
// updates the matching entries in dbgvars.
//
// If seen == nil, this is the startup parse: fields are scanned
// left-to-right (a later duplicate simply overwrites the earlier
// one), and both value and atomic variables may be written.
// If seen != nil, this is a reparse while running: fields are scanned
// right-to-left so the last occurrence of a key wins, keys already in
// seen are skipped, and only atomic variables are updated.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// Startup: forward scan.
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// Reparse: backward scan, taking the last field first.
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			// Malformed field (no '='); ignore it.
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			// Already applied by an earlier (later-in-string) field.
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// memprofilerate is special-cased: it sets MemProfileRate
		// directly and only at startup.
		if seen == nil && key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
553
554
// setTraceback updates traceback_cache from a GOTRACEBACK level
// string: "none", "single" (or empty), "all", "system", "crash",
// "wer" (Windows only), or a bare number.
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			// Same as "crash", plus Windows Error Reporting.
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		// Not on Windows: treat "wer" like any unknown string.
		fallthrough
	default:
		t = tracebackAll
		// A numeric level (that fits in uint32) is honored directly.
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}

	// When built as a library or archive, always set the crash flag.
	// NOTE(review): rationale not visible in this view — confirm.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Never drop bits that came from the environment.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
591
592
593
594
595
596
597
598
// timediv computes v / div and, when rem is non-nil, stores v % div
// through rem. It performs long division over 31 quotient bits so no
// 64-bit hardware divide is needed. If the quotient does not fit in
// 31 bits, it returns 0x7fffffff with *rem set to 0.
func timediv(v int64, div int32, rem *int32) int32 {
	quo := int32(0)
	for shift := uint(30); ; shift-- {
		if v >= int64(div)<<shift {
			v -= int64(div) << shift
			quo |= 1 << shift
		}
		if shift == 0 {
			break
		}
	}
	if v >= int64(div) {
		// Residual still >= div: the true quotient needed more than
		// 31 bits. Saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return quo
}
620
621
622
623
// acquirem pins the current goroutine to its M by incrementing the
// M's lock count, and returns the M. Must be paired with releasem.
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}
629
630
// releasem undoes a previous acquirem. If this drops the lock count
// to zero and a preemption request is pending, it re-arms the request
// by poisoning stackguard0.
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// Re-arm the pending preemption request now that the M is
		// no longer locked.
		gp.stackguard0 = stackPreempt
	}
}
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
656 modules := activeModules()
657 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
658 ret := [][]int32{modules[0].typelinks}
659 for _, md := range modules[1:] {
660 sections = append(sections, unsafe.Pointer(md.types))
661 ret = append(ret, md.typelinks)
662 }
663 return sections, ret
664 }
665
666
667
668
669
670
671
672
673
674
675
676
// reflect_resolveNameOff resolves a name offset relative to a pointer
// in some module and returns the name's byte data.
// NOTE(review): presumably linked into package reflect via
// go:linkname; the directive is not visible in this view.
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
// reflect_resolveTypeOff resolves a type offset relative to rtype and
// returns the referenced type descriptor.
// NOTE(review): presumably linked into package reflect via
// go:linkname; the directive is not visible in this view.
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
698
699
700
701
702
703
704
705
706
707
708
709
// reflect_resolveTextOff resolves a text (code) offset relative to
// rtype and returns the function pointer it denotes.
// NOTE(review): presumably linked into package reflect via
// go:linkname; the directive is not visible in this view.
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}
713
714
715
716
// reflectlite_resolveNameOff is the internal/reflectlite counterpart
// of reflect_resolveNameOff, with identical behavior.
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
720
721
722
723
// reflectlite_resolveTypeOff is the internal/reflectlite counterpart
// of reflect_resolveTypeOff, with identical behavior.
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
727
728
729
730
731 func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
732 reflectOffsLock()
733 if reflectOffs.m == nil {
734 reflectOffs.m = make(map[int32]unsafe.Pointer)
735 reflectOffs.minv = make(map[unsafe.Pointer]int32)
736 reflectOffs.next = -1
737 }
738 id, found := reflectOffs.minv[ptr]
739 if !found {
740 id = reflectOffs.next
741 reflectOffs.next--
742 reflectOffs.m[id] = ptr
743 reflectOffs.minv[ptr] = id
744 }
745 reflectOffsUnlock()
746 return id
747 }
748
749
// fips_getIndicator returns the FIPS indicator value stored on the
// current goroutine.
func fips_getIndicator() uint8 {
	return getg().fipsIndicator
}
753
754
// fips_setIndicator stores the FIPS indicator value on the current
// goroutine.
func fips_setIndicator(indicator uint8) {
	getg().fipsIndicator = indicator
}
758
View as plain text