Text file
src/runtime/asm_riscv64.s
1 // Copyright 2017 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include "go_asm.h"
6 #include "funcdata.h"
7 #include "textflag.h"
8 #include "cgo/abi_riscv64.h"
9
10
11 // When building with -buildmode=c-shared, this symbol is called when the shared
12 // library is loaded.
13 TEXT _rt0_riscv64_lib(SB),NOSPLIT,$224
14 // Preserve callee-save registers, along with X1 (LR).
15 MOV X1, (8*3)(X2)
16 SAVE_GPR((8*4))
17 SAVE_FPR((8*16))
18
19 // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go
20 MOV X0, g
21
// Stash argc/argv in globals so _rt0_riscv64_lib_go can pick them up
// later, possibly on a different thread.
22 MOV A0, _rt0_riscv64_lib_argc<>(SB)
23 MOV A1, _rt0_riscv64_lib_argv<>(SB)
24
25 // Synchronous initialization.
26 MOV $runtime·libpreinit(SB), T1
27 JALR RA, T1
28
29 // Create a new thread to do the runtime initialization and return.
// If cgo is available, let the C runtime create the thread; otherwise
// fall through to nocgo and use the runtime's own newosproc0.
30 MOV _cgo_sys_thread_create(SB), T1
31 BEQZ T1, nocgo
32 MOV $_rt0_riscv64_lib_go(SB), A0
33 MOV $0, A1
34 JALR RA, T1
35 JMP restore
36
37 nocgo:
// newosproc0(stacksize, fn): arguments are passed on the stack
// per the Go ABI0 calling convention.
38 MOV $0x800000, A0 // stacksize = 8192KB
39 MOV $_rt0_riscv64_lib_go(SB), A1
40 MOV A0, 8(X2)
41 MOV A1, 16(X2)
42 MOV $runtime·newosproc0(SB), T1
43 JALR RA, T1
44
45 restore:
46 // Restore callee-save registers, along with X1 (LR).
47 MOV (8*3)(X2), X1
48 RESTORE_GPR((8*4))
49 RESTORE_FPR((8*16))
50
51 RET
52
// _rt0_riscv64_lib_go initializes the Go runtime on the thread created by
// _rt0_riscv64_lib: it reloads the saved argc/argv and tail-calls rt0_go
// (JALR with ZERO as the link register, so there is no return).
53 TEXT _rt0_riscv64_lib_go(SB),NOSPLIT,$0
54 MOV _rt0_riscv64_lib_argc<>(SB), A0
55 MOV _rt0_riscv64_lib_argv<>(SB), A1
56 MOV $runtime·rt0_go(SB), T0
57 JALR ZERO, T0
58
// 8-byte globals holding argc/argv across the thread handoff above.
59 DATA _rt0_riscv64_lib_argc<>(SB)/8, $0
60 GLOBL _rt0_riscv64_lib_argc<>(SB),NOPTR, $8
61 DATA _rt0_riscv64_lib_argv<>(SB)/8, $0
62 GLOBL _rt0_riscv64_lib_argv<>(SB),NOPTR, $8
63
64 // func rt0_go()
65 TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
66 // X2 = stack; A0 = argc; A1 = argv
67 SUB $24, X2
68 MOV A0, 8(X2) // argc
69 MOV A1, 16(X2) // argv
70
71 // create istack out of the given (operating system) stack.
72 // _cgo_init may update stackguard.
// Carve a 64 KiB g0 stack out of the OS stack; the guard values set
// here are provisional and are recomputed after _cgo_init below.
73 MOV $runtime·g0(SB), g
74 MOV $(-64*1024), T0
75 ADD T0, X2, T1
76 MOV T1, g_stackguard0(g)
77 MOV T1, g_stackguard1(g)
78 MOV T1, (g_stack+stack_lo)(g)
79 MOV X2, (g_stack+stack_hi)(g)
80
81 // if there is a _cgo_init, call it using the gcc ABI.
82 MOV _cgo_init(SB), T2
83 BEQ T2, ZERO, nocgo
84
85 MOV ZERO, A3 // arg 3: not used
86 MOV ZERO, A2 // arg 2: not used
87 MOV $setg_gcc<>(SB), A1 // arg 1: setg
88 MOV g, A0 // arg 0: G
89 JALR RA, T2
90
91 nocgo:
92 // update stackguard after _cgo_init
93 MOV (g_stack+stack_lo)(g), T0
94 ADD $const_stackGuard, T0
95 MOV T0, g_stackguard0(g)
96 MOV T0, g_stackguard1(g)
97
98 // set the per-goroutine and per-mach "registers"
99 MOV $runtime·m0(SB), T0
100
101 // save m->g0 = g0
102 MOV g, m_g0(T0)
103 // save m0 to g0->m
104 MOV T0, g_m(g)
105
106 CALL runtime·check(SB)
107
108 // args are already prepared
109 CALL runtime·args(SB)
110 CALL runtime·osinit(SB)
111 CALL runtime·schedinit(SB)
112
113 // create a new goroutine to start program
// newproc takes the fn in a stack slot; slot 0 is the (zeroed)
// return-address slot for the ABI0 frame.
114 MOV $runtime·mainPC(SB), T0 // entry
115 SUB $16, X2
116 MOV T0, 8(X2)
117 MOV ZERO, 0(X2)
118 CALL runtime·newproc(SB)
119 ADD $16, X2
120
121 // start this M
// mstart never returns; the scheduler takes over from here.
122 CALL runtime·mstart(SB)
123
124 WORD $0 // crash if reached
125 RET
126
// mstart is the entry point for new Ms. TOPFRAME marks it as the root of
// the stack for the traceback machinery.
127 TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
128 CALL runtime·mstart0(SB)
129 RET // not reached
130
131 // void setg_gcc(G*); set g called from gcc with g in A0
132 TEXT setg_gcc<>(SB),NOSPLIT,$0-0
133 MOV A0, g
134 CALL runtime·save_g(SB)
135 RET
136
137 // func cputicks() int64
138 TEXT runtime·cputicks<ABIInternal>(SB),NOSPLIT,$0-0
139 // RDTIME to emulate cpu ticks
140 // RDCYCLE reads counter that is per HART(core) based
141 // according to the riscv manual, see issue 46737
// Result is returned in X10 (A0), the first ABIInternal result register.
142 RDTIME X10
143 RET
144
145 // systemstack_switch is a dummy routine that systemstack leaves at the bottom
146 // of the G stack. We need to distinguish the routine that
147 // lives at the bottom of the G stack from the one that lives
148 // at the top of the system stack because the one at the top of
149 // the system stack terminates the stack walk (see topofstack()).
150 TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
// UNDEF traps if this is ever actually executed; the symbol exists only
// so its PC can be planted in g->sched by gosave_systemstack_switch.
151 UNDEF
152 JALR RA, ZERO // make sure this function is not leaf
153 RET
154
155 // func systemstack(fn func())
// systemstack runs fn on the system (g0) stack, switching stacks if the
// caller is a regular goroutine, then switches back.
156 TEXT runtime·systemstack(SB), NOSPLIT, $0-8
157 MOV fn+0(FP), CTXT // CTXT = fn
158 MOV g_m(g), T0 // T0 = m
159
// Already on gsignal or g0? Then no switch is needed.
160 MOV m_gsignal(T0), T1 // T1 = gsignal
161 BEQ g, T1, noswitch
162
163 MOV m_g0(T0), T1 // T1 = g0
164 BEQ g, T1, noswitch
165
166 MOV m_curg(T0), T2
167 BEQ g, T2, switch
168
169 // Bad: g is not gsignal, not g0, not curg. What is it?
170 // Hide call from linker nosplit analysis.
171 MOV $runtime·badsystemstack(SB), T1
172 JALR RA, T1
173
174 switch:
175 // save our state in g->sched. Pretend to
176 // be systemstack_switch if the G stack is scanned.
177 CALL gosave_systemstack_switch<>(SB)
178
179 // switch to g0
// T1 still holds g0 from the comparison above.
180 MOV T1, g
181 CALL runtime·save_g(SB)
182 MOV (g_sched+gobuf_sp)(g), T0
183 MOV T0, X2
184
185 // call target function
186 MOV 0(CTXT), T1 // code pointer
187 JALR RA, T1
188
189 // switch back to g
190 MOV g_m(g), T0
191 MOV m_curg(T0), g
192 CALL runtime·save_g(SB)
193 MOV (g_sched+gobuf_sp)(g), X2
// Clear the saved SP so stale values are never reused.
194 MOV ZERO, (g_sched+gobuf_sp)(g)
195 RET
196
197 noswitch:
198 // already on m stack, just call directly
199 // Using a tail call here cleans up tracebacks since we won't stop
200 // at an intermediate systemstack.
201 MOV 0(CTXT), T1 // code pointer
// Pop our frame (the saved LR slot) before tail-calling fn.
202 ADD $8, X2
203 JMP (T1)
204
205 // func switchToCrashStack0(fn func())
// switchToCrashStack0 switches to the dedicated crash stack (gcrash) and
// calls fn there. Used when the current stack may itself be corrupt.
// fn arrives in X10 per ABIInternal; fn must not return.
206 TEXT runtime·switchToCrashStack0<ABIInternal>(SB), NOSPLIT, $0-8
207 MOV X10, CTXT // context register
208 MOV g_m(g), X11 // curm
209
210 // set g to gcrash
211 MOV $runtime·gcrash(SB), g // g = &gcrash
212 CALL runtime·save_g(SB) // clobbers X31
213 MOV X11, g_m(g) // g.m = curm
214 MOV g, m_g0(X11) // curm.g0 = g
215
216 // switch to crashstack
// Leave a small (4 words) scratch margin below stack_hi.
217 MOV (g_stack+stack_hi)(g), X11
218 SUB $(4*8), X11
219 MOV X11, X2
220
221 // call target function
222 MOV 0(CTXT), X10
223 JALR X1, X10
224
225 // should never return
226 CALL runtime·abort(SB)
227 UNDEF
228
229 /*
230 * support for morestack
231 */
232
233 // Called during function prolog when more stack is needed.
234 // Called with return address (i.e. caller's PC) in X5 (aka T0),
235 // and the LR register contains the caller's LR.
236 //
237 // The traceback routines see morestack on a g0 as being
238 // the top of a stack (for example, morestack calling newstack
239 // calling the scheduler calling newm calling gc), so we must
240 // record an argument size. For that purpose, it has no arguments.
241
242 // func morestack()
243 TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
244 // Called from f.
245 // Set g->sched to context in f.
246 MOV X2, (g_sched+gobuf_sp)(g)
247 MOV T0, (g_sched+gobuf_pc)(g)
248 MOV RA, (g_sched+gobuf_lr)(g)
249 MOV CTXT, (g_sched+gobuf_ctxt)(g)
250
251 // Cannot grow scheduler stack (m->g0).
// Growing g0 or gsignal is a fatal error: abort via the badmorestack
// helpers rather than recurse.
252 MOV g_m(g), A0
253 MOV m_g0(A0), A1
254 BNE g, A1, 3(PC)
255 CALL runtime·badmorestackg0(SB)
256 CALL runtime·abort(SB)
257
258 // Cannot grow signal stack (m->gsignal).
259 MOV m_gsignal(A0), A1
260 BNE g, A1, 3(PC)
261 CALL runtime·badmorestackgsignal(SB)
262 CALL runtime·abort(SB)
263
264 // Called from f.
265 // Set m->morebuf to f's caller.
266 MOV RA, (m_morebuf+gobuf_pc)(A0) // f's caller's PC
267 MOV X2, (m_morebuf+gobuf_sp)(A0) // f's caller's SP
268 MOV g, (m_morebuf+gobuf_g)(A0)
269
270 // Call newstack on m->g0's stack.
271 MOV m_g0(A0), g
272 CALL runtime·save_g(SB)
273 MOV (g_sched+gobuf_sp)(g), X2
274 // Create a stack frame on g0 to call newstack.
275 MOV ZERO, -8(X2) // Zero saved LR in frame
276 SUB $8, X2
277 CALL runtime·newstack(SB)
278
279 // Not reached, but make sure the return PC from the call to newstack
280 // is still in this function, and not the beginning of the next.
281 UNDEF
282
283 // func morestack_noctxt()
284 TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
285 // Force SPWRITE. This function doesn't actually write SP,
286 // but it is called with a special calling convention where
287 // the caller doesn't save LR on stack but passes it as a
288 // register, and the unwinder currently doesn't understand.
289 // Make it SPWRITE to stop unwinding. (See issue 54332)
290 MOV X2, X2
291
// No closure context for this variant: clear CTXT, then share the
// rest of the path with morestack.
292 MOV ZERO, CTXT
293 JMP runtime·morestack(SB)
294
295 // AES hashing not implemented for riscv64
// These stubs tail-call the portable Go fallbacks, preserving the
// ABIInternal register arguments unchanged.
296 TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
297 JMP runtime·memhashFallback<ABIInternal>(SB)
298 TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
299 JMP runtime·strhashFallback<ABIInternal>(SB)
300 TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
301 JMP runtime·memhash32Fallback<ABIInternal>(SB)
302 TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
303 JMP runtime·memhash64Fallback<ABIInternal>(SB)
304
305 // restore state from Gobuf; longjmp
306
307 // func gogo(buf *gobuf)
308 TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
309 MOV buf+0(FP), T0
310 MOV gobuf_g(T0), T1
// Loading from 0(T1) faults immediately if the gobuf's g is nil.
311 MOV 0(T1), ZERO // make sure g != nil
312 JMP gogo<>(SB)
313
// gogo<> expects T0 = gobuf, T1 = new g. It restores SP/LR/CTXT from the
// gobuf, zeroes them so they are not reused, and jumps to the saved PC.
314 TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
315 MOV T1, g
316 CALL runtime·save_g(SB)
317
318 MOV gobuf_sp(T0), X2
319 MOV gobuf_lr(T0), RA
320 MOV gobuf_ctxt(T0), CTXT
321 MOV ZERO, gobuf_sp(T0)
322 MOV ZERO, gobuf_lr(T0)
323 MOV ZERO, gobuf_ctxt(T0)
324 MOV gobuf_pc(T0), T0
325 JALR ZERO, T0
326
327 // func procyieldAsm(cycles uint32)
// No low-latency yield instruction is used here; this is a no-op.
328 TEXT runtime·procyieldAsm(SB),NOSPLIT,$0-0
329 RET
330
331 // Switch to m->g0's stack, call fn(g).
332 // Fn must never return. It should gogo(&g->sched)
333 // to keep running g.
334
335 // func mcall(fn func(*g))
336 TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
// fn arrives in X10 per ABIInternal.
337 MOV X10, CTXT
338
339 // Save caller state in g->sched
340 MOV X2, (g_sched+gobuf_sp)(g)
341 MOV RA, (g_sched+gobuf_pc)(g)
342 MOV ZERO, (g_sched+gobuf_lr)(g)
343
344 // Switch to m->g0 & its stack, call fn.
345 MOV g, X10
346 MOV g_m(g), T1
347 MOV m_g0(T1), g
348 CALL runtime·save_g(SB)
// mcall must not be invoked while already on g0.
349 BNE g, X10, 2(PC)
350 JMP runtime·badmcall(SB)
351 MOV 0(CTXT), T1 // code pointer
352 MOV (g_sched+gobuf_sp)(g), X2 // sp = m->g0->sched.sp
353 // we don't need special macro for regabi since arg0(X10) = g
354 SUB $16, X2
355 MOV X10, 8(X2) // setup g
356 MOV ZERO, 0(X2) // clear return address
357 JALR RA, T1
// fn must never return; if it does, crash loudly.
358 JMP runtime·badmcall2(SB)
359
360 // Save state of caller into g->sched,
361 // but using fake PC from systemstack_switch.
362 // Must only be called from functions with no locals ($0)
363 // or else unwinding from systemstack_switch is incorrect.
364 // Smashes X31.
365 TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
// Record a PC inside systemstack_switch (past its prologue) so a stack
// scan of this g attributes the frame to systemstack_switch.
366 MOV $runtime·systemstack_switch(SB), X31
367 ADD $8, X31 // get past prologue
368 MOV X31, (g_sched+gobuf_pc)(g)
369 MOV X2, (g_sched+gobuf_sp)(g)
370 MOV ZERO, (g_sched+gobuf_lr)(g)
371 // Assert ctxt is zero. See func save.
372 MOV (g_sched+gobuf_ctxt)(g), X31
373 BEQ ZERO, X31, 2(PC)
374 CALL runtime·abort(SB)
375 RET
376
377 // func asmcgocall_no_g(fn, arg unsafe.Pointer)
378 // Call fn(arg) aligned appropriately for the gcc ABI.
379 // Called on a system stack, and there may be no g yet (during needm).
380 TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
// gcc ABI: argument in X10 (a0), target in X11; no stack switch since
// we are already on a system stack.
381 MOV fn+0(FP), X11
382 MOV arg+8(FP), X10
383 JALR RA, (X11)
384 RET
385
386 // func asmcgocall(fn, arg unsafe.Pointer) int32
387 // Call fn(arg) on the scheduler stack,
388 // aligned appropriately for the gcc ABI.
389 // See cgocall.go for more details.
390 TEXT ·asmcgocall(SB),NOSPLIT,$0-20
391 MOV fn+0(FP), X11
392 MOV arg+8(FP), X10
393
394 MOV X2, X8 // save original stack pointer
395 MOV g, X9
396
397 // Figure out if we need to switch to m->g0 stack.
398 // We get called to create new OS threads too, and those
399 // come in on the m->g0 stack already. Or we might already
400 // be on the m->gsignal stack.
401 MOV g_m(g), X6
402 MOV m_gsignal(X6), X7
403 BEQ X7, g, g0
404 MOV m_g0(X6), X7
405 BEQ X7, g, g0
406
// Switch to g0: save state so the GC can scan this g while we're
// away, then install g0 and its stack pointer.
407 CALL gosave_systemstack_switch<>(SB)
408 MOV X7, g
409 CALL runtime·save_g(SB)
410 MOV (g_sched+gobuf_sp)(g), X2
411
412 // Now on a scheduling stack (a pthread-created stack).
413 g0:
414 // Save room for two of our pointers.
415 SUB $16, X2
416 MOV X9, 0(X2) // save old g on stack
417 MOV (g_stack+stack_hi)(X9), X9
418 SUB X8, X9, X8
419 MOV X8, 8(X2) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
420
421 JALR RA, (X11)
422
423 // Restore g, stack pointer. X10 is return value.
424 MOV 0(X2), g
425 CALL runtime·save_g(SB)
// Recompute SP from the saved depth relative to the (possibly moved)
// old stack's stack_hi.
426 MOV (g_stack+stack_hi)(g), X5
427 MOV 8(X2), X6
428 SUB X6, X5, X6
429 MOV X6, X2
430
431 MOVW X10, ret+16(FP)
432 RET
433
434 // func asminit()
// Nothing to initialize on riscv64.
435 TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
436 RET
437
438 // reflectcall: call a function with the given argument list
439 // func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
440 // we don't have variable-sized frames, so we use a small number
441 // of constant-sized-frame functions to encode a few bits of size in the pc.
442 // Caution: ugly multiline assembly macros in your future!
443
// DISPATCH jumps to NAME if the frame size in T0 is <= MAXSIZE;
// otherwise it falls through to the next DISPATCH line.
444 #define DISPATCH(NAME,MAXSIZE) \
445 MOV $MAXSIZE, T1 \
446 BLTU T1, T0, 3(PC) \
447 MOV $NAME(SB), T2; \
448 JALR ZERO, T2
449 // Note: can't just "BR NAME(SB)" - bad inlining results.
450
451 // func call(stackArgsType *rtype, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
452 TEXT reflect·call(SB), NOSPLIT, $0-0
453 JMP ·reflectcall(SB)
454
455 // func call(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// Dispatches to the smallest fixed-frame call* routine that fits
// frameSize; sizes are powers of two from 16 bytes to 1 GiB.
456 TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
457 MOVWU frameSize+32(FP), T0
458 DISPATCH(runtime·call16, 16)
459 DISPATCH(runtime·call32, 32)
460 DISPATCH(runtime·call64, 64)
461 DISPATCH(runtime·call128, 128)
462 DISPATCH(runtime·call256, 256)
463 DISPATCH(runtime·call512, 512)
464 DISPATCH(runtime·call1024, 1024)
465 DISPATCH(runtime·call2048, 2048)
466 DISPATCH(runtime·call4096, 4096)
467 DISPATCH(runtime·call8192, 8192)
468 DISPATCH(runtime·call16384, 16384)
469 DISPATCH(runtime·call32768, 32768)
470 DISPATCH(runtime·call65536, 65536)
471 DISPATCH(runtime·call131072, 131072)
472 DISPATCH(runtime·call262144, 262144)
473 DISPATCH(runtime·call524288, 524288)
474 DISPATCH(runtime·call1048576, 1048576)
475 DISPATCH(runtime·call2097152, 2097152)
476 DISPATCH(runtime·call4194304, 4194304)
477 DISPATCH(runtime·call8388608, 8388608)
478 DISPATCH(runtime·call16777216, 16777216)
479 DISPATCH(runtime·call33554432, 33554432)
480 DISPATCH(runtime·call67108864, 67108864)
481 DISPATCH(runtime·call134217728, 134217728)
482 DISPATCH(runtime·call268435456, 268435456)
483 DISPATCH(runtime·call536870912, 536870912)
484 DISPATCH(runtime·call1073741824, 1073741824)
485 MOV $runtime·badreflectcall(SB), T2
486 JALR ZERO, T2
487
// CALLFN defines one fixed-frame call* routine: it byte-copies the stack
// arguments into the local frame, loads register arguments from regArgs
// (X25), calls fn, then spills result registers and copies stack results
// back via callRet. The macro body uses /* */ comments because // would
// swallow the continuation backslashes.
488 #define CALLFN(NAME,MAXSIZE) \
489 TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
490 NO_LOCAL_POINTERS; \
491 /* copy arguments to stack */ \
492 MOV stackArgs+16(FP), A1; \
493 MOVWU stackArgsSize+24(FP), A2; \
494 MOV X2, A3; \
495 ADD $8, A3; \
496 ADD A3, A2; \
497 BEQ A3, A2, 6(PC); \
498 MOVBU (A1), A4; \
499 ADD $1, A1; \
500 MOVB A4, (A3); \
501 ADD $1, A3; \
502 JMP -5(PC); \
503 /* set up argument registers */ \
504 MOV regArgs+40(FP), X25; \
505 CALL ·unspillArgs(SB); \
506 /* call function */ \
507 MOV f+8(FP), CTXT; \
508 MOV (CTXT), X25; \
509 PCDATA $PCDATA_StackMapIndex, $0; \
510 JALR RA, X25; \
511 /* copy return values back */ \
512 MOV regArgs+40(FP), X25; \
513 CALL ·spillArgs(SB); \
514 MOV stackArgsType+0(FP), A5; \
515 MOV stackArgs+16(FP), A1; \
516 MOVWU stackArgsSize+24(FP), A2; \
517 MOVWU stackRetOffset+28(FP), A4; \
518 ADD $8, X2, A3; \
519 ADD A4, A3; \
520 ADD A4, A1; \
521 SUB A4, A2; \
522 CALL callRet<>(SB); \
523 RET
524
525 // callRet copies return values back at the end of call*. This is a
526 // separate function so it can allocate stack space for the arguments
527 // to reflectcallmove. It does not follow the Go ABI; it expects its
528 // arguments in registers.
529 TEXT callRet<>(SB), NOSPLIT, $40-0
530 NO_LOCAL_POINTERS
// Lay out reflectcallmove's five arguments (typ, dst, src, size,
// regArgs) in this frame's outgoing-argument slots.
531 MOV A5, 8(X2)
532 MOV A1, 16(X2)
533 MOV A3, 24(X2)
534 MOV A2, 32(X2)
535 MOV X25, 40(X2)
536 CALL runtime·reflectcallmove(SB)
537 RET
538
// One call* routine per power-of-two frame size, matching the DISPATCH
// table in reflectcall above.
539 CALLFN(·call16, 16)
540 CALLFN(·call32, 32)
541 CALLFN(·call64, 64)
542 CALLFN(·call128, 128)
543 CALLFN(·call256, 256)
544 CALLFN(·call512, 512)
545 CALLFN(·call1024, 1024)
546 CALLFN(·call2048, 2048)
547 CALLFN(·call4096, 4096)
548 CALLFN(·call8192, 8192)
549 CALLFN(·call16384, 16384)
550 CALLFN(·call32768, 32768)
551 CALLFN(·call65536, 65536)
552 CALLFN(·call131072, 131072)
553 CALLFN(·call262144, 262144)
554 CALLFN(·call524288, 524288)
555 CALLFN(·call1048576, 1048576)
556 CALLFN(·call2097152, 2097152)
557 CALLFN(·call4194304, 4194304)
558 CALLFN(·call8388608, 8388608)
559 CALLFN(·call16777216, 16777216)
560 CALLFN(·call33554432, 33554432)
561 CALLFN(·call67108864, 67108864)
562 CALLFN(·call134217728, 134217728)
563 CALLFN(·call268435456, 268435456)
564 CALLFN(·call536870912, 536870912)
565 CALLFN(·call1073741824, 1073741824)
566
567 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
568 // Must obey the gcc calling convention.
569 TEXT _cgo_topofstack(SB),NOSPLIT,$8
570 // g (X27) and REG_TMP (X31) might be clobbered by load_g.
571 // X27 is callee-save in the gcc calling convention, so save it.
572 MOV g, savedX27-8(SP)
573
574 CALL runtime·load_g(SB)
575 MOV g_m(g), X5
576 MOV m_curg(X5), X5
577 MOV (g_stack+stack_hi)(X5), X10 // return value in X10
578
579 MOV savedX27-8(SP), g
580 RET
581
582 // func goexit(neverCallThisFunction)
583 // The top-most function running on a goroutine, returns to goexit+PCQuantum*2.
584 // Note that the NOPs are written in a manner that will not be compressed,
585 // since the offset must be known by the runtime.
586 TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
587 WORD $0x00000013 // NOP
588 JMP runtime·goexit1(SB) // does not return
589 // traceback from goexit1 must hit code range of goexit
590 WORD $0x00000013 // NOP
591
592 // This is called from .init_array and follows the platform, not the Go ABI.
// Links the incoming moduledata (in X10, the psABI first argument) onto
// the runtime's lastmoduledatap list.
593 TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
594 // Use X31 as it is a scratch register in both the Go ABI and psABI.
595 MOV runtime·lastmoduledatap(SB), X31
596 MOV X10, moduledata_next(X31)
597 MOV X10, runtime·lastmoduledatap(SB)
598 RET
599
600 // func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
601 // See cgocall.go for more details.
602 TEXT ·cgocallback(SB),NOSPLIT,$24-24
603 NO_LOCAL_POINTERS
604
605 // Skip cgocallbackg, just dropm when fn is nil, and frame is the saved g.
606 // It is used to dropm while thread is exiting.
607 MOV fn+0(FP), X7
608 BNE ZERO, X7, loadg
609 // Restore the g from frame.
610 MOV frame+8(FP), g
611 JMP dropm
612
613 loadg:
614 // Load m and g from thread-local storage.
615 MOVBU runtime·iscgo(SB), X5
616 BEQ ZERO, X5, nocgo
617 CALL runtime·load_g(SB)
618 nocgo:
619
620 // If g is nil, Go did not create the current thread,
621 // or if this thread never called into Go on pthread platforms.
622 // Call needm to obtain one for temporary use.
623 // In this case, we're running on the thread stack, so there's
624 // lots of space, but the linker doesn't know. Hide the call from
625 // the linker analysis by using an indirect call.
626 BEQ ZERO, g, needm
627
628 MOV g_m(g), X5
629 MOV X5, savedm-8(SP)
630 JMP havem
631
632 needm:
633 MOV g, savedm-8(SP) // g is zero, so is m.
634 MOV $runtime·needAndBindM(SB), X6
635 JALR RA, X6
636
637 // Set m->sched.sp = SP, so that if a panic happens
638 // during the function we are about to execute, it will
639 // have a valid SP to run on the g0 stack.
640 // The next few lines (after the havem label)
641 // will save this SP onto the stack and then write
642 // the same SP back to m->sched.sp. That seems redundant,
643 // but if an unrecovered panic happens, unwindm will
644 // restore the g->sched.sp from the stack location
645 // and then systemstack will try to use it. If we don't set it here,
646 // that restored SP will be uninitialized (typically 0) and
647 // will not be usable.
648 MOV g_m(g), X5
649 MOV m_g0(X5), X6
650 MOV X2, (g_sched+gobuf_sp)(X6)
651
652 havem:
653 // Now there's a valid m, and we're running on its m->g0.
654 // Save current m->g0->sched.sp on stack and then set it to SP.
655 // Save current sp in m->g0->sched.sp in preparation for
656 // switch back to m->curg stack.
657 // NOTE: unwindm knows that the saved g->sched.sp is at 8(X2) aka savedsp-24(SP).
658 MOV m_g0(X5), X6
659 MOV (g_sched+gobuf_sp)(X6), X7
660 MOV X7, savedsp-24(SP) // must match frame size
661 MOV X2, (g_sched+gobuf_sp)(X6)
662
663 // Switch to m->curg stack and call runtime.cgocallbackg.
664 // Because we are taking over the execution of m->curg
665 // but *not* resuming what had been running, we need to
666 // save that information (m->curg->sched) so we can restore it.
667 // We can restore m->curg->sched.sp easily, because calling
668 // runtime.cgocallbackg leaves SP unchanged upon return.
669 // To save m->curg->sched.pc, we push it onto the curg stack and
670 // open a frame the same size as cgocallback's g0 frame.
671 // Once we switch to the curg stack, the pushed PC will appear
672 // to be the return PC of cgocallback, so that the traceback
673 // will seamlessly trace back into the earlier calls.
674 MOV m_curg(X5), g
675 CALL runtime·save_g(SB)
676 MOV (g_sched+gobuf_sp)(g), X6 // prepare stack as X6
677 MOV (g_sched+gobuf_pc)(g), X7
678 MOV X7, -(24+8)(X6) // "saved LR"; must match frame size
679 // Gather our arguments into registers.
680 MOV fn+0(FP), X7
681 MOV frame+8(FP), X8
682 MOV ctxt+16(FP), X9
683 MOV $-(24+8)(X6), X2 // switch stack; must match frame size
// cgocallbackg(fn, frame, ctxt) takes its arguments on the stack (ABI0).
684 MOV X7, 8(X2)
685 MOV X8, 16(X2)
686 MOV X9, 24(X2)
687 CALL runtime·cgocallbackg(SB)
688
689 // Restore g->sched (== m->curg->sched) from saved values.
690 MOV 0(X2), X7
691 MOV X7, (g_sched+gobuf_pc)(g)
692 MOV $(24+8)(X2), X6 // must match frame size
693 MOV X6, (g_sched+gobuf_sp)(g)
694
695 // Switch back to m->g0's stack and restore m->g0->sched.sp.
696 // (Unlike m->curg, the g0 goroutine never uses sched.pc,
697 // so we do not have to restore it.)
698 MOV g_m(g), X5
699 MOV m_g0(X5), g
700 CALL runtime·save_g(SB)
701 MOV (g_sched+gobuf_sp)(g), X2
702 MOV savedsp-24(SP), X6 // must match frame size
703 MOV X6, (g_sched+gobuf_sp)(g)
704
705 // If the m on entry was nil, we called needm above to borrow an m,
706 // 1. for the duration of the call on non-pthread platforms,
707 // 2. or the duration of the C thread alive on pthread platforms.
708 // If the m on entry wasn't nil,
709 // 1. the thread might be a Go thread,
710 // 2. or it wasn't the first call from a C thread on pthread platforms,
711 // since then we skip dropm to reuse the m in the first call.
712 MOV savedm-8(SP), X5
713 BNE ZERO, X5, droppedm
714
715 // Skip dropm to reuse it in the next call, when a pthread key has been created.
716 MOV _cgo_pthread_key_created(SB), X5
717 // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm.
718 BEQ ZERO, X5, dropm
719 MOV (X5), X5
720 BNE ZERO, X5, droppedm
721
722 dropm:
// Indirect call hides dropm from the linker's nosplit analysis,
// matching the needm call above.
723 MOV $runtime·dropm(SB), X6
724 JALR RA, X6
725 droppedm:
726
727 // Done!
728 RET
729
// breakpoint and abort both trap via EBREAK; they differ only in how the
// runtime interprets the resulting fault.
730 TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
731 EBREAK
732 RET
733
734 TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
735 EBREAK
736 RET
737
738 // void setg(G*); set g. for use by needm.
739 TEXT runtime·setg(SB), NOSPLIT, $0-8
740 MOV gg+0(FP), g
741 // This only happens if iscgo, so jump straight to save_g
742 CALL runtime·save_g(SB)
743 RET
744
// checkASM reports whether the assembly/Go offset assumptions hold;
// trivially true on riscv64.
745 TEXT ·checkASM(SB),NOSPLIT,$0-1
746 MOV $1, T0
747 MOV T0, ret+0(FP)
748 RET
749
750 // spillArgs stores return values from registers to a *internal/abi.RegArgs in X25.
// spillArgs stores the 16 integer and 16 FP ABIInternal argument/result
// registers into the RegArgs struct pointed to by X25. Slot order must
// match unspillArgs and internal/abi exactly.
751 TEXT ·spillArgs(SB),NOSPLIT,$0-0
752 MOV X10, (0*8)(X25)
753 MOV X11, (1*8)(X25)
754 MOV X12, (2*8)(X25)
755 MOV X13, (3*8)(X25)
756 MOV X14, (4*8)(X25)
757 MOV X15, (5*8)(X25)
758 MOV X16, (6*8)(X25)
759 MOV X17, (7*8)(X25)
760 MOV X8, (8*8)(X25)
761 MOV X9, (9*8)(X25)
762 MOV X18, (10*8)(X25)
763 MOV X19, (11*8)(X25)
764 MOV X20, (12*8)(X25)
765 MOV X21, (13*8)(X25)
766 MOV X22, (14*8)(X25)
767 MOV X23, (15*8)(X25)
768 MOVD F10, (16*8)(X25)
769 MOVD F11, (17*8)(X25)
770 MOVD F12, (18*8)(X25)
771 MOVD F13, (19*8)(X25)
772 MOVD F14, (20*8)(X25)
773 MOVD F15, (21*8)(X25)
774 MOVD F16, (22*8)(X25)
775 MOVD F17, (23*8)(X25)
776 MOVD F8, (24*8)(X25)
777 MOVD F9, (25*8)(X25)
778 MOVD F18, (26*8)(X25)
779 MOVD F19, (27*8)(X25)
780 MOVD F20, (28*8)(X25)
781 MOVD F21, (29*8)(X25)
782 MOVD F22, (30*8)(X25)
783 MOVD F23, (31*8)(X25)
784 RET
785
786 // unspillArgs loads args into registers from a *internal/abi.RegArgs in X25.
// unspillArgs is the exact inverse of spillArgs: it reloads the 16
// integer and 16 FP ABIInternal registers from the RegArgs in X25.
787 TEXT ·unspillArgs(SB),NOSPLIT,$0-0
788 MOV (0*8)(X25), X10
789 MOV (1*8)(X25), X11
790 MOV (2*8)(X25), X12
791 MOV (3*8)(X25), X13
792 MOV (4*8)(X25), X14
793 MOV (5*8)(X25), X15
794 MOV (6*8)(X25), X16
795 MOV (7*8)(X25), X17
796 MOV (8*8)(X25), X8
797 MOV (9*8)(X25), X9
798 MOV (10*8)(X25), X18
799 MOV (11*8)(X25), X19
800 MOV (12*8)(X25), X20
801 MOV (13*8)(X25), X21
802 MOV (14*8)(X25), X22
803 MOV (15*8)(X25), X23
804 MOVD (16*8)(X25), F10
805 MOVD (17*8)(X25), F11
806 MOVD (18*8)(X25), F12
807 MOVD (19*8)(X25), F13
808 MOVD (20*8)(X25), F14
809 MOVD (21*8)(X25), F15
810 MOVD (22*8)(X25), F16
811 MOVD (23*8)(X25), F17
812 MOVD (24*8)(X25), F8
813 MOVD (25*8)(X25), F9
814 MOVD (26*8)(X25), F18
815 MOVD (27*8)(X25), F19
816 MOVD (28*8)(X25), F20
817 MOVD (29*8)(X25), F21
818 MOVD (30*8)(X25), F22
819 MOVD (31*8)(X25), F23
820 RET
821
822 // gcWriteBarrier informs the GC about heap pointer writes.
823 //
824 // gcWriteBarrier does NOT follow the Go ABI. It accepts the
825 // number of bytes of buffer needed in X24, and returns a pointer
826 // to the buffer space in X24.
827 // It clobbers X31 aka T6 (the linker temp register - REG_TMP).
828 // The act of CALLing gcWriteBarrier will clobber RA (LR).
829 // It does not clobber any other general-purpose registers,
830 // but may clobber others (e.g., floating point registers).
831 TEXT gcWriteBarrier<>(SB),NOSPLIT,$208
832 // Save the registers clobbered by the fast path.
833 MOV A0, 24*8(X2)
834 MOV A1, 25*8(X2)
835 retry:
// Fast path: bump p.wbBuf.next by the requested byte count (X24).
836 MOV g_m(g), A0
837 MOV m_p(A0), A0
838 MOV (p_wbBuf+wbBuf_next)(A0), A1
839 MOV (p_wbBuf+wbBuf_end)(A0), T6 // T6 is linker temp register (REG_TMP)
840 // Increment wbBuf.next position.
841 ADD X24, A1
842 // Is the buffer full?
843 BLTU T6, A1, flush
844 // Commit to the larger buffer.
845 MOV A1, (p_wbBuf+wbBuf_next)(A0)
846 // Make the return value (the original next position)
847 SUB X24, A1, X24
848 // Restore registers.
849 MOV 24*8(X2), A0
850 MOV 25*8(X2), A1
851 RET
852
853 flush:
854 // Save all general purpose registers since these could be
855 // clobbered by wbBufFlush and were not saved by the caller.
856 MOV T0, 1*8(X2)
857 MOV T1, 2*8(X2)
858 // X0 is zero register
859 // X1 is LR, saved by prologue
860 // X2 is SP
861 // X3 is GP
862 // X4 is TP
863 MOV X7, 3*8(X2)
864 MOV X8, 4*8(X2)
865 MOV X9, 5*8(X2)
866 // X10 already saved (A0)
867 // X11 already saved (A1)
868 MOV X12, 6*8(X2)
869 MOV X13, 7*8(X2)
870 MOV X14, 8*8(X2)
871 MOV X15, 9*8(X2)
872 MOV X16, 10*8(X2)
873 MOV X17, 11*8(X2)
874 MOV X18, 12*8(X2)
875 MOV X19, 13*8(X2)
876 MOV X20, 14*8(X2)
877 MOV X21, 15*8(X2)
878 MOV X22, 16*8(X2)
879 MOV X23, 17*8(X2)
880 MOV X24, 18*8(X2)
881 MOV X25, 19*8(X2)
882 MOV X26, 20*8(X2)
883 // X27 is g.
884 MOV X28, 21*8(X2)
885 MOV X29, 22*8(X2)
886 MOV X30, 23*8(X2)
887 // X31 is tmp register.
888
// Drain the buffer to the GC work queue, then restore and retry the
// fast path with a now-empty buffer.
889 CALL runtime·wbBufFlush(SB)
890
891 MOV 1*8(X2), T0
892 MOV 2*8(X2), T1
893 MOV 3*8(X2), X7
894 MOV 4*8(X2), X8
895 MOV 5*8(X2), X9
896 MOV 6*8(X2), X12
897 MOV 7*8(X2), X13
898 MOV 8*8(X2), X14
899 MOV 9*8(X2), X15
900 MOV 10*8(X2), X16
901 MOV 11*8(X2), X17
902 MOV 12*8(X2), X18
903 MOV 13*8(X2), X19
904 MOV 14*8(X2), X20
905 MOV 15*8(X2), X21
906 MOV 16*8(X2), X22
907 MOV 17*8(X2), X23
908 MOV 18*8(X2), X24
909 MOV 19*8(X2), X25
910 MOV 20*8(X2), X26
911 MOV 21*8(X2), X28
912 MOV 22*8(X2), X29
913 MOV 23*8(X2), X30
914
915 JMP retry
916
// gcWriteBarrierN requests N*8 bytes of write barrier buffer space in
// X24 and tail-calls the shared gcWriteBarrier<> implementation.
917 TEXT runtime·gcWriteBarrier1<ABIInternal>(SB),NOSPLIT,$0
918 MOV $8, X24
919 JMP gcWriteBarrier<>(SB)
920 TEXT runtime·gcWriteBarrier2<ABIInternal>(SB),NOSPLIT,$0
921 MOV $16, X24
922 JMP gcWriteBarrier<>(SB)
923 TEXT runtime·gcWriteBarrier3<ABIInternal>(SB),NOSPLIT,$0
924 MOV $24, X24
925 JMP gcWriteBarrier<>(SB)
926 TEXT runtime·gcWriteBarrier4<ABIInternal>(SB),NOSPLIT,$0
927 MOV $32, X24
928 JMP gcWriteBarrier<>(SB)
929 TEXT runtime·gcWriteBarrier5<ABIInternal>(SB),NOSPLIT,$0
930 MOV $40, X24
931 JMP gcWriteBarrier<>(SB)
932 TEXT runtime·gcWriteBarrier6<ABIInternal>(SB),NOSPLIT,$0
933 MOV $48, X24
934 JMP gcWriteBarrier<>(SB)
935 TEXT runtime·gcWriteBarrier7<ABIInternal>(SB),NOSPLIT,$0
936 MOV $56, X24
937 JMP gcWriteBarrier<>(SB)
938 TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
939 MOV $64, X24
940 JMP gcWriteBarrier<>(SB)
941
942 TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
943 NO_LOCAL_POINTERS
944 // Save all 16 int registers that could have an index in them.
945 // They may be pointers, but if they are they are dead.
946 // Skip X0 aka ZERO, X1 aka LR, X2 aka SP, X3 aka GP, X4 aka TP.
947 MOV X5, 24(X2)
948 MOV X6, 32(X2)
949 MOV X7, 40(X2)
950 MOV X8, 48(X2)
951 MOV X9, 56(X2)
952 MOV X10, 64(X2)
953 MOV X11, 72(X2)
954 MOV X12, 80(X2)
955 MOV X13, 88(X2)
956 MOV X14, 96(X2)
957 MOV X15, 104(X2)
958 MOV X16, 112(X2)
959 MOV X17, 120(X2)
960 MOV X18, 128(X2)
961 MOV X19, 136(X2)
962 MOV X20, 144(X2)
963
// panicBounds64 decodes which index/length pair overflowed from the
// caller's PC and the saved register block.
964 MOV X1, X10 // PC immediately after call to panicBounds
965 ADD $24, X2, X11 // pointer to save area
966 CALL runtime·panicBounds64<ABIInternal>(SB)
967 RET
968
// mainPC is a read-only 8-byte cell holding the entry PC of runtime.main,
// used by rt0_go when creating the first goroutine.
969 DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
970 GLOBL runtime·mainPC(SB),RODATA,$8
971
View as plain text