// Source file: src/cmd/compile/internal/wasm/ssa.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package wasm
     6  
     7  import (
     8  	"cmd/compile/internal/base"
     9  	"cmd/compile/internal/ir"
    10  	"cmd/compile/internal/logopt"
    11  	"cmd/compile/internal/objw"
    12  	"cmd/compile/internal/ssa"
    13  	"cmd/compile/internal/ssagen"
    14  	"cmd/compile/internal/types"
    15  	"cmd/internal/obj"
    16  	"cmd/internal/obj/wasm"
    17  )
    18  
    19  /*
    20  
    21     Wasm implementation
    22     -------------------
    23  
    24     Wasm is a strange Go port because the machine isn't
    25     a register-based machine, threads are different, code paths
    26     are different, etc. We outline those differences here.
    27  
    28     See the design doc for some additional info on this topic.
    29     https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4/edit#heading=h.mjo1bish3xni
    30  
    31     PCs:
    32  
    33     Wasm doesn't have PCs in the normal sense that you can jump
    34     to or call to. Instead, we simulate these PCs using our own construct.
    35  
    36     A PC in the Wasm implementation is the combination of a function
    37     ID and a block ID within that function. The function ID is an index
    38     into a function table which transfers control to the start of the
    39     function in question, and the block ID is a sequential integer
    40     indicating where in the function we are.
    41  
    42     Every function starts with a branch table which transfers control
    43     to the place in the function indicated by the block ID. The block
    44     ID is provided to the function as the sole Wasm argument.
    45  
    46     Block IDs do not encode every possible PC. They only encode places
    47     in the function where it might be suspended. Typically these places
    48     are call sites.
    49  
    50     Sometimes we encode the function ID and block ID separately. When
    51     recorded together as a single integer, we use the value F<<16+B.
    52  
    53     Threads:
    54  
    55     Wasm doesn't (yet) have threads. We have to simulate threads by
    56     keeping goroutine stacks in linear memory and unwinding
    57     the Wasm stack each time we want to switch goroutines.
    58  
    59     To support unwinding a stack, each function call returns on the Wasm
    60     stack a boolean that tells the function whether it should return
    61     immediately or not. When returning immediately, a return address
    62     is left on the top of the Go stack indicating where the goroutine
    63     should be resumed.
    64  
    65     Stack pointer:
    66  
    67     There is a single global stack pointer which records the stack pointer
    68     used by the currently active goroutine. This is just an address in
    69     linear memory where the Go runtime is maintaining the stack for that
    70     goroutine.
    71  
    72     Functions cache the global stack pointer in a local variable for
    73     faster access, but any changes must be spilled to the global variable
    74     before any call and restored from the global variable after any call.
    75  
    76     Calling convention:
    77  
    78     All Go arguments and return values are passed on the Go stack, not
    79     the wasm stack. In addition, return addresses are pushed on the
    80     Go stack at every call point. Return addresses are not used during
    81     normal execution, they are used only when resuming goroutines.
    82     (So they are not really a "return address", they are a "resume address".)
    83  
    84     All Go functions have the Wasm type (i32)->i32. The argument
    85     is the block ID and the return value is the exit immediately flag.
    86  
    87     Callsite:
    88      - write arguments to the Go stack (starting at SP+0)
    89      - push return address to Go stack (8 bytes)
    90      - write local SP to global SP
    91      - push 0 (type i32) to Wasm stack
    92      - issue Call
    93      - restore local SP from global SP
    94      - pop int32 from top of Wasm stack. If nonzero, exit function immediately.
    95      - use results from Go stack (starting at SP+sizeof(args))
    96         - note that the callee will have popped the return address
    97  
    98     Prologue:
    99      - initialize local SP from global SP
   100      - jump to the location indicated by the block ID argument
   101        (which appears in local variable 0)
   102      - at block 0
   103        - check for Go stack overflow, call morestack if needed
   104        - subtract frame size from SP
   105        - note that arguments now start at SP+framesize+8
   106  
   107     Normal epilogue:
   108      - pop frame from Go stack
   109      - pop return address from Go stack
   110      - push 0 (type i32) on the Wasm stack
   111      - return
   112     Exit immediately epilogue:
   113      - push 1 (type i32) on the Wasm stack
   114      - return
   115      - note that the return address and stack frame are left on the Go stack
   116  
   117     The main loop that executes goroutines is wasm_pc_f_loop, in
   118     runtime/rt0_js_wasm.s. It grabs the saved return address from
   119     the top of the Go stack (actually SP-8?), splits it up into F
   120     and B parts, then calls F with its Wasm argument set to B.
   121  
   122     Note that when resuming a goroutine, only the most recent function
   123     invocation of that goroutine appears on the Wasm stack. When that
   124     Wasm function returns normally, the next most recent frame will
   125     then be started up by wasm_pc_f_loop.
   126  
   127     Global 0 is SP (stack pointer)
   128     Global 1 is CTXT (closure pointer)
   129     Global 2 is GP (goroutine pointer)
   130  */
   131  
   132  func Init(arch *ssagen.ArchInfo) {
   133  	arch.LinkArch = &wasm.Linkwasm
   134  	arch.REGSP = wasm.REG_SP
   135  	arch.MAXWIDTH = 1 << 50
   136  
   137  	arch.ZeroRange = zeroRange
   138  	arch.Ginsnop = ginsnop
   139  
   140  	arch.SSAMarkMoves = ssaMarkMoves
   141  	arch.SSAGenValue = ssaGenValue
   142  	arch.SSAGenBlock = ssaGenBlock
   143  }
   144  
   145  func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
   146  	if cnt == 0 {
   147  		return p
   148  	}
   149  	if cnt%8 != 0 {
   150  		base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
   151  	}
   152  
   153  	for i := int64(0); i < cnt; i += 8 {
   154  		p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
   155  		p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
   156  		p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
   157  	}
   158  
   159  	return p
   160  }
   161  
   162  func ginsnop(pp *objw.Progs) *obj.Prog {
   163  	return pp.Prog(wasm.ANop)
   164  }
   165  
   166  func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
   167  }
   168  
   169  func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
   170  	switch b.Kind {
   171  	case ssa.BlockPlain, ssa.BlockDefer:
   172  		if next != b.Succs[0].Block() {
   173  			s.Br(obj.AJMP, b.Succs[0].Block())
   174  		}
   175  
   176  	case ssa.BlockIf:
   177  		switch next {
   178  		case b.Succs[0].Block():
   179  			// if false, jump to b.Succs[1]
   180  			getValue32(s, b.Controls[0])
   181  			s.Prog(wasm.AI32Eqz)
   182  			s.Prog(wasm.AIf)
   183  			s.Br(obj.AJMP, b.Succs[1].Block())
   184  			s.Prog(wasm.AEnd)
   185  		case b.Succs[1].Block():
   186  			// if true, jump to b.Succs[0]
   187  			getValue32(s, b.Controls[0])
   188  			s.Prog(wasm.AIf)
   189  			s.Br(obj.AJMP, b.Succs[0].Block())
   190  			s.Prog(wasm.AEnd)
   191  		default:
   192  			// if true, jump to b.Succs[0], else jump to b.Succs[1]
   193  			getValue32(s, b.Controls[0])
   194  			s.Prog(wasm.AIf)
   195  			s.Br(obj.AJMP, b.Succs[0].Block())
   196  			s.Prog(wasm.AEnd)
   197  			s.Br(obj.AJMP, b.Succs[1].Block())
   198  		}
   199  
   200  	case ssa.BlockRet:
   201  		s.Prog(obj.ARET)
   202  
   203  	case ssa.BlockExit, ssa.BlockRetJmp:
   204  
   205  	default:
   206  		panic("unexpected block")
   207  	}
   208  
   209  	// Entry point for the next block. Used by the JMP in goToBlock.
   210  	s.Prog(wasm.ARESUMEPOINT)
   211  
   212  	if s.OnWasmStackSkipped != 0 {
   213  		panic("wasm: bad stack")
   214  	}
   215  }
   216  
   217  func ssaGenValue(s *ssagen.State, v *ssa.Value) {
   218  	switch v.Op {
   219  	case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall, ssa.OpWasmLoweredTailCall:
   220  		s.PrepareCall(v)
   221  		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
   222  			// The runtime needs to inject jumps to
   223  			// deferreturn calls using the address in
   224  			// _func.deferreturn. Hence, the call to
   225  			// deferreturn must itself be a resumption
   226  			// point so it gets a target PC.
   227  			s.Prog(wasm.ARESUMEPOINT)
   228  		}
   229  		if v.Op == ssa.OpWasmLoweredClosureCall {
   230  			getValue64(s, v.Args[1])
   231  			setReg(s, wasm.REG_CTXT)
   232  		}
   233  		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil {
   234  			sym := call.Fn
   235  			p := s.Prog(obj.ACALL)
   236  			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
   237  			p.Pos = v.Pos
   238  			if v.Op == ssa.OpWasmLoweredTailCall {
   239  				p.As = obj.ARET
   240  			}
   241  		} else {
   242  			getValue64(s, v.Args[0])
   243  			p := s.Prog(obj.ACALL)
   244  			p.To = obj.Addr{Type: obj.TYPE_NONE}
   245  			p.Pos = v.Pos
   246  		}
   247  
   248  	case ssa.OpWasmLoweredMove:
   249  		getValue32(s, v.Args[0])
   250  		getValue32(s, v.Args[1])
   251  		i32Const(s, int32(v.AuxInt))
   252  		s.Prog(wasm.AMemoryCopy)
   253  
   254  	case ssa.OpWasmLoweredZero:
   255  		getValue32(s, v.Args[0])
   256  		i32Const(s, 0)
   257  		i32Const(s, int32(v.AuxInt))
   258  		s.Prog(wasm.AMemoryFill)
   259  
   260  	case ssa.OpWasmLoweredNilCheck:
   261  		getValue64(s, v.Args[0])
   262  		s.Prog(wasm.AI64Eqz)
   263  		s.Prog(wasm.AIf)
   264  		p := s.Prog(wasm.ACALLNORESUME)
   265  		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
   266  		s.Prog(wasm.AEnd)
   267  		if logopt.Enabled() {
   268  			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
   269  		}
   270  		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
   271  			base.WarnfAt(v.Pos, "generated nil check")
   272  		}
   273  
   274  	case ssa.OpWasmLoweredWB:
   275  		p := s.Prog(wasm.ACall)
   276  		// AuxInt encodes how many buffer entries we need.
   277  		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.GCWriteBarrier[v.AuxInt-1]}
   278  		setReg(s, v.Reg0()) // move result from wasm stack to register local
   279  
   280  	case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
   281  		getValue32(s, v.Args[0])
   282  		getValue64(s, v.Args[1])
   283  		p := s.Prog(v.Op.Asm())
   284  		p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
   285  
   286  	case ssa.OpStoreReg:
   287  		getReg(s, wasm.REG_SP)
   288  		getValue64(s, v.Args[0])
   289  		p := s.Prog(storeOp(v.Type))
   290  		ssagen.AddrAuto(&p.To, v)
   291  
   292  	case ssa.OpClobber, ssa.OpClobberReg:
   293  		// TODO: implement for clobberdead experiment. Nop is ok for now.
   294  
   295  	default:
   296  		if v.Type.IsMemory() {
   297  			return
   298  		}
   299  		if v.OnWasmStack {
   300  			s.OnWasmStackSkipped++
   301  			// If a Value is marked OnWasmStack, we don't generate the value and store it to a register now.
   302  			// Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack.
   303  			return
   304  		}
   305  		ssaGenValueOnStack(s, v, true)
   306  		if s.OnWasmStackSkipped != 0 {
   307  			panic("wasm: bad stack")
   308  		}
   309  		setReg(s, v.Reg())
   310  	}
   311  }
   312  
   313  func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
   314  	switch v.Op {
   315  	case ssa.OpWasmLoweredGetClosurePtr:
   316  		getReg(s, wasm.REG_CTXT)
   317  
   318  	case ssa.OpWasmLoweredGetCallerPC:
   319  		p := s.Prog(wasm.AI64Load)
   320  		// Caller PC is stored 8 bytes below first parameter.
   321  		p.From = obj.Addr{
   322  			Type:   obj.TYPE_MEM,
   323  			Name:   obj.NAME_PARAM,
   324  			Offset: -8,
   325  		}
   326  
   327  	case ssa.OpWasmLoweredGetCallerSP:
   328  		p := s.Prog(wasm.AGet)
   329  		// Caller SP is the address of the first parameter.
   330  		p.From = obj.Addr{
   331  			Type:   obj.TYPE_ADDR,
   332  			Name:   obj.NAME_PARAM,
   333  			Reg:    wasm.REG_SP,
   334  			Offset: 0,
   335  		}
   336  
   337  	case ssa.OpWasmLoweredAddr:
   338  		if v.Aux == nil { // address of off(SP), no symbol
   339  			getValue64(s, v.Args[0])
   340  			i64Const(s, v.AuxInt)
   341  			s.Prog(wasm.AI64Add)
   342  			break
   343  		}
   344  		p := s.Prog(wasm.AGet)
   345  		p.From.Type = obj.TYPE_ADDR
   346  		switch v.Aux.(type) {
   347  		case *obj.LSym:
   348  			ssagen.AddAux(&p.From, v)
   349  		case *ir.Name:
   350  			p.From.Reg = v.Args[0].Reg()
   351  			ssagen.AddAux(&p.From, v)
   352  		default:
   353  			panic("wasm: bad LoweredAddr")
   354  		}
   355  
   356  	case ssa.OpWasmLoweredConvert:
   357  		getValue64(s, v.Args[0])
   358  
   359  	case ssa.OpWasmSelect:
   360  		getValue64(s, v.Args[0])
   361  		getValue64(s, v.Args[1])
   362  		getValue32(s, v.Args[2])
   363  		s.Prog(v.Op.Asm())
   364  
   365  	case ssa.OpWasmI64AddConst:
   366  		getValue64(s, v.Args[0])
   367  		i64Const(s, v.AuxInt)
   368  		s.Prog(v.Op.Asm())
   369  
   370  	case ssa.OpWasmI64Const:
   371  		i64Const(s, v.AuxInt)
   372  
   373  	case ssa.OpWasmF32Const:
   374  		f32Const(s, v.AuxFloat())
   375  
   376  	case ssa.OpWasmF64Const:
   377  		f64Const(s, v.AuxFloat())
   378  
   379  	case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
   380  		getValue32(s, v.Args[0])
   381  		p := s.Prog(v.Op.Asm())
   382  		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
   383  
   384  	case ssa.OpWasmI64Eqz:
   385  		getValue64(s, v.Args[0])
   386  		s.Prog(v.Op.Asm())
   387  		if extend {
   388  			s.Prog(wasm.AI64ExtendI32U)
   389  		}
   390  
   391  	case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
   392  		ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
   393  		ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
   394  		getValue64(s, v.Args[0])
   395  		getValue64(s, v.Args[1])
   396  		s.Prog(v.Op.Asm())
   397  		if extend {
   398  			s.Prog(wasm.AI64ExtendI32U)
   399  		}
   400  
   401  	case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl,
   402  		ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign,
   403  		ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign:
   404  		getValue64(s, v.Args[0])
   405  		getValue64(s, v.Args[1])
   406  		s.Prog(v.Op.Asm())
   407  
   408  	case ssa.OpWasmI32Rotl:
   409  		getValue32(s, v.Args[0])
   410  		getValue32(s, v.Args[1])
   411  		s.Prog(wasm.AI32Rotl)
   412  		s.Prog(wasm.AI64ExtendI32U)
   413  
   414  	case ssa.OpWasmI64DivS:
   415  		getValue64(s, v.Args[0])
   416  		getValue64(s, v.Args[1])
   417  		if v.Type.Size() == 8 {
   418  			// Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
   419  			p := s.Prog(wasm.ACall)
   420  			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
   421  			break
   422  		}
   423  		s.Prog(wasm.AI64DivS)
   424  
   425  	case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
   426  		getValue64(s, v.Args[0])
   427  		s.Prog(v.Op.Asm())
   428  
   429  	case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
   430  		getValue64(s, v.Args[0])
   431  		s.Prog(v.Op.Asm())
   432  
   433  	case ssa.OpWasmF32DemoteF64:
   434  		getValue64(s, v.Args[0])
   435  		s.Prog(v.Op.Asm())
   436  
   437  	case ssa.OpWasmF64PromoteF32:
   438  		getValue64(s, v.Args[0])
   439  		s.Prog(v.Op.Asm())
   440  
   441  	case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U,
   442  		ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U,
   443  		ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S,
   444  		ssa.OpWasmF32Neg, ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs,
   445  		ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs,
   446  		ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt:
   447  		getValue64(s, v.Args[0])
   448  		s.Prog(v.Op.Asm())
   449  
   450  	case ssa.OpLoadReg:
   451  		p := s.Prog(loadOp(v.Type))
   452  		ssagen.AddrAuto(&p.From, v.Args[0])
   453  
   454  	case ssa.OpCopy:
   455  		getValue64(s, v.Args[0])
   456  
   457  	default:
   458  		v.Fatalf("unexpected op: %s", v.Op)
   459  
   460  	}
   461  }
   462  
   463  func isCmp(v *ssa.Value) bool {
   464  	switch v.Op {
   465  	case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
   466  		ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
   467  		ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
   468  		return true
   469  	default:
   470  		return false
   471  	}
   472  }
   473  
   474  func getValue32(s *ssagen.State, v *ssa.Value) {
   475  	if v.OnWasmStack {
   476  		s.OnWasmStackSkipped--
   477  		ssaGenValueOnStack(s, v, false)
   478  		if !isCmp(v) {
   479  			s.Prog(wasm.AI32WrapI64)
   480  		}
   481  		return
   482  	}
   483  
   484  	reg := v.Reg()
   485  	getReg(s, reg)
   486  	if reg != wasm.REG_SP {
   487  		s.Prog(wasm.AI32WrapI64)
   488  	}
   489  }
   490  
   491  func getValue64(s *ssagen.State, v *ssa.Value) {
   492  	if v.OnWasmStack {
   493  		s.OnWasmStackSkipped--
   494  		ssaGenValueOnStack(s, v, true)
   495  		return
   496  	}
   497  
   498  	reg := v.Reg()
   499  	getReg(s, reg)
   500  	if reg == wasm.REG_SP {
   501  		s.Prog(wasm.AI64ExtendI32U)
   502  	}
   503  }
   504  
   505  func i32Const(s *ssagen.State, val int32) {
   506  	p := s.Prog(wasm.AI32Const)
   507  	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
   508  }
   509  
   510  func i64Const(s *ssagen.State, val int64) {
   511  	p := s.Prog(wasm.AI64Const)
   512  	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
   513  }
   514  
   515  func f32Const(s *ssagen.State, val float64) {
   516  	p := s.Prog(wasm.AF32Const)
   517  	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
   518  }
   519  
   520  func f64Const(s *ssagen.State, val float64) {
   521  	p := s.Prog(wasm.AF64Const)
   522  	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
   523  }
   524  
   525  func getReg(s *ssagen.State, reg int16) {
   526  	p := s.Prog(wasm.AGet)
   527  	p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
   528  }
   529  
   530  func setReg(s *ssagen.State, reg int16) {
   531  	p := s.Prog(wasm.ASet)
   532  	p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
   533  }
   534  
   535  func loadOp(t *types.Type) obj.As {
   536  	if t.IsFloat() {
   537  		switch t.Size() {
   538  		case 4:
   539  			return wasm.AF32Load
   540  		case 8:
   541  			return wasm.AF64Load
   542  		default:
   543  			panic("bad load type")
   544  		}
   545  	}
   546  
   547  	switch t.Size() {
   548  	case 1:
   549  		if t.IsSigned() {
   550  			return wasm.AI64Load8S
   551  		}
   552  		return wasm.AI64Load8U
   553  	case 2:
   554  		if t.IsSigned() {
   555  			return wasm.AI64Load16S
   556  		}
   557  		return wasm.AI64Load16U
   558  	case 4:
   559  		if t.IsSigned() {
   560  			return wasm.AI64Load32S
   561  		}
   562  		return wasm.AI64Load32U
   563  	case 8:
   564  		return wasm.AI64Load
   565  	default:
   566  		panic("bad load type")
   567  	}
   568  }
   569  
   570  func storeOp(t *types.Type) obj.As {
   571  	if t.IsFloat() {
   572  		switch t.Size() {
   573  		case 4:
   574  			return wasm.AF32Store
   575  		case 8:
   576  			return wasm.AF64Store
   577  		default:
   578  			panic("bad store type")
   579  		}
   580  	}
   581  
   582  	switch t.Size() {
   583  	case 1:
   584  		return wasm.AI64Store8
   585  	case 2:
   586  		return wasm.AI64Store16
   587  	case 4:
   588  		return wasm.AI64Store32
   589  	case 8:
   590  		return wasm.AI64Store
   591  	default:
   592  		panic("bad store type")
   593  	}
   594  }
   595  