Source file src/cmd/compile/internal/wasm/ssa.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package wasm
     6  
     7  import (
     8  	"cmd/compile/internal/base"
     9  	"cmd/compile/internal/ir"
    10  	"cmd/compile/internal/logopt"
    11  	"cmd/compile/internal/objw"
    12  	"cmd/compile/internal/ssa"
    13  	"cmd/compile/internal/ssagen"
    14  	"cmd/compile/internal/types"
    15  	"cmd/internal/obj"
    16  	"cmd/internal/obj/wasm"
    17  )
    18  
    19  /*
    20  
    21     Wasm implementation
    22     -------------------
    23  
    24     Wasm is a strange Go port because the machine isn't
    25     a register-based machine, threads are different, code paths
    26     are different, etc. We outline those differences here.
    27  
    28     See the design doc for some additional info on this topic.
    29     https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4/edit#heading=h.mjo1bish3xni
    30  
    31     PCs:
    32  
    33     Wasm doesn't have PCs in the normal sense that you can jump
    34     to or call to. Instead, we simulate these PCs using our own construct.
    35  
    36     A PC in the Wasm implementation is the combination of a function
    37     ID and a block ID within that function. The function ID is an index
    38     into a function table which transfers control to the start of the
    39     function in question, and the block ID is a sequential integer
    40     indicating where in the function we are.
    41  
    42     Every function starts with a branch table which transfers control
    43     to the place in the function indicated by the block ID. The block
    44     ID is provided to the function as the sole Wasm argument.
    45  
    46     Block IDs do not encode every possible PC. They only encode places
    47     in the function where it might be suspended. Typically these places
    48     are call sites.
    49  
    50     Sometimes we encode the function ID and block ID separately. When
    51     recorded together as a single integer, we use the value F<<16+B.
    52  
    53     Threads:
    54  
    55     Wasm doesn't (yet) have threads. We have to simulate threads by
    56     keeping goroutine stacks in linear memory and unwinding
    57     the Wasm stack each time we want to switch goroutines.
    58  
    59     To support unwinding a stack, each function call returns on the Wasm
    60     stack a boolean that tells the function whether it should return
    61     immediately or not. When returning immediately, a return address
    62     is left on the top of the Go stack indicating where the goroutine
    63     should be resumed.
    64  
    65     Stack pointer:
    66  
    67     There is a single global stack pointer which records the stack pointer
    68     used by the currently active goroutine. This is just an address in
    69     linear memory where the Go runtime is maintaining the stack for that
    70     goroutine.
    71  
    72     Functions cache the global stack pointer in a local variable for
    73     faster access, but any changes must be spilled to the global variable
    74     before any call and restored from the global variable after any call.
    75  
    76     Calling convention:
    77  
    78     All Go arguments and return values are passed on the Go stack, not
    79     the wasm stack. In addition, return addresses are pushed on the
    80     Go stack at every call point. Return addresses are not used during
    81     normal execution, they are used only when resuming goroutines.
    82     (So they are not really a "return address", they are a "resume address".)
    83  
    84     All Go functions have the Wasm type (i32)->i32. The argument
    85     is the block ID and the return value is the exit immediately flag.
    86  
    87     Callsite:
    88      - write arguments to the Go stack (starting at SP+0)
    89      - push return address to Go stack (8 bytes)
    90      - write local SP to global SP
    91      - push 0 (type i32) to Wasm stack
    92      - issue Call
    93      - restore local SP from global SP
    94      - pop int32 from top of Wasm stack. If nonzero, exit function immediately.
    95      - use results from Go stack (starting at SP+sizeof(args))
    96         - note that the callee will have popped the return address
    97  
    98     Prologue:
    99      - initialize local SP from global SP
   100      - jump to the location indicated by the block ID argument
   101        (which appears in local variable 0)
   102      - at block 0
   103        - check for Go stack overflow, call morestack if needed
   104        - subtract frame size from SP
   105        - note that arguments now start at SP+framesize+8
   106  
   107     Normal epilogue:
   108      - pop frame from Go stack
   109      - pop return address from Go stack
   110      - push 0 (type i32) on the Wasm stack
   111      - return
   112     Exit immediately epilogue:
   113      - push 1 (type i32) on the Wasm stack
   114      - return
   115      - note that the return address and stack frame are left on the Go stack
   116  
   117     The main loop that executes goroutines is wasm_pc_f_loop, in
   118     runtime/rt0_js_wasm.s. It grabs the saved return address from
   119     the top of the Go stack (actually SP-8?), splits it up into F
   120     and B parts, then calls F with its Wasm argument set to B.
   121  
   122     Note that when resuming a goroutine, only the most recent function
   123     invocation of that goroutine appears on the Wasm stack. When that
   124     Wasm function returns normally, the next most recent frame will
   125     then be started up by wasm_pc_f_loop.
   126  
   127     Global 0 is SP (stack pointer)
   128     Global 1 is CTXT (closure pointer)
   129     Global 2 is GP (goroutine pointer)
   130  */
   131  
// Init populates arch with the wasm-specific hooks and parameters used
// by the portable SSA-to-assembly backend (ssagen).
func Init(arch *ssagen.ArchInfo) {
	arch.LinkArch = &wasm.Linkwasm
	arch.REGSP = wasm.REG_SP
	// Wasm has no real addressing-width limit for SSA purposes; use a
	// huge MAXWIDTH so the generic backend never splits wide values.
	arch.MAXWIDTH = 1 << 50

	arch.ZeroRange = zeroRange
	arch.Ginsnop = ginsnop

	arch.SSAMarkMoves = ssaMarkMoves
	arch.SSAGenValue = ssaGenValue
	arch.SSAGenBlock = ssaGenBlock
}
   144  
   145  func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
   146  	if cnt == 0 {
   147  		return p
   148  	}
   149  	if cnt%8 != 0 {
   150  		base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
   151  	}
   152  
   153  	for i := int64(0); i < cnt; i += 8 {
   154  		p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
   155  		p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
   156  		p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
   157  	}
   158  
   159  	return p
   160  }
   161  
// ginsnop emits a wasm Nop instruction and returns it.
func ginsnop(pp *objw.Progs) *obj.Prog {
	return pp.Prog(wasm.ANop)
}
   165  
// ssaMarkMoves is a no-op on wasm: there are no flag-clobbering
// constant moves that need marking (cf. the amd64 implementation).
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
}
   168  
   169  func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
   170  	switch b.Kind {
   171  	case ssa.BlockPlain, ssa.BlockDefer:
   172  		if next != b.Succs[0].Block() {
   173  			s.Br(obj.AJMP, b.Succs[0].Block())
   174  		}
   175  
   176  	case ssa.BlockIf:
   177  		switch next {
   178  		case b.Succs[0].Block():
   179  			// if false, jump to b.Succs[1]
   180  			getValue32(s, b.Controls[0])
   181  			s.Prog(wasm.AI32Eqz)
   182  			s.Prog(wasm.AIf)
   183  			s.Br(obj.AJMP, b.Succs[1].Block())
   184  			s.Prog(wasm.AEnd)
   185  		case b.Succs[1].Block():
   186  			// if true, jump to b.Succs[0]
   187  			getValue32(s, b.Controls[0])
   188  			s.Prog(wasm.AIf)
   189  			s.Br(obj.AJMP, b.Succs[0].Block())
   190  			s.Prog(wasm.AEnd)
   191  		default:
   192  			// if true, jump to b.Succs[0], else jump to b.Succs[1]
   193  			getValue32(s, b.Controls[0])
   194  			s.Prog(wasm.AIf)
   195  			s.Br(obj.AJMP, b.Succs[0].Block())
   196  			s.Prog(wasm.AEnd)
   197  			s.Br(obj.AJMP, b.Succs[1].Block())
   198  		}
   199  
   200  	case ssa.BlockRet:
   201  		s.Prog(obj.ARET)
   202  
   203  	case ssa.BlockExit, ssa.BlockRetJmp:
   204  
   205  	default:
   206  		panic("unexpected block")
   207  	}
   208  
   209  	// Entry point for the next block. Used by the JMP in goToBlock.
   210  	s.Prog(wasm.ARESUMEPOINT)
   211  
   212  	if s.OnWasmStackSkipped != 0 {
   213  		panic("wasm: bad stack")
   214  	}
   215  }
   216  
// ssaGenValue emits the wasm instructions for a single SSA value v.
// Unless v is marked OnWasmStack (in which case generation is deferred
// to the use site), the result is left in v's assigned register local.
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	switch v.Op {
	case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall, ssa.OpWasmLoweredTailCall, ssa.OpWasmLoweredTailCallInter:
		s.PrepareCall(v)
		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
			// The runtime needs to inject jumps to
			// deferreturn calls using the address in
			// _func.deferreturn. Hence, the call to
			// deferreturn must itself be a resumption
			// point so it gets a target PC.
			s.Prog(wasm.ARESUMEPOINT)
		}
		if v.Op == ssa.OpWasmLoweredClosureCall {
			// Pass the closure context in CTXT before the call.
			getValue64(s, v.Args[1])
			setReg(s, wasm.REG_CTXT)
		}
		if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil {
			// Direct call to a known symbol.
			sym := call.Fn
			p := s.Prog(obj.ACALL)
			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
			p.Pos = v.Pos
			if v.Op == ssa.OpWasmLoweredTailCall {
				// A tail call is emitted as a RET with a call target.
				p.As = obj.ARET
			}
		} else {
			// Indirect call: the target (Args[0]) goes on the wasm stack.
			getValue64(s, v.Args[0])
			p := s.Prog(obj.ACALL)
			p.To = obj.Addr{Type: obj.TYPE_NONE}
			p.Pos = v.Pos
			if v.Op == ssa.OpWasmLoweredTailCallInter {
				p.As = obj.ARET
			}
		}

	case ssa.OpWasmLoweredMove:
		// memory.copy(dst=Args[0], src=Args[1], len=AuxInt)
		getValue32(s, v.Args[0])
		getValue32(s, v.Args[1])
		i32Const(s, int32(v.AuxInt))
		s.Prog(wasm.AMemoryCopy)

	case ssa.OpWasmLoweredZero:
		// memory.fill(dst=Args[0], value=0, len=AuxInt)
		getValue32(s, v.Args[0])
		i32Const(s, 0)
		i32Const(s, int32(v.AuxInt))
		s.Prog(wasm.AMemoryFill)

	case ssa.OpWasmLoweredNilCheck:
		// if Args[0] == 0 { call sigpanic }
		getValue64(s, v.Args[0])
		s.Prog(wasm.AI64Eqz)
		s.Prog(wasm.AIf)
		p := s.Prog(wasm.ACALLNORESUME)
		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
		s.Prog(wasm.AEnd)
		if logopt.Enabled() {
			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
		}
		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			base.WarnfAt(v.Pos, "generated nil check")
		}

	case ssa.OpWasmLoweredWB:
		p := s.Prog(wasm.ACall)
		// AuxInt encodes how many buffer entries we need.
		p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.GCWriteBarrier[v.AuxInt-1]}
		setReg(s, v.Reg0()) // move result from wasm stack to register local

	case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
		// Store Args[1] at address Args[0] + offset AuxInt.
		getValue32(s, v.Args[0])
		getValue64(s, v.Args[1])
		p := s.Prog(v.Op.Asm())
		p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}

	case ssa.OpStoreReg:
		// Spill a register value to its auto slot on the Go stack.
		getReg(s, wasm.REG_SP)
		getValue64(s, v.Args[0])
		p := s.Prog(storeOp(v.Type))
		ssagen.AddrAuto(&p.To, v)

	case ssa.OpClobber, ssa.OpClobberReg:
		// TODO: implement for clobberdead experiment. Nop is ok for now.

	default:
		// Memory values carry no data; nothing to emit.
		if v.Type.IsMemory() {
			return
		}
		if v.OnWasmStack {
			s.OnWasmStackSkipped++
			// If a Value is marked OnWasmStack, we don't generate the value and store it to a register now.
			// Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack.
			return
		}
		ssaGenValueOnStack(s, v, true)
		if s.OnWasmStackSkipped != 0 {
			panic("wasm: bad stack")
		}
		setReg(s, v.Reg())
	}
}
   315  
// ssaGenValueOnStack emits instructions that leave the result of v on
// the wasm operand stack rather than in a register local. extend
// controls whether i32 comparison results are widened to i64 (callers
// that want the raw i32, e.g. branch conditions, pass false).
func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
	switch v.Op {
	case ssa.OpWasmLoweredGetClosurePtr:
		getReg(s, wasm.REG_CTXT)

	case ssa.OpWasmLoweredGetCallerPC:
		p := s.Prog(wasm.AI64Load)
		// Caller PC is stored 8 bytes below first parameter.
		p.From = obj.Addr{
			Type:   obj.TYPE_MEM,
			Name:   obj.NAME_PARAM,
			Offset: -8,
		}

	case ssa.OpWasmLoweredGetCallerSP:
		p := s.Prog(wasm.AGet)
		// Caller SP is the address of the first parameter.
		p.From = obj.Addr{
			Type:   obj.TYPE_ADDR,
			Name:   obj.NAME_PARAM,
			Reg:    wasm.REG_SP,
			Offset: 0,
		}

	case ssa.OpWasmLoweredAddr:
		if v.Aux == nil { // address of off(SP), no symbol
			getValue64(s, v.Args[0])
			i64Const(s, v.AuxInt)
			s.Prog(wasm.AI64Add)
			break
		}
		p := s.Prog(wasm.AGet)
		p.From.Type = obj.TYPE_ADDR
		switch v.Aux.(type) {
		case *obj.LSym:
			// Global symbol address.
			ssagen.AddAux(&p.From, v)
		case *ir.Name:
			// Local name: base register comes from Args[0].
			p.From.Reg = v.Args[0].Reg()
			ssagen.AddAux(&p.From, v)
		default:
			panic("wasm: bad LoweredAddr")
		}

	case ssa.OpWasmLoweredConvert:
		// Pointer-preserving conversion: no instruction needed, just
		// forward the operand.
		getValue64(s, v.Args[0])

	case ssa.OpWasmSelect:
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		getValue32(s, v.Args[2])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmI64AddConst:
		getValue64(s, v.Args[0])
		i64Const(s, v.AuxInt)
		s.Prog(v.Op.Asm())

	case ssa.OpWasmI64Const:
		i64Const(s, v.AuxInt)

	case ssa.OpWasmF32Const:
		f32Const(s, v.AuxFloat())

	case ssa.OpWasmF64Const:
		f64Const(s, v.AuxFloat())

	case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
		// Load from address Args[0] + offset AuxInt.
		getValue32(s, v.Args[0])
		p := s.Prog(v.Op.Asm())
		p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}

	case ssa.OpWasmI64Eqz:
		// Unary comparison: produces an i32, widened if requested.
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())
		if extend {
			s.Prog(wasm.AI64ExtendI32U)
		}

	case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
		ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
		ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
		// Binary comparisons: i32 result, widened if requested.
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		s.Prog(v.Op.Asm())
		if extend {
			s.Prog(wasm.AI64ExtendI32U)
		}

	case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl,
		ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign,
		ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign:
		// Plain binary ops whose result width matches the operands.
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmI32Rotl:
		// 32-bit rotate, then widen the i32 result back to i64.
		getValue32(s, v.Args[0])
		getValue32(s, v.Args[1])
		s.Prog(wasm.AI32Rotl)
		s.Prog(wasm.AI64ExtendI32U)

	case ssa.OpWasmI64DivS:
		getValue64(s, v.Args[0])
		getValue64(s, v.Args[1])
		if v.Type.Size() == 8 {
			// Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
			p := s.Prog(wasm.ACall)
			p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
			break
		}
		s.Prog(wasm.AI64DivS)

	case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmF32DemoteF64:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmF64PromoteF32:
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U,
		ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U,
		ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S,
		ssa.OpWasmF32Neg, ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs,
		ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs,
		ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt:
		// Plain unary ops.
		getValue64(s, v.Args[0])
		s.Prog(v.Op.Asm())

	case ssa.OpLoadReg:
		// Restore a spilled value from its auto slot on the Go stack.
		p := s.Prog(loadOp(v.Type))
		ssagen.AddrAuto(&p.From, v.Args[0])

	case ssa.OpCopy:
		getValue64(s, v.Args[0])

	default:
		v.Fatalf("unexpected op: %s", v.Op)

	}
}
   465  
   466  func isCmp(v *ssa.Value) bool {
   467  	switch v.Op {
   468  	case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
   469  		ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
   470  		ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
   471  		return true
   472  	default:
   473  		return false
   474  	}
   475  }
   476  
   477  func getValue32(s *ssagen.State, v *ssa.Value) {
   478  	if v.OnWasmStack {
   479  		s.OnWasmStackSkipped--
   480  		ssaGenValueOnStack(s, v, false)
   481  		if !isCmp(v) {
   482  			s.Prog(wasm.AI32WrapI64)
   483  		}
   484  		return
   485  	}
   486  
   487  	reg := v.Reg()
   488  	getReg(s, reg)
   489  	if reg != wasm.REG_SP {
   490  		s.Prog(wasm.AI32WrapI64)
   491  	}
   492  }
   493  
   494  func getValue64(s *ssagen.State, v *ssa.Value) {
   495  	if v.OnWasmStack {
   496  		s.OnWasmStackSkipped--
   497  		ssaGenValueOnStack(s, v, true)
   498  		return
   499  	}
   500  
   501  	reg := v.Reg()
   502  	getReg(s, reg)
   503  	if reg == wasm.REG_SP {
   504  		s.Prog(wasm.AI64ExtendI32U)
   505  	}
   506  }
   507  
   508  func i32Const(s *ssagen.State, val int32) {
   509  	p := s.Prog(wasm.AI32Const)
   510  	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
   511  }
   512  
   513  func i64Const(s *ssagen.State, val int64) {
   514  	p := s.Prog(wasm.AI64Const)
   515  	p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
   516  }
   517  
   518  func f32Const(s *ssagen.State, val float64) {
   519  	p := s.Prog(wasm.AF32Const)
   520  	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
   521  }
   522  
   523  func f64Const(s *ssagen.State, val float64) {
   524  	p := s.Prog(wasm.AF64Const)
   525  	p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
   526  }
   527  
   528  func getReg(s *ssagen.State, reg int16) {
   529  	p := s.Prog(wasm.AGet)
   530  	p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
   531  }
   532  
   533  func setReg(s *ssagen.State, reg int16) {
   534  	p := s.Prog(wasm.ASet)
   535  	p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
   536  }
   537  
   538  func loadOp(t *types.Type) obj.As {
   539  	if t.IsFloat() {
   540  		switch t.Size() {
   541  		case 4:
   542  			return wasm.AF32Load
   543  		case 8:
   544  			return wasm.AF64Load
   545  		default:
   546  			panic("bad load type")
   547  		}
   548  	}
   549  
   550  	switch t.Size() {
   551  	case 1:
   552  		if t.IsSigned() {
   553  			return wasm.AI64Load8S
   554  		}
   555  		return wasm.AI64Load8U
   556  	case 2:
   557  		if t.IsSigned() {
   558  			return wasm.AI64Load16S
   559  		}
   560  		return wasm.AI64Load16U
   561  	case 4:
   562  		if t.IsSigned() {
   563  			return wasm.AI64Load32S
   564  		}
   565  		return wasm.AI64Load32U
   566  	case 8:
   567  		return wasm.AI64Load
   568  	default:
   569  		panic("bad load type")
   570  	}
   571  }
   572  
   573  func storeOp(t *types.Type) obj.As {
   574  	if t.IsFloat() {
   575  		switch t.Size() {
   576  		case 4:
   577  			return wasm.AF32Store
   578  		case 8:
   579  			return wasm.AF64Store
   580  		default:
   581  			panic("bad store type")
   582  		}
   583  	}
   584  
   585  	switch t.Size() {
   586  	case 1:
   587  		return wasm.AI64Store8
   588  	case 2:
   589  		return wasm.AI64Store16
   590  	case 4:
   591  		return wasm.AI64Store32
   592  	case 8:
   593  		return wasm.AI64Store
   594  	default:
   595  		panic("bad store type")
   596  	}
   597  }
   598  

View as plain text