// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package amd64

import (
	"fmt"
	"internal/buildcfg"
	"math"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/logopt"
	"cmd/compile/internal/objw"
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/ssagen"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/obj/x86"
)

// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
	flive := b.FlagsLiveAtEnd
	for _, c := range b.ControlValues() {
		flive = c.Type.IsFlags() || flive
	}
	for i := len(b.Values) - 1; i >= 0; i-- {
		v := b.Values[i]
		if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) {
			// The "mark" is any non-nil Aux value.
			v.Aux = ssa.AuxMark
		}
		if v.Type.IsFlags() {
			flive = false
		}
		for _, a := range v.Args {
			if a.Type.IsFlags() {
				flive = true
			}
		}
	}
}

// loadByType returns the load instruction of the given type.
func loadByType(t *types.Type) obj.As {
	// Avoid partial register write
	if !t.IsFloat() {
		switch t.Size() {
		case 1:
			return x86.AMOVBLZX
		case 2:
			return x86.AMOVWLZX
		}
	}
	// Otherwise, there's no difference between load and store opcodes.
	return storeByType(t)
}

// storeByType returns the store instruction of the given type.
func storeByType(t *types.Type) obj.As {
	width := t.Size()
	if t.IsFloat() {
		switch width {
		case 4:
			return x86.AMOVSS
		case 8:
			return x86.AMOVSD
		}
	} else {
		switch width {
		case 1:
			return x86.AMOVB
		case 2:
			return x86.AMOVW
		case 4:
			return x86.AMOVL
		case 8:
			return x86.AMOVQ
		case 16:
			return x86.AMOVUPS
		}
	}
	panic(fmt.Sprintf("bad store type %v", t))
}

// moveByType returns the reg->reg move instruction of the given type.
func moveByType(t *types.Type) obj.As {
	if t.IsFloat() {
		// Moving the whole sse2 register is faster
		// than moving just the correct low portion of it.
		// There is no xmm->xmm move with 1 byte opcode,
		// so use movups, which has 2 byte opcode.
		return x86.AMOVUPS
	} else {
		switch t.Size() {
		case 1:
			// Avoids partial register write
			return x86.AMOVL
		case 2:
			return x86.AMOVL
		case 4:
			return x86.AMOVL
		case 8:
			return x86.AMOVQ
		case 16:
			return x86.AMOVUPS
		default:
			panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t))
		}
	}
}

// opregreg emits instructions for
//
//	dest := dest(To) op src(From)
//
// and also returns the created obj.Prog so it
// can be modified (offset, scale, etc.)
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
	p := s.Prog(op)
	p.From.Type = obj.TYPE_REG
	p.To.Type = obj.TYPE_REG
	p.To.Reg = dest
	p.From.Reg = src
	return p
}

// memIdx fills out a as an indexed memory reference for v.
// It assumes that the base register and the index register
// are v.Args[0].Reg() and v.Args[1].Reg(), respectively.
// The caller must use ssagen.AddAux/ssagen.AddAux2 to handle v.Aux as necessary.
func memIdx(a *obj.Addr, v *ssa.Value) {
	r, i := v.Args[0].Reg(), v.Args[1].Reg()
	a.Type = obj.TYPE_MEM
	a.Scale = v.Op.Scale()
	if a.Scale == 1 && i == x86.REG_SP {
		r, i = i, r
	}
	a.Reg = r
	a.Index = i
}

// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
// see runtime/mkduff.go.
func duffStart(size int64) int64 {
	x, _ := duff(size)
	return x
}
func duffAdj(size int64) int64 {
	_, x := duff(size)
	return x
}

// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
// required to use the duffzero mechanism for a block of the given size.
func duff(size int64) (int64, int64) {
	if size < 32 || size > 1024 || size%dzClearStep != 0 {
		panic("bad duffzero size")
	}
	steps := size / dzClearStep
	blocks := steps / dzBlockLen
	steps %= dzBlockLen
	off := dzBlockSize * (dzBlocks - blocks)
	var adj int64
	if steps != 0 {
		off -= dzLeaqSize
		off -= dzMovSize * steps
		adj -= dzClearStep * (dzBlockLen - steps)
	}
	return off, adj
}

func getgFromTLS(s *ssagen.State, r int16) {
	// See the comments in cmd/internal/obj/x86/obj6.go
	// near CanUse1InsnTLS for a detailed explanation of these instructions.
	if x86.CanUse1InsnTLS(base.Ctxt) {
		// MOVQ (TLS), r
		p := s.Prog(x86.AMOVQ)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = x86.REG_TLS
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	} else {
		// MOVQ TLS, r
		// MOVQ (r)(TLS*1), r
		p := s.Prog(x86.AMOVQ)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x86.REG_TLS
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		q := s.Prog(x86.AMOVQ)
		q.From.Type = obj.TYPE_MEM
		q.From.Reg = r
		q.From.Index = x86.REG_TLS
		q.From.Scale = 1
		q.To.Type = obj.TYPE_REG
		q.To.Reg = r
	}
}

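// ssaGenValue emits the machine instructions (obj.Progs) for a single SSA value v.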
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	switch v.Op {
	case ssa.OpAMD64VFMADD231SD:
		p := s.Prog(v.Op.Asm())
		p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[2].Reg()}
		p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
		p.AddRestSourceReg(v.Args[1].Reg())
	case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		switch {
		case r == r1:
			p := s.Prog(v.Op.Asm())
			p.From.Type = obj.TYPE_REG
			p.From.Reg = r2
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
		case r == r2:
			p := s.Prog(v.Op.Asm())
			p.From.Type = obj.TYPE_REG
			p.From.Reg = r1
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
		default:
			var asm obj.As
			if v.Op == ssa.OpAMD64ADDQ {
				asm = x86.ALEAQ
			} else {
				asm = x86.ALEAL
			}
			p := s.Prog(asm)
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = r1
			p.From.Scale = 1
			p.From.Index = r2
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
		}

	case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL,
		ssa.OpAMD64MULQ, ssa.OpAMD64MULL,
		ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL,
		ssa.OpAMD64ORQ, ssa.OpAMD64ORL,
		ssa.OpAMD64XORQ, ssa.OpAMD64XORL,
		ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL,
		ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB,
		ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB,
		ssa.OpAMD64ROLQ, ssa.OpAMD64ROLL, ssa.OpAMD64ROLW, ssa.OpAMD64ROLB,
		ssa.OpAMD64RORQ, ssa.OpAMD64RORL, ssa.OpAMD64RORW, ssa.OpAMD64RORB,
		ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
		ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
		ssa.OpAMD64MINSS, ssa.OpAMD64MINSD,
		ssa.OpAMD64POR, ssa.OpAMD64PXOR,
		ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ,
		ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ,
		ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ,
		ssa.OpAMD64PCMPEQB, ssa.OpAMD64PSIGNB,
		ssa.OpAMD64PUNPCKLBW:
		opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())

	case ssa.OpAMD64PSHUFLW:
		p := s.Prog(v.Op.Asm())
		imm := v.AuxInt
		if imm < 0 || imm > 255 {
			v.Fatalf("Invalid source selection immediate")
		}
		p.From.Offset = imm
		p.From.Type = obj.TYPE_CONST
		p.AddRestSourceReg(v.Args[0].Reg())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64PSHUFBbroadcast:
		// PSHUFB with a control mask of zero copies byte 0 to all
		// bytes in the register.
		//
		// X15 is always zero with ABIInternal.
		if s.ABI != obj.ABIInternal {
			// zero X15 manually
			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
		}

		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p.From.Reg = x86.REG_X15

	case ssa.OpAMD64SHRDQ, ssa.OpAMD64SHLDQ:
		p := s.Prog(v.Op.Asm())
		lo, hi, bits := v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg()
		p.From.Type = obj.TYPE_REG
		p.From.Reg = bits
		p.To.Type = obj.TYPE_REG
		p.To.Reg = lo
		p.AddRestSourceReg(hi)

	case ssa.OpAMD64BLSIQ, ssa.OpAMD64BLSIL,
		ssa.OpAMD64BLSMSKQ, ssa.OpAMD64BLSMSKL,
		ssa.OpAMD64BLSRQ, ssa.OpAMD64BLSRL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		switch v.Op {
		case ssa.OpAMD64BLSRQ, ssa.OpAMD64BLSRL:
			p.To.Reg = v.Reg0()
		default:
			p.To.Reg = v.Reg()
		}

	case ssa.OpAMD64ANDNQ, ssa.OpAMD64ANDNL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p.AddRestSourceReg(v.Args[1].Reg())

	case ssa.OpAMD64SARXL, ssa.OpAMD64SARXQ,
		ssa.OpAMD64SHLXL, ssa.OpAMD64SHLXQ,
		ssa.OpAMD64SHRXL, ssa.OpAMD64SHRXQ:
		p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
		p.AddRestSourceReg(v.Args[0].Reg())

	case ssa.OpAMD64SHLXLload, ssa.OpAMD64SHLXQload,
		ssa.OpAMD64SHRXLload, ssa.OpAMD64SHRXQload,
		ssa.OpAMD64SARXLload, ssa.OpAMD64SARXQload:
		p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
		m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
		ssagen.AddAux(&m, v)
		p.AddRestSource(m)

	case ssa.OpAMD64SHLXLloadidx1, ssa.OpAMD64SHLXLloadidx4, ssa.OpAMD64SHLXLloadidx8,
		ssa.OpAMD64SHRXLloadidx1, ssa.OpAMD64SHRXLloadidx4, ssa.OpAMD64SHRXLloadidx8,
		ssa.OpAMD64SARXLloadidx1, ssa.OpAMD64SARXLloadidx4, ssa.OpAMD64SARXLloadidx8,
		ssa.OpAMD64SHLXQloadidx1, ssa.OpAMD64SHLXQloadidx8,
		ssa.OpAMD64SHRXQloadidx1, ssa.OpAMD64SHRXQloadidx8,
		ssa.OpAMD64SARXQloadidx1, ssa.OpAMD64SARXQloadidx8:
		p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[2].Reg())
		m := obj.Addr{Type: obj.TYPE_MEM}
		memIdx(&m, v)
		ssagen.AddAux(&m, v)
		p.AddRestSource(m)

	case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
		// Arg[0] (the dividend) is in AX.
		// Arg[1] (the divisor) can be in any other register.
		// Result[0] (the quotient) is in AX.
		// Result[1] (the remainder) is in DX.
		r := v.Args[1].Reg()

		// Zero extend dividend.
		opregreg(s, x86.AXORL, x86.REG_DX, x86.REG_DX)

		// Issue divide.
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r

	case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW:
		// Arg[0] (the dividend) is in AX.
		// Arg[1] (the divisor) can be in any other register.
		// Result[0] (the quotient) is in AX.
		// Result[1] (the remainder) is in DX.
		r := v.Args[1].Reg()

		var opCMP, opNEG, opSXD obj.As
		switch v.Op {
		case ssa.OpAMD64DIVQ:
			opCMP, opNEG, opSXD = x86.ACMPQ, x86.ANEGQ, x86.ACQO
		case ssa.OpAMD64DIVL:
			opCMP, opNEG, opSXD = x86.ACMPL, x86.ANEGL, x86.ACDQ
		case ssa.OpAMD64DIVW:
			opCMP, opNEG, opSXD = x86.ACMPW, x86.ANEGW, x86.ACWD
		}

		// The CPU faults upon signed overflow, which occurs when the most
		// negative int is divided by -1. Handle divide by -1 as a special case.
		var j1, j2 *obj.Prog
		if ssa.DivisionNeedsFixUp(v) {
			c := s.Prog(opCMP)
			c.From.Type = obj.TYPE_REG
			c.From.Reg = r
			c.To.Type = obj.TYPE_CONST
			c.To.Offset = -1

			// Divisor is not -1, proceed with normal division.
			j1 = s.Prog(x86.AJNE)
			j1.To.Type = obj.TYPE_BRANCH

			// Divisor is -1, manually compute quotient and remainder via fixup code.
			// n / -1 = -n
			n1 := s.Prog(opNEG)
			n1.To.Type = obj.TYPE_REG
			n1.To.Reg = x86.REG_AX

			// n % -1 == 0
			opregreg(s, x86.AXORL, x86.REG_DX, x86.REG_DX)

			// TODO: issue only the -1 fixup code we actually need.
			// For instance, if only the quotient is used, there is no point in zeroing the remainder.

			// Skip over normal division.
			j2 = s.Prog(obj.AJMP)
			j2.To.Type = obj.TYPE_BRANCH
		}

		// Sign extend dividend and perform division.
		p := s.Prog(opSXD)
		if j1 != nil {
			j1.To.SetTarget(p)
		}
		p = s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r

		if j2 != nil {
			j2.To.SetTarget(s.Pc())
		}

	case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU:
		// the frontend rewrites constant division by 8/16/32 bit integers into
		// HMUL by a constant
		// SSA rewrites generate the 64 bit versions

		// Arg[0] is already in AX as it's the only register we allow
		// and DX is the only output we care about (the high bits)
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()

		// IMULB puts the high portion in AH instead of DL,
		// so move it to DL for consistency
		if v.Type.Size() == 1 {
			m := s.Prog(x86.AMOVB)
			m.From.Type = obj.TYPE_REG
			m.From.Reg = x86.REG_AH
			m.To.Type = obj.TYPE_REG
			m.To.Reg = x86.REG_DX
		}

	case ssa.OpAMD64MULQU, ssa.OpAMD64MULLU:
		// Arg[0] is already in AX as it's the only register we allow
		// results lo in AX
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()

	case ssa.OpAMD64MULQU2:
		// Arg[0] is already in AX as it's the only register we allow
		// results hi in DX, lo in AX
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()

	case ssa.OpAMD64DIVQU2:
		// Arg[0], Arg[1] are already in DX, AX, as they're the only registers we allow
		// results q in AX, r in DX
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()

	case ssa.OpAMD64AVGQU:
		// compute (x+y)/2 unsigned.
		// Do a 64-bit add, the overflow goes into the carry.
		// Shift right once and pull the carry back into the 63rd bit.
		p := s.Prog(x86.AADDQ)
		p.From.Type = obj.TYPE_REG
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p.From.Reg = v.Args[1].Reg()
		p = s.Prog(x86.ARCRQ)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ:
		r := v.Reg0()
		r0 := v.Args[0].Reg()
		r1 := v.Args[1].Reg()
		switch r {
		case r0:
			p := s.Prog(v.Op.Asm())
			p.From.Type = obj.TYPE_REG
			p.From.Reg = r1
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
		case r1:
			p := s.Prog(v.Op.Asm())
			p.From.Type = obj.TYPE_REG
			p.From.Reg = r0
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
		default:
			v.Fatalf("output not in same register as an input %s", v.LongString())
		}

	case ssa.OpAMD64SUBQborrow, ssa.OpAMD64SBBQ:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

	case ssa.OpAMD64ADDQconstcarry, ssa.OpAMD64ADCQconst, ssa.OpAMD64SUBQconstborrow, ssa.OpAMD64SBBQconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

	case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst:
		r := v.Reg()
		a := v.Args[0].Reg()
		if r == a {
			switch v.AuxInt {
			case 1:
				var asm obj.As
				// INC/DEC is one byte smaller than ADD/SUB $1 and
				// measured a little faster, so prefer it when the
				// destination is the same as the source register.
				if v.Op == ssa.OpAMD64ADDQconst {
					asm = x86.AINCQ
				} else {
					asm = x86.AINCL
				}
				p := s.Prog(asm)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = r
				return
			case -1:
				var asm obj.As
				if v.Op == ssa.OpAMD64ADDQconst {
					asm = x86.ADECQ
				} else {
					asm = x86.ADECL
				}
				p := s.Prog(asm)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = r
				return
			case 0x80:
				// 'SUBQ $-0x80' is shorter to encode than
				// and functionally equivalent to 'ADDQ $0x80'.
				asm := x86.ASUBL
				if v.Op == ssa.OpAMD64ADDQconst {
					asm = x86.ASUBQ
				}
				p := s.Prog(asm)
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = -0x80
				p.To.Type = obj.TYPE_REG
				p.To.Reg = r
				return

			}
			p := s.Prog(v.Op.Asm())
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = v.AuxInt
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
			return
		}
		var asm obj.As
		if v.Op == ssa.OpAMD64ADDQconst {
			asm = x86.ALEAQ
		} else {
			asm = x86.ALEAL
		}
		p := s.Prog(asm)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = a
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r

	case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ, ssa.OpAMD64CMOVWEQ,
		ssa.OpAMD64CMOVQLT, ssa.OpAMD64CMOVLLT, ssa.OpAMD64CMOVWLT,
		ssa.OpAMD64CMOVQNE, ssa.OpAMD64CMOVLNE, ssa.OpAMD64CMOVWNE,
		ssa.OpAMD64CMOVQGT, ssa.OpAMD64CMOVLGT, ssa.OpAMD64CMOVWGT,
		ssa.OpAMD64CMOVQLE, ssa.OpAMD64CMOVLLE, ssa.OpAMD64CMOVWLE,
		ssa.OpAMD64CMOVQGE, ssa.OpAMD64CMOVLGE, ssa.OpAMD64CMOVWGE,
		ssa.OpAMD64CMOVQHI, ssa.OpAMD64CMOVLHI, ssa.OpAMD64CMOVWHI,
		ssa.OpAMD64CMOVQLS, ssa.OpAMD64CMOVLLS, ssa.OpAMD64CMOVWLS,
		ssa.OpAMD64CMOVQCC, ssa.OpAMD64CMOVLCC, ssa.OpAMD64CMOVWCC,
		ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS,
		ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF,
		ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF:
		// Flag condition: ^ZERO || PARITY
		// Generate:
		//   CMOV*NE  SRC,DST
		//   CMOV*PS  SRC,DST
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		var q *obj.Prog
		if v.Op == ssa.OpAMD64CMOVQNEF {
			q = s.Prog(x86.ACMOVQPS)
		} else if v.Op == ssa.OpAMD64CMOVLNEF {
			q = s.Prog(x86.ACMOVLPS)
		} else {
			q = s.Prog(x86.ACMOVWPS)
		}
		q.From.Type = obj.TYPE_REG
		q.From.Reg = v.Args[1].Reg()
		q.To.Type = obj.TYPE_REG
		q.To.Reg = v.Reg()

	case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF:
		// Flag condition: ZERO && !PARITY
		// Generate:
		//   MOV      SRC,TMP
		//   CMOV*NE  DST,TMP
		//   CMOV*PC  TMP,DST
		//
		// TODO: we could generate:
		//   CMOV*NE  DST,SRC
		//   CMOV*PC  SRC,DST
		// But this requires a way for regalloc to know that SRC might be
		// clobbered by this instruction.
		t := v.RegTmp()
		opregreg(s, moveByType(v.Type), t, v.Args[1].Reg())

		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = t
		var q *obj.Prog
		if v.Op == ssa.OpAMD64CMOVQEQF {
			q = s.Prog(x86.ACMOVQPC)
		} else if v.Op == ssa.OpAMD64CMOVLEQF {
			q = s.Prog(x86.ACMOVLPC)
		} else {
			q = s.Prog(x86.ACMOVWPC)
		}
		q.From.Type = obj.TYPE_REG
		q.From.Reg = t
		q.To.Type = obj.TYPE_REG
		q.To.Reg = v.Reg()

	case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
		r := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p.AddRestSourceReg(v.Args[0].Reg())

	case ssa.OpAMD64ANDQconst:
		asm := v.Op.Asm()
		// If the constant is positive and fits into 32 bits, use ANDL.
		// This saves a few bytes of encoding.
		if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) {
			asm = x86.AANDL
		}
		p := s.Prog(asm)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
		ssa.OpAMD64ANDLconst,
		ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst,
		ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst,
		ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst,
		ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
		ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst,
		ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
		r := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8,
		ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8,
		ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
		p := s.Prog(v.Op.Asm())
		memIdx(&p.From, v)
		o := v.Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = o
		if v.AuxInt != 0 && v.Aux == nil {
			// Emit an additional LEA to add the displacement instead of creating a slow 3 operand LEA.
			switch v.Op {
			case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
				p = s.Prog(x86.ALEAQ)
			case ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8:
				p = s.Prog(x86.ALEAL)
			case ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
				p = s.Prog(x86.ALEAW)
			}
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = o
			p.To.Type = obj.TYPE_REG
			p.To.Reg = o
		}
		ssagen.AddAux(&p.From, v)
	case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
		ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB,
		ssa.OpAMD64BTL, ssa.OpAMD64BTQ:
		opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
	case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
		// Go assembler has swapped operands for UCOMISx relative to CMP,
		// must account for that right here.
		opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
	case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = v.AuxInt
	case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
		ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
		ssa.OpAMD64BTSQconst,
		ssa.OpAMD64BTCQconst,
		ssa.OpAMD64BTRQconst:
		op := v.Op
		if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
			// Emit 32-bit version because it's shorter
			op = ssa.OpAMD64BTLconst
		}
		p := s.Prog(op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Args[0].Reg()
	case ssa.OpAMD64CMPQload, ssa.OpAMD64CMPLload, ssa.OpAMD64CMPWload, ssa.OpAMD64CMPBload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Args[1].Reg()
	case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
		sc := v.AuxValAndOff()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux2(&p.From, v, sc.Off64())
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = sc.Val64()
	case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
		p := s.Prog(v.Op.Asm())
		memIdx(&p.From, v)
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Args[2].Reg()
	case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
		sc := v.AuxValAndOff()
		p := s.Prog(v.Op.Asm())
		memIdx(&p.From, v)
		ssagen.AddAux2(&p.From, v, sc.Off64())
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = sc.Val64()
	case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
		x := v.Reg()

		// If flags aren't live (indicated by v.Aux == nil),
		// then we can rewrite MOV $0, AX into XOR AX, AX.
		if v.AuxInt == 0 && v.Aux == nil {
			opregreg(s, x86.AXORL, x, x)
			break
		}

		asm := v.Op.Asm()
		// Use MOVL to move a small constant into a register
		// when the constant is positive and fits into 32 bits.
		if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) {
			// The upper 32bit are zeroed automatically when using MOVL.
			asm = x86.AMOVL
		}
		p := s.Prog(asm)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
		x := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x
	case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload,
		ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload,
		ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
		ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2,
		ssa.OpAMD64MOVBELloadidx1, ssa.OpAMD64MOVBELloadidx4, ssa.OpAMD64MOVBELloadidx8, ssa.OpAMD64MOVBEQloadidx1, ssa.OpAMD64MOVBEQloadidx8:
		p := s.Prog(v.Op.Asm())
		memIdx(&p.From, v)
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
		ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify,
		ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify,
		ssa.OpAMD64MOVBEQstore, ssa.OpAMD64MOVBELstore, ssa.OpAMD64MOVBEWstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
		ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
		ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
		ssa.OpAMD64SUBLmodifyidx1, ssa.OpAMD64SUBLmodifyidx4, ssa.OpAMD64SUBLmodifyidx8, ssa.OpAMD64SUBQmodifyidx1, ssa.OpAMD64SUBQmodifyidx8,
		ssa.OpAMD64ANDLmodifyidx1, ssa.OpAMD64ANDLmodifyidx4, ssa.OpAMD64ANDLmodifyidx8, ssa.OpAMD64ANDQmodifyidx1, ssa.OpAMD64ANDQmodifyidx8,
		ssa.OpAMD64ORLmodifyidx1, ssa.OpAMD64ORLmodifyidx4, ssa.OpAMD64ORLmodifyidx8, ssa.OpAMD64ORQmodifyidx1, ssa.OpAMD64ORQmodifyidx8,
		ssa.OpAMD64XORLmodifyidx1, ssa.OpAMD64XORLmodifyidx4, ssa.OpAMD64XORLmodifyidx8, ssa.OpAMD64XORQmodifyidx1, ssa.OpAMD64XORQmodifyidx8,
		ssa.OpAMD64MOVBEWstoreidx1, ssa.OpAMD64MOVBEWstoreidx2, ssa.OpAMD64MOVBELstoreidx1, ssa.OpAMD64MOVBELstoreidx4, ssa.OpAMD64MOVBELstoreidx8, ssa.OpAMD64MOVBEQstoreidx1, ssa.OpAMD64MOVBEQstoreidx8:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		memIdx(&p.To, v)
		ssagen.AddAux(&p.To, v)
	case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
		sc := v.AuxValAndOff()
		off := sc.Off64()
		val := sc.Val()
		if val == 1 || val == -1 {
			var asm obj.As
			if v.Op == ssa.OpAMD64ADDQconstmodify {
				if val == 1 {
					asm = x86.AINCQ
				} else {
					asm = x86.ADECQ
				}
			} else {
				if val == 1 {
					asm = x86.AINCL
				} else {
					asm = x86.ADECL
				}
			}
			p := s.Prog(asm)
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = v.Args[0].Reg()
			ssagen.AddAux2(&p.To, v, off)
			break
		}
		fallthrough
	case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
		ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify,
		ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTCQconstmodify:
		sc := v.AuxValAndOff()
		off := sc.Off64()
		val := sc.Val64()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = val
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux2(&p.To, v, off)

	case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val64()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux2(&p.To, v, sc.Off64())
	case ssa.OpAMD64MOVOstoreconst:
		sc := v.AuxValAndOff()
		if sc.Val() != 0 {
			v.Fatalf("MOVO for non zero constants not implemented: %s", v.LongString())
		}

		if s.ABI != obj.ABIInternal {
			// zero X15 manually
			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x86.REG_X15
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux2(&p.To, v, sc.Off64())

	case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
		ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
		ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
		ssa.OpAMD64ORLconstmodifyidx1, ssa.OpAMD64ORLconstmodifyidx4, ssa.OpAMD64ORLconstmodifyidx8, ssa.OpAMD64ORQconstmodifyidx1, ssa.OpAMD64ORQconstmodifyidx8,
		ssa.OpAMD64XORLconstmodifyidx1, ssa.OpAMD64XORLconstmodifyidx4, ssa.OpAMD64XORLconstmodifyidx8, ssa.OpAMD64XORQconstmodifyidx1, ssa.OpAMD64XORQconstmodifyidx8:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		sc := v.AuxValAndOff()
		p.From.Offset = sc.Val64()
		switch {
		case p.As == x86.AADDQ && p.From.Offset == 1:
			p.As = x86.AINCQ
			p.From.Type = obj.TYPE_NONE
		case p.As == x86.AADDQ && p.From.Offset == -1:
			p.As = x86.ADECQ
			p.From.Type = obj.TYPE_NONE
		case p.As == x86.AADDL && p.From.Offset == 1:
			p.As = x86.AINCL
			p.From.Type = obj.TYPE_NONE
		case p.As == x86.AADDL && p.From.Offset == -1:
			p.As = x86.ADECL
			p.From.Type = obj.TYPE_NONE
		}
		memIdx(&p.To, v)
		ssagen.AddAux2(&p.To, v, sc.Off64())
	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS, ssa.OpAMD64VPBROADCASTB, ssa.OpAMD64PMOVMSKB:
		opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
	case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
		r := v.Reg()
		// Break false dependency on destination register.
		opregreg(s, x86.AXORPS, r, r)
		opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
	case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i, ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
		var p *obj.Prog
		switch v.Op {
		case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i:
			p = s.Prog(x86.AMOVQ)
		case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
			p = s.Prog(x86.AMOVL)
		}
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64ADDQload, ssa.OpAMD64ADDLload, ssa.OpAMD64SUBQload, ssa.OpAMD64SUBLload,
		ssa.OpAMD64ANDQload, ssa.OpAMD64ANDLload, ssa.OpAMD64ORQload, ssa.OpAMD64ORLload,
		ssa.OpAMD64XORQload, ssa.OpAMD64XORLload, ssa.OpAMD64ADDSDload, ssa.OpAMD64ADDSSload,
		ssa.OpAMD64SUBSDload, ssa.OpAMD64SUBSSload, ssa.OpAMD64MULSDload, ssa.OpAMD64MULSSload,
		ssa.OpAMD64DIVSDload, ssa.OpAMD64DIVSSload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[1].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64ADDLloadidx1, ssa.OpAMD64ADDLloadidx4, ssa.OpAMD64ADDLloadidx8, ssa.OpAMD64ADDQloadidx1, ssa.OpAMD64ADDQloadidx8,
		ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8,
		ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8,
		ssa.OpAMD64ORLloadidx1, ssa.OpAMD64ORLloadidx4, ssa.OpAMD64ORLloadidx8, ssa.OpAMD64ORQloadidx1, ssa.OpAMD64ORQloadidx8,
		ssa.OpAMD64XORLloadidx1, ssa.OpAMD64XORLloadidx4, ssa.OpAMD64XORLloadidx8, ssa.OpAMD64XORQloadidx1, ssa.OpAMD64XORQloadidx8,
		ssa.OpAMD64ADDSSloadidx1, ssa.OpAMD64ADDSSloadidx4, ssa.OpAMD64ADDSDloadidx1, ssa.OpAMD64ADDSDloadidx8,
		ssa.OpAMD64SUBSSloadidx1, ssa.OpAMD64SUBSSloadidx4, ssa.OpAMD64SUBSDloadidx1, ssa.OpAMD64SUBSDloadidx8,
		ssa.OpAMD64MULSSloadidx1, ssa.OpAMD64MULSSloadidx4, ssa.OpAMD64MULSDloadidx1, ssa.OpAMD64MULSDloadidx8,
		ssa.OpAMD64DIVSSloadidx1, ssa.OpAMD64DIVSSloadidx4, ssa.OpAMD64DIVSDloadidx1, ssa.OpAMD64DIVSDloadidx8:
		p := s.Prog(v.Op.Asm())

		r, i := v.Args[1].Reg(), v.Args[2].Reg()
		p.From.Type = obj.TYPE_MEM
		p.From.Scale = v.Op.Scale()
		if p.From.Scale == 1 && i == x86.REG_SP {
			r, i = i, r
		}
		p.From.Reg = r
		p.From.Index = i

		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64DUFFZERO:
		if s.ABI != obj.ABIInternal {
			// zero X15 manually
			opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
		}
		off := duffStart(v.AuxInt)
		adj := duffAdj(v.AuxInt)
		var p *obj.Prog
		if adj != 0 {
			p = s.Prog(x86.ALEAQ)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = adj
			p.From.Reg = x86.REG_DI
			p.To.Type = obj.TYPE_REG
			p.To.Reg = x86.REG_DI
		}
		p = s.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = ir.Syms.Duffzero
		p.To.Offset = off
	case ssa.OpAMD64DUFFCOPY:
		p := s.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = ir.Syms.Duffcopy
		if v.AuxInt%16 != 0 {
			v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
		}
		p.To.Offset = 14 * (64 - v.AuxInt/16)
		// 14 and 64 are magic constants. 14 is the number of bytes to encode:
		//	MOVUPS	(SI), X0
		//	ADDQ	$16, SI
		//	MOVUPS	X0, (DI)
		//	ADDQ	$16, DI
		// and 64 is the number of such blocks. See runtime/duff_amd64.s:duffcopy.

	case ssa.OpCopy:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x != y {
			opregreg(s, moveByType(v.Type), y, x)
		}
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(loadByType(v.Type))
		ssagen.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := s.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddrAuto(&p.To, v)
	case ssa.OpAMD64LoweredHasCPUFeature:
		p := s.Prog(x86.AMOVBLZX)
		p.From.Type = obj.TYPE_MEM
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
		// The loop only runs once.
		for _, ap := range v.Block.Func.RegArgs {
			// Pass the spill/unspill information along to the assembler, offset by size of return address pushed on stack.
			addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize)
			s.FuncInfo().AddSpill(
				obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByType(ap.Type), Spill: storeByType(ap.Type)})
		}
		v.Block.Func.RegArgs = nil
		ssagen.CheckArgReg(v)
	case ssa.OpAMD64LoweredGetClosurePtr:
		// Closure pointer is DX.
		ssagen.CheckLoweredGetClosurePtr(v)
	case ssa.OpAMD64LoweredGetG:
		if s.ABI == obj.ABIInternal {
			v.Fatalf("LoweredGetG should not appear in ABIInternal")
		}
		r := v.Reg()
		getgFromTLS(s, r)
	case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLtail:
		if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
			// zeroing X15 when entering ABIInternal from ABI0
			if buildcfg.GOOS != "plan9" {
				opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
			}
			// set G register from TLS
			getgFromTLS(s, x86.REG_R14)
		}
		if v.Op == ssa.OpAMD64CALLtail {
			s.TailCall(v)
			break
		}
		s.Call(v)
		if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
			// zeroing X15 when returning from ABI0 to ABIInternal
			if buildcfg.GOOS != "plan9" {
				opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
			}
			// set G register from TLS
			getgFromTLS(s, x86.REG_R14)
		}
	case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
		s.Call(v)

	case ssa.OpAMD64LoweredGetCallerPC:
		p := s.Prog(x86.AMOVQ)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = -8
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64LoweredGetCallerSP:
		// caller's SP is the address of the first arg
		mov := x86.AMOVQ
		if types.PtrSize == 4 {
			mov = x86.AMOVL
		}
		p := s.Prog(mov)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64LoweredWB:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		// AuxInt encodes how many buffer entries we need.
		p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]

	case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
		s.UseArgs(int64(2 * types.PtrSize))

	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
		ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
		ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
		p := s.Prog(v.Op.Asm())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64NEGLflags:
		p := s.Prog(v.Op.Asm())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

	case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		switch v.Op {
		case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ:
			p.To.Reg = v.Reg0()
		case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
			p.To.Reg = v.Reg()
		}
	case ssa.OpAMD64ROUNDSD:
		p := s.Prog(v.Op.Asm())
		val := v.AuxInt
		// 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc
		if val < 0 || val > 3 {
			v.Fatalf("Invalid rounding mode")
		}
		p.From.Offset = val
		p.From.Type = obj.TYPE_CONST
		p.AddRestSourceReg(v.Args[0].Reg())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL,
		ssa.OpAMD64TZCNTQ, ssa.OpAMD64TZCNTL,
		ssa.OpAMD64LZCNTQ, ssa.OpAMD64LZCNTL:
		if v.Args[0].Reg() != v.Reg() {
			// POPCNT/TZCNT/LZCNT have a false dependency on the destination
			// register on some Intel CPUs.
			// Xor the register before issuing the instruction to break it.
			opregreg(s, x86.AXORL, v.Reg(), v.Reg())
		}
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
		ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
		ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
		ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
		ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
		ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
		ssa.OpAMD64SETA, ssa.OpAMD64SETAE,
		ssa.OpAMD64SETO:
		p := s.Prog(v.Op.Asm())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpAMD64SETEQstore, ssa.OpAMD64SETNEstore,
		ssa.OpAMD64SETLstore, ssa.OpAMD64SETLEstore,
		ssa.OpAMD64SETGstore, ssa.OpAMD64SETGEstore,
		ssa.OpAMD64SETBstore, ssa.OpAMD64SETBEstore,
		ssa.OpAMD64SETAstore, ssa.OpAMD64SETAEstore:
		p := s.Prog(v.Op.Asm())
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)

	case ssa.OpAMD64SETEQstoreidx1, ssa.OpAMD64SETNEstoreidx1,
		ssa.OpAMD64SETLstoreidx1, ssa.OpAMD64SETLEstoreidx1,
		ssa.OpAMD64SETGstoreidx1, ssa.OpAMD64SETGEstoreidx1,
		ssa.OpAMD64SETBstoreidx1, ssa.OpAMD64SETBEstoreidx1,
		ssa.OpAMD64SETAstoreidx1, ssa.OpAMD64SETAEstoreidx1:
		p := s.Prog(v.Op.Asm())
		memIdx(&p.To, v)
		ssagen.AddAux(&p.To, v)

	case ssa.OpAMD64SETNEF:
		t := v.RegTmp()
		p := s.Prog(v.Op.Asm())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		q := s.Prog(x86.ASETPS)
		q.To.Type = obj.TYPE_REG
		q.To.Reg = t
		// ORL avoids partial register write and is smaller than ORQ, used by old compiler
		opregreg(s, x86.AORL, v.Reg(), t)

	case ssa.OpAMD64SETEQF:
		t := v.RegTmp()
		p := s.Prog(v.Op.Asm())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		q := s.Prog(x86.ASETPC)
		q.To.Type = obj.TYPE_REG
		q.To.Reg = t
		// ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
		opregreg(s, x86.AANDL, v.Reg(), t)

	case ssa.OpAMD64InvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
	case ssa.OpAMD64REPSTOSQ:
		s.Prog(x86.AREP)
		s.Prog(x86.ASTOSQ)
	case ssa.OpAMD64REPMOVSQ:
		s.Prog(x86.AREP)
		s.Prog(x86.AMOVSQ)
	case ssa.OpAMD64LoweredNilCheck:
		// Issue a load which will fault if the input is nil.
		// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
		// Should we use the 3-byte TESTB $0, (reg) instead? It is larger
		// but it doesn't have false dependency on AX.
		// Or maybe allocate an output register and use MOVL (reg),reg2 ?
		// That trades clobbering flags for clobbering a register.
		p := s.Prog(x86.ATESTB)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x86.REG_AX
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		if logopt.Enabled() {
			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
		}
		if base.Debug.Nil != 0 && v.Pos.Line() > 1 {
			base.WarnfAt(v.Pos, "generated nil check")
		}
	case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Reg0()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[1].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
		s.Prog(x86.ALOCK)
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Reg0()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[1].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
		if v.Args[1].Reg() != x86.REG_AX {
			v.Fatalf("input[1] not in AX %s", v.LongString())
		}
		s.Prog(x86.ALOCK)
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
		p = s.Prog(x86.ASETEQ)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ANDQlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock, ssa.OpAMD64ORQlock:
		// Atomic memory operations that don't need to return the old value.
		s.Prog(x86.ALOCK)
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpAMD64LoweredAtomicAnd64, ssa.OpAMD64LoweredAtomicOr64, ssa.OpAMD64LoweredAtomicAnd32, ssa.OpAMD64LoweredAtomicOr32:
		// Atomic memory operations that need to return the old value.
		// They are done with a compare-and-exchange loop so we can
		// observe the old value:
		// loop:
		//	MOVQ mask, tmp
		//	MOVQ (addr), AX
		//	ANDQ AX, tmp
		//	LOCK CMPXCHGQ tmp, (addr)   (AX is the implicit old value to compare against)
		//	JNE loop
		// result is in AX
		mov := x86.AMOVQ
		op := x86.AANDQ
		cmpxchg := x86.ACMPXCHGQ
		switch v.Op {
		case ssa.OpAMD64LoweredAtomicOr64:
			op = x86.AORQ
		case ssa.OpAMD64LoweredAtomicAnd32:
			mov = x86.AMOVL
			op = x86.AANDL
			cmpxchg = x86.ACMPXCHGL
		case ssa.OpAMD64LoweredAtomicOr32:
			mov = x86.AMOVL
			op = x86.AORL
			cmpxchg = x86.ACMPXCHGL
		}
		addr := v.Args[0].Reg()
		mask := v.Args[1].Reg()
		tmp := v.RegTmp()
		p1 := s.Prog(mov)
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = mask
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = tmp
		p2 := s.Prog(mov)
		p2.From.Type = obj.TYPE_MEM
		p2.From.Reg = addr
		ssagen.AddAux(&p2.From, v)
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = x86.REG_AX
		p3 := s.Prog(op)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = x86.REG_AX
		p3.To.Type = obj.TYPE_REG
		p3.To.Reg = tmp
		s.Prog(x86.ALOCK)
		p5 := s.Prog(cmpxchg)
		p5.From.Type = obj.TYPE_REG
		p5.From.Reg = tmp
		p5.To.Type = obj.TYPE_MEM
		p5.To.Reg = addr
		ssagen.AddAux(&p5.To, v)
		p6 := s.Prog(x86.AJNE)
		p6.To.Type = obj.TYPE_BRANCH
		p6.To.SetTarget(p1)
	case ssa.OpAMD64PrefetchT0, ssa.OpAMD64PrefetchNTA:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpClobber:
		p := s.Prog(x86.AMOVL)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0xdeaddead
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = x86.REG_SP
		ssagen.AddAux(&p.To, v)
		p = s.Prog(x86.AMOVL)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0xdeaddead
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = x86.REG_SP
		ssagen.AddAux(&p.To, v)
		p.To.Offset += 4
	case ssa.OpClobberReg:
		x := uint64(0xdeaddeaddeaddead)
		p := s.Prog(x86.AMOVQ)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(x)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

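// blockJump gives, for each conditional block kind, the jump instruction used
// to branch to the first successor (asm) and the inverted jump used to branch
// to the second successor instead (invasm).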
var blockJump = [...]struct {
	asm, invasm obj.As
}{
	ssa.BlockAMD64EQ:  {x86.AJEQ, x86.AJNE},
	ssa.BlockAMD64NE:  {x86.AJNE, x86.AJEQ},
	ssa.BlockAMD64LT:  {x86.AJLT, x86.AJGE},
	ssa.BlockAMD64GE:  {x86.AJGE, x86.AJLT},
	ssa.BlockAMD64LE:  {x86.AJLE, x86.AJGT},
	ssa.BlockAMD64GT:  {x86.AJGT, x86.AJLE},
	ssa.BlockAMD64OS:  {x86.AJOS, x86.AJOC},
	ssa.BlockAMD64OC:  {x86.AJOC, x86.AJOS},
	ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
	ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
	ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
	ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI},
	ssa.BlockAMD64ORD: {x86.AJPC, x86.AJPS},
	ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
}

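// eqfJumps and nefJumps are the two-jump sequences passed to CombJump for the
// floating-point EQF and NEF block kinds, which need a pair of conditional
// jumps (one testing ZF, one testing PF). The first row is used when the next
// block laid out is b.Succs[0], the second when it is b.Succs[1].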
var eqfJumps = [2][2]ssagen.IndexJump{
	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}},
	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}},
}
var nefJumps = [2][2]ssagen.IndexJump{
	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}},
	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}},
}

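// ssaGenBlock emits the control-flow instructions (jumps, returns) that end
// block b, given that next is the block laid out immediately after it.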
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in rax:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(x86.ATESTL)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x86.REG_AX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = x86.REG_AX
		p = s.Prog(x86.AJNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit, ssa.BlockRetJmp:
	case ssa.BlockRet:
		s.Prog(obj.ARET)

	case ssa.BlockAMD64EQF:
		s.CombJump(b, next, &eqfJumps)

	case ssa.BlockAMD64NEF:
		s.CombJump(b, next, &nefJumps)

	case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
		ssa.BlockAMD64LT, ssa.BlockAMD64GE,
		ssa.BlockAMD64LE, ssa.BlockAMD64GT,
		ssa.BlockAMD64OS, ssa.BlockAMD64OC,
		ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
		ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
		jmp := blockJump[b.Kind]
		switch next {
		case b.Succs[0].Block():
			s.Br(jmp.invasm, b.Succs[1].Block())
		case b.Succs[1].Block():
			s.Br(jmp.asm, b.Succs[0].Block())
		default:
			if b.Likely != ssa.BranchUnlikely {
				s.Br(jmp.asm, b.Succs[0].Block())
				s.Br(obj.AJMP, b.Succs[1].Block())
			} else {
				s.Br(jmp.invasm, b.Succs[1].Block())
				s.Br(obj.AJMP, b.Succs[0].Block())
			}
		}

	case ssa.BlockAMD64JUMPTABLE:
		// JMP      *(TABLE)(INDEX*8)
		p := s.Prog(obj.AJMP)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = b.Controls[1].Reg()
		p.To.Index = b.Controls[0].Reg()
		p.To.Scale = 8
		// Save jump tables for later resolution of the target blocks.
		s.JumpTables = append(s.JumpTables, b)

	default:
		b.Fatalf("branch not implemented: %s", b.LongString())
	}
}

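// loadRegResult emits a load of the result value stored in the stack slot for
// n (at offset off) into register reg.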
func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
	p := s.Prog(loadByType(t))
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_AUTO
	p.From.Sym = n.Linksym()
	p.From.Offset = n.FrameOffset() + off
	p.To.Type = obj.TYPE_REG
	p.To.Reg = reg
	return p
}

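// spillArgReg appends, after p, a store that spills argument register reg to
// the parameter stack slot for n at offset off, and returns the new Prog.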
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
	p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
	p.To.Name = obj.NAME_PARAM
	p.To.Sym = n.Linksym()
	p.Pos = p.Pos.WithNotStmt()
	return p
}