]>
git.saurik.com Git - apple/javascriptcore.git/blob - offlineasm/arm64.rb
1 # Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
31 # x<number> => GPR. This is both the generic name of the register, and the name used
32 # to indicate that the register is used in 64-bit mode.
33 # w<number> => GPR in 32-bit mode. This is the low 32-bits of the GPR. If it is
34 # mutated then the high 32-bit part of the register is zero filled.
35 # q<number> => FPR. This is the generic name of the register.
36 # d<number> => FPR used as an IEEE 64-bit binary floating point number (i.e. double).
38 # GPR conventions, to match the baseline JIT:
40 # x0 => return value, cached result, first argument, t0, a0, r0
43 # x9 => (nonArgGPR1 in baseline)
44 # x10 => t4 (unused in baseline)
45 # x11 => t5 (unused in baseline)
46 # x12 => t6 (unused in baseline)
47 # x13 => scratch (unused in baseline)
52 # x26 => timeout check (i.e. not touched by LLInt)
53 # x27 => csr1 (tagTypeNumber)
54 # x28 => csr2 (tagMask)
58 # FPR conventions, to match the baseline JIT:
64 # q4 => ft4 (unused in baseline)
65 # q5 => ft5 (unused in baseline)
68 def arm64GPRName(name
, kind
)
69 raise "bad GPR name #{name}" unless name
=~
/^x/
77 raise "Wrong kind: #{kind}"
81 def arm64FPRName(name
, kind
)
82 raise "bad FPR kind #{kind}" unless kind
== :double
83 raise "bad FPR name #{name}" unless name
=~
/^q/
88 def arm64Operand(kind
)
91 arm64GPRName(@name, kind
)
93 arm64FPRName(@name, kind
)
95 raise "Bad name: #{@name}"
100 ARM64_EXTRA_GPRS
= [SpecialRegister
.new("x16"), SpecialRegister
.new("x17"), SpecialRegister
.new("x13")]
101 ARM64_EXTRA_FPRS
= [SpecialRegister
.new("q31")]
104 def arm64Operand(kind
)
106 when 't0', 'a0', 'r0'
107 arm64GPRName('x0', kind
)
108 when 't1', 'a1', 'r1'
109 arm64GPRName('x1', kind
)
111 arm64GPRName('x2', kind
)
113 arm64GPRName('x23', kind
)
115 arm64GPRName('x10', kind
)
117 arm64GPRName('x11', kind
)
119 arm64GPRName('x12', kind
)
121 arm64GPRName('x25', kind
)
123 arm64GPRName('x27', kind
)
125 arm64GPRName('x28', kind
)
131 raise "Bad register name #{@name} at #{codeOriginString}"
137 def arm64Operand(kind
)
140 arm64FPRName('q0', kind
)
142 arm64FPRName('q1', kind
)
144 arm64FPRName('q2', kind
)
146 arm64FPRName('q3', kind
)
148 arm64FPRName('q4', kind
)
150 arm64FPRName('q5', kind
)
151 else "Bad register name #{@name} at #{codeOriginString}"
157 def arm64Operand(kind
)
158 raise "Invalid immediate #{value} at #{codeOriginString}" if value
< 0 or value
> 4095
# Format this Address as an ARM64 "[base, #offset]" memory operand.
# Offsets outside -255..4095 must already have been lowered away
# (riscLowerMalformedAddresses uses this same legal range).
def arm64Operand(kind)
    raise "Invalid offset #{offset.value} at #{codeOriginString}" unless (-255..4095).include? offset.value
    "[#{base.arm64Operand(:ptr)}, \##{offset.value}]"
end
# Emit a lea (load effective address) for this Address: an add of the
# constant offset to the base register, written into destination.
def arm64EmitLea(destination, kind)
    dst = destination.arm64Operand(kind)
    src = base.arm64Operand(kind)
    $asm.puts "add #{dst}, #{src}, \##{offset.value}"
end
# Format this BaseIndex as "[base, index, lsl #scaleShift]". The
# register-offset addressing mode cannot also carry an immediate, so a
# nonzero constant offset must have been lowered out beforehand.
def arm64Operand(kind)
    raise "Invalid offset #{offset.value} at #{codeOriginString}" unless offset.value.zero?
    "[#{base.arm64Operand(:ptr)}, #{index.arm64Operand(:ptr)}, lsl \##{scaleShift}]"
end
# Emit a lea for base + (index << scaleShift) as a shifted-register add.
def arm64EmitLea(destination, kind)
    dst = destination.arm64Operand(kind)
    lhs = base.arm64Operand(kind)
    rhs = index.arm64Operand(kind)
    $asm.puts "add #{dst}, #{lhs}, #{rhs}, lsl \##{scaleShift}"
end
class AbsoluteAddress
    # Absolute addresses cannot be encoded as ARM64 operands; earlier
    # lowering phases must convert them, so reaching this is an error.
    def arm64Operand(kind)
        raise "Unconverted absolute address #{address.value} at #{codeOriginString}"
    end
end
191 # FIXME: we could support AbsoluteAddress for lea, but we don't.
194 # Actual lowering code follows.
198 def getModifiedListARM64
200 result
= riscLowerNot(result
)
201 result
= riscLowerSimpleBranchOps(result
)
202 result
= riscLowerHardBranchOps64(result
)
203 result
= riscLowerShiftOps(result
)
204 result
= riscLowerMalformedAddresses(result
) {
207 when "loadb", "loadbs", "storeb", /^bb/, /^btb/, /^cb/, /^tb/
209 when "loadh", "loadhs"
211 when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi",
212 "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/,
213 /^ci/, /^ti/, "addis", "subis", "mulis", "smulli", "leai"
215 when "loadp", "storep", "loadq", "storeq", "loadd", "stored", "lshiftp", "lshiftq", "negp", "negq", "rshiftp", "rshiftq",
216 "urshiftp", "urshiftq", "addp", "addq", "mulp", "mulq", "andp", "andq", "orp", "orq", "subp", "subq", "xorp", "xorq", "addd",
217 "divd", "subd", "muld", "sqrtd", /^bp/, /^bq/, /^btp/, /^btq/, /^cp/, /^cq/, /^tp/, /^tq/, /^bd/,
218 "jmp", "call", "leap", "leaq"
221 raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
224 if address
.is_a
? BaseIndex
225 address
.offset
.value
== 0 and
226 (node
.opcode
=~
/^lea/ or address
.scale
== 1 or address
.scale
== size
)
227 elsif address
.is_a
? Address
228 (-255..4095).include? address
.offset
.value
233 result
= riscLowerMisplacedImmediates(result
, ["storeb", "storei", "storep", "storeq"])
234 result
= riscLowerMalformedImmediates(result
, 0..4095)
235 result
= riscLowerMisplacedAddresses(result
)
236 result
= riscLowerMalformedAddresses(result
) {
242 not (address
.is_a
? Address
and address
.offset
.value
< 0)
246 raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
249 result
= riscLowerTest(result
)
250 result
= assignRegistersToTemporaries(result
, :gpr, ARM64_EXTRA_GPRS
)
251 result
= assignRegistersToTemporaries(result
, :fpr, ARM64_EXTRA_FPRS
)
256 def arm64Operands(operands
, kinds
)
258 raise "Mismatched operand lists: #{operands.inspect} and #{kinds.inspect}" if operands
.size !
= kinds
.size
260 kinds
= operands
.map
{ kinds
}
262 (0...operands
.size
).map
{
264 operands
[index
].arm64Operand(kinds
[index
])
268 def arm64FlippedOperands(operands
, kinds
)
270 kinds
= [kinds
[-1]] + kinds
[0..-2]
272 arm64Operands([operands
[-1]] + operands
[0..-2], kinds
)
# TAC = three address code. Format operands for a two-operand ARM64
# instruction form: with three operands, flip them (destination
# first); with two, duplicate operands[1] so it serves as both the
# destination and the first source.
def arm64TACOperands(operands, kind)
    return arm64FlippedOperands(operands, kind) if operands.size == 3

    raise unless operands.size == 2

    operands[1].arm64Operand(kind) + ", " + arm64FlippedOperands(operands, kind)
end
286 def emitARM64Add(opcode
, operands
, kind
)
287 if operands
.size
== 3
288 raise unless operands
[1].register
?
289 raise unless operands
[2].register
?
291 if operands
[0].immediate
?
292 if operands
[0].value
== 0 and flag !~
/s$/
293 unless operands
[1] == operands
[2]
294 $asm.puts
"mov #{arm64FlippedOperands(operands[1..2], kind)}"
297 $asm.puts
"#{opcode} #{arm64Operands(operands.reverse, kind)}"
302 raise unless operands
[0].register
?
303 $asm.puts
"#{opcode} #{arm64FlippedOperands(operands, kind)}"
307 raise unless operands
.size
== 2
309 if operands
[0].immediate
? and operands
[0].value
== 0 and opcode !~
/s$/
313 $asm.puts
"#{opcode} #{arm64TACOperands(operands, kind)}"
# Emit opcode with the operands in their given (unflipped) order.
def emitARM64Unflipped(opcode, operands, kind)
    formatted = arm64Operands(operands, kind)
    $asm.puts "#{opcode} #{formatted}"
end
# Emit a three-address-code style instruction (see arm64TACOperands).
def emitARM64TAC(opcode, operands, kind)
    formatted = arm64TACOperands(operands, kind)
    $asm.puts "#{opcode} #{formatted}"
end
# Emit opcode with flipped operands: offlineasm puts the destination
# last, ARM64 syntax puts it first.
def emitARM64(opcode, operands, kind)
    formatted = arm64FlippedOperands(operands, kind)
    $asm.puts "#{opcode} #{formatted}"
end
# Emit a load/store. When the address carries a negative offset, use
# the alternate mnemonic opcodeNegativeOffset (the unscaled-offset
# forms, e.g. ldur/stur), since the plain immediate forms do not
# accept negative offsets.
def emitARM64Access(opcode, opcodeNegativeOffset, register, memory, kind)
    if memory.is_a? Address and memory.offset.value < 0
        $asm.puts "#{opcodeNegativeOffset} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
    else
        $asm.puts "#{opcode} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
    end
end
# Emit a shift. Immediate shift amounts are encoded via the bitfield
# instruction opcodeImmediate (ubfm/sbfm); the caller's block maps a
# shift amount to the two bitfield "magic number" fields. Register
# shift amounts fall through to opcodeRegs (lslv/asrv/lsrv) in TAC form.
def emitARM64Shift(opcodeRegs, opcodeImmediate, operands, kind)
    if operands.size == 3 and operands[1].immediate?
        magicNumbers = yield operands[1].value
        $asm.puts "#{opcodeImmediate} #{operands[2].arm64Operand(kind)}, #{operands[0].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
        return
    end

    if operands.size == 2 and operands[0].immediate?
        magicNumbers = yield operands[0].value
        $asm.puts "#{opcodeImmediate} #{operands[1].arm64Operand(kind)}, #{operands[1].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
        return
    end

    emitARM64TAC(opcodeRegs, operands, kind)
end
# Emit a flag-setting instruction (opcode over all but the last
# operand) followed by a conditional branch to the label given as the
# last operand.
def emitARM64Branch(opcode, operands, kind, branchOpcode)
    emitARM64Unflipped(opcode, operands[0..-2], kind)
    $asm.puts "#{branchOpcode} #{operands[-1].asmLabel}"
end
# Emit a compare producing a 0/1 result: subs against the zero
# register sets the flags, then csinc materializes the boolean.
# "csinc dst, wzr, wzr, cond" yields 0 when cond holds and 1
# otherwise, so callers pass the INVERSE condition (e.g. "eq" for
# cineq below).
def emitARM64Compare(operands, kind, compareCode)
    emitARM64Unflipped("subs #{arm64GPRName('xzr', kind)}, ", operands[0..-2], kind)
    $asm.puts "csinc #{operands[-1].arm64Operand(:int)}, wzr, wzr, #{compareCode}"
end
363 def emitARM64MoveImmediate(value
, target
)
365 isNegative
= value
< 0
366 [48, 32, 16, 0].each
{
368 currentValue
= (value
>> shift
) & 0xffff
369 next if currentValue
== (isNegative
? 0xffff : 0) and shift !
= 0
372 $asm.puts
"movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}"
374 $asm.puts
"movz #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
378 $asm.puts
"movk #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
385 $asm.comment codeOriginString
388 emitARM64Add("add", operands
, :int)
390 emitARM64Add("adds", operands
, :int)
392 emitARM64Add("add", operands
, :ptr)
394 emitARM64Add("adds", operands
, :ptr)
396 emitARM64Add("add", operands
, :ptr)
398 emitARM64TAC("and", operands
, :int)
400 emitARM64TAC("and", operands
, :ptr)
402 emitARM64TAC("and", operands
, :ptr)
404 emitARM64TAC("orr", operands
, :int)
406 emitARM64TAC("orr", operands
, :ptr)
408 emitARM64TAC("orr", operands
, :ptr)
410 emitARM64TAC("eor", operands
, :int)
412 emitARM64TAC("eor", operands
, :ptr)
414 emitARM64TAC("eor", operands
, :ptr)
416 emitARM64Shift("lslv", "ubfm", operands
, :int) {
418 [32 - value
, 31 - value
]
421 emitARM64Shift("lslv", "ubfm", operands
, :ptr) {
423 [64 - value
, 63 - value
]
426 emitARM64Shift("lslv", "ubfm", operands
, :ptr) {
428 [64 - value
, 63 - value
]
431 emitARM64Shift("asrv", "sbfm", operands
, :int) {
436 emitARM64Shift("asrv", "sbfm", operands
, :ptr) {
441 emitARM64Shift("asrv", "sbfm", operands
, :ptr) {
446 emitARM64Shift("lsrv", "ubfm", operands
, :int) {
451 emitARM64Shift("lsrv", "ubfm", operands
, :ptr) {
456 emitARM64Shift("lsrv", "ubfm", operands
, :ptr) {
461 $asm.puts
"madd #{arm64TACOperands(operands, :int)}, wzr"
463 $asm.puts
"madd #{arm64TACOperands(operands, :ptr)}, xzr"
465 $asm.puts
"madd #{arm64TACOperands(operands, :ptr)}, xzr"
467 emitARM64TAC("sub", operands
, :int)
469 emitARM64TAC("sub", operands
, :ptr)
471 emitARM64TAC("sub", operands
, :ptr)
473 emitARM64TAC("subs", operands
, :int)
475 $asm.puts
"sub #{operands[0].arm64Operand(:int)}, wzr, #{operands[0].arm64Operand(:int)}"
477 $asm.puts
"sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
479 $asm.puts
"sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
481 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :int)
483 emitARM64Access("ldrsw", "ldursw", operands
[1], operands
[0], :ptr)
485 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :ptr)
487 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :ptr)
489 emitARM64Unflipped("str", operands
, :int)
491 emitARM64Unflipped("str", operands
, :ptr)
493 emitARM64Unflipped("str", operands
, :ptr)
495 emitARM64Access("ldrb", "ldurb", operands
[1], operands
[0], :int)
497 emitARM64Access("ldrsb", "ldursb", operands
[1], operands
[0], :int)
499 emitARM64Unflipped("strb", operands
, :int)
501 emitARM64Access("ldrh", "ldurh", operands
[1], operands
[0], :int)
503 emitARM64Access("ldrsh", "ldursh", operands
[1], operands
[0], :int)
505 emitARM64Unflipped("strh", operands
, :int)
507 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :double)
509 emitARM64Unflipped("str", operands
, :double)
511 emitARM64TAC("fadd", operands
, :double)
513 emitARM64TAC("fdiv", operands
, :double)
515 emitARM64TAC("fsub", operands
, :double)
517 emitARM64TAC("fmul", operands
, :double)
519 emitARM64("fsqrt", operands
, :double)
521 emitARM64("scvtf", operands
, [:int, :double])
523 emitARM64Branch("fcmp", operands
, :double, "b.eq")
525 emitARM64Unflipped("fcmp", operands
[0..1], :double)
526 isUnordered
= LocalLabel
.unique("bdneq")
527 $asm.puts
"b.vs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
528 $asm.puts
"b.ne #{operands[2].asmLabel}"
529 isUnordered
.lower("ARM64")
531 emitARM64Branch("fcmp", operands
, :double, "b.gt")
533 emitARM64Branch("fcmp", operands
, :double, "b.ge")
535 emitARM64Branch("fcmp", operands
, :double, "b.mi")
537 emitARM64Branch("fcmp", operands
, :double, "b.ls")
539 emitARM64Unflipped("fcmp", operands
[0..1], :double)
540 $asm.puts
"b.vs #{operands[2].asmLabel}"
541 $asm.puts
"b.eq #{operands[2].asmLabel}"
543 emitARM64Branch("fcmp", operands
, :double, "b.ne")
545 emitARM64Branch("fcmp", operands
, :double, "b.hi")
547 emitARM64Branch("fcmp", operands
, :double, "b.pl")
549 emitARM64Branch("fcmp", operands
, :double, "b.lt")
551 emitARM64Branch("fcmp", operands
, :double, "b.le")
553 # FIXME: may be a good idea to just get rid of this instruction, since the interpreter
554 # currently does not use it.
555 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
557 emitARM64("fcvtzs", operands
, [:double, :int])
559 # FIXME: remove this instruction, or use it and implement it. Currently it's not
561 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
563 # FIXME: remove it or support it.
564 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
566 emitARM64Unflipped("pop", operands
, :ptr)
568 emitARM64Unflipped("push", operands
, :ptr)
570 if operands
[0].immediate
?
571 emitARM64MoveImmediate(operands
[0].value
, operands
[1])
573 emitARM64("mov", operands
, :ptr)
576 emitARM64("sxtw", operands
, :ptr)
578 emitARM64("sxtw", operands
, :ptr)
580 emitARM64("uxtw", operands
, :ptr)
582 emitARM64("uxtw", operands
, :ptr)
586 if operands
[0].immediate
? and operands
[0].value
== 0
587 $asm.puts
"cbz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
588 elsif operands
[1].immediate
? and operands
[1].value
== 0
589 $asm.puts
"cbz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
591 emitARM64Branch("subs wzr, ", operands
, :int, "b.eq")
594 if operands
[0].immediate
? and operands
[0].value
== 0
595 $asm.puts
"cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
596 elsif operands
[1].immediate
? and operands
[1].value
== 0
597 $asm.puts
"cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
599 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.eq")
602 if operands
[0].immediate
? and operands
[0].value
== 0
603 $asm.puts
"cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
604 elsif operands
[1].immediate
? and operands
[1].value
== 0
605 $asm.puts
"cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
607 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.eq")
609 when "bineq", "bbneq"
610 if operands
[0].immediate
? and operands
[0].value
== 0
611 $asm.puts
"cbnz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
612 elsif operands
[1].immediate
? and operands
[1].value
== 0
613 $asm.puts
"cbnz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
615 emitARM64Branch("subs wzr, ", operands
, :int, "b.ne")
618 if operands
[0].immediate
? and operands
[0].value
== 0
619 $asm.puts
"cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
620 elsif operands
[1].immediate
? and operands
[1].value
== 0
621 $asm.puts
"cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
623 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.ne")
626 if operands
[0].immediate
? and operands
[0].value
== 0
627 $asm.puts
"cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
628 elsif operands
[1].immediate
? and operands
[1].value
== 0
629 $asm.puts
"cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
631 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.ne")
634 emitARM64Branch("subs wzr, ", operands
, :int, "b.hi")
636 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.hi")
638 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.hi")
639 when "biaeq", "bbaeq"
640 emitARM64Branch("subs wzr, ", operands
, :int, "b.hs")
642 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.hs")
644 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.hs")
646 emitARM64Branch("subs wzr, ", operands
, :int, "b.lo")
648 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.lo")
650 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.lo")
651 when "bibeq", "bbbeq"
652 emitARM64Branch("subs wzr, ", operands
, :int, "b.ls")
654 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.ls")
656 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.ls")
658 emitARM64Branch("subs wzr, ", operands
, :int, "b.gt")
660 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.gt")
662 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.gt")
663 when "bigteq", "bbgteq"
664 emitARM64Branch("subs wzr, ", operands
, :int, "b.ge")
666 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.ge")
668 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.ge")
670 emitARM64Branch("subs wzr, ", operands
, :int, "b.lt")
672 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.lt")
674 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.lt")
675 when "bilteq", "bblteq"
676 emitARM64Branch("subs wzr, ", operands
, :int, "b.le")
678 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.le")
680 emitARM64Branch("subs xzr, ", operands
, :ptr, "b.le")
682 if operands
[0].label
?
683 $asm.puts
"b #{operands[0].asmLabel}"
685 emitARM64Unflipped("br", operands
, :ptr)
688 if operands
[0].label
?
689 $asm.puts
"bl #{operands[0].asmLabel}"
691 emitARM64Unflipped("blr", operands
, :ptr)
698 emitARM64Compare(operands
, :int, "ne")
700 emitARM64Compare(operands
, :ptr, "ne")
702 emitARM64Compare(operands
, :ptr, "ne")
703 when "cineq", "cbneq"
704 emitARM64Compare(operands
, :int, "eq")
706 emitARM64Compare(operands
, :ptr, "eq")
708 emitARM64Compare(operands
, :ptr, "eq")
710 emitARM64Compare(operands
, :int, "ls")
712 emitARM64Compare(operands
, :ptr, "ls")
714 emitARM64Compare(operands
, :ptr, "ls")
715 when "ciaeq", "cbaeq"
716 emitARM64Compare(operands
, :int, "lo")
718 emitARM64Compare(operands
, :ptr, "lo")
720 emitARM64Compare(operands
, :ptr, "lo")
722 emitARM64Compare(operands
, :int, "hs")
724 emitARM64Compare(operands
, :ptr, "hs")
726 emitARM64Compare(operands
, :ptr, "hs")
727 when "cibeq", "cbbeq"
728 emitARM64Compare(operands
, :int, "hi")
730 emitARM64Compare(operands
, :ptr, "hi")
732 emitARM64Compare(operands
, :ptr, "hi")
734 emitARM64Compare(operands
, :int, "ge")
736 emitARM64Compare(operands
, :ptr, "ge")
738 emitARM64Compare(operands
, :ptr, "ge")
739 when "cilteq", "cblteq"
740 emitARM64Compare(operands
, :int, "gt")
742 emitARM64Compare(operands
, :ptr, "gt")
744 emitARM64Compare(operands
, :ptr, "gt")
746 emitARM64Compare(operands
, :int, "le")
748 emitARM64Compare(operands
, :ptr, "le")
750 emitARM64Compare(operands
, :ptr, "le")
751 when "cigteq", "cbgteq"
752 emitARM64Compare(operands
, :int, "lt")
754 emitARM64Compare(operands
, :ptr, "lt")
756 emitARM64Compare(operands
, :ptr, "lt")
758 $asm.puts
"ldr #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
760 $asm.puts
"str #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
762 emitARM64("fmov", operands
, [:ptr, :double])
764 emitARM64("fmov", operands
, [:ptr, :double])
766 emitARM64("fmov", operands
, [:double, :ptr])
768 emitARM64("fmov", operands
, [:double, :ptr])
770 $asm.puts
"b.vs #{operands[0].asmLabel}"
772 $asm.puts
"b.mi #{operands[0].asmLabel}"
774 $asm.puts
"b.eq #{operands[0].asmLabel}"
776 $asm.puts
"b.ne #{operands[0].asmLabel}"
778 operands
[0].arm64EmitLea(operands
[1], :int)
780 operands
[0].arm64EmitLea(operands
[1], :ptr)
782 operands
[0].arm64EmitLea(operands
[1], :ptr)
784 $asm.puts
"smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
786 raise "Unhandled opcode #{opcode} at #{codeOriginString}"