# Copyright (C) 2011, 2012, 2014 Apple Inc. All rights reserved.
# Copyright (C) 2014 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

require "ast"
require "opt"
require "risc"

# Naming conventions:
#
# x<number> => GPR. This is both the generic name of the register, and the name used
#     to indicate that the register is used in 64-bit mode.
# w<number> => GPR in 32-bit mode. This is the low 32-bits of the GPR. If it is
#     mutated then the high 32-bit part of the register is zero filled.
# q<number> => FPR. This is the generic name of the register.
# d<number> => FPR used as an IEEE 64-bit binary floating point number (i.e. double).
#
# GPR conventions, to match the baseline JIT:
#
#  x0 => return value, cached result, first argument, t0, a0, r0
#  x1 => t1, a1, r1
#  x2 => t2, a2
#  x3 => t3, a3
#  x5 => t5
#  x6 => t7
#  x7 => t8
#  x9 => (nonArgGPR1 in baseline)
# x13 => scratch (unused in baseline)
# x16 => scratch
# x17 => scratch
# x23 => t4 (callee-save)
# x24 => t6 (callee-save)
# x27 => csr1 (tagTypeNumber)
# x28 => csr2 (tagMask)
# x29 => cfr
#  sp => sp
#  lr => lr
#
# FPR conventions, to match the baseline JIT:
#
#  q0 => ft0, fr
#  q1 => ft1
#  q2 => ft2
#  q3 => ft3
#  q4 => ft4 (unused in baseline)
#  q5 => ft5 (unused in baseline)
# q31 => scratch

def arm64GPRName(name, kind)
    raise "bad GPR name #{name}" unless name =~ /^x/
    number = name[1..-1]
    case kind
    when :int
        "w" + number
    when :ptr
        "x" + number
    else
        raise "Wrong kind: #{kind}"
    end
end

def arm64FPRName(name, kind)
    raise "bad FPR kind #{kind}" unless kind == :double
    raise "bad FPR name #{name}" unless name =~ /^q/
    "d" + name[1..-1]
end
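
# Only :double is a meaningful FPR kind here, so a q register is always
# emitted under its d (IEEE double) alias: arm64FPRName("q1", :double) => "d1".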

class SpecialRegister
    def arm64Operand(kind)
        case @name
        when /^x/
            arm64GPRName(@name, kind)
        when /^q/
            arm64FPRName(@name, kind)
        else
            raise "Bad name: #{@name}"
        end
    end
end

ARM64_EXTRA_GPRS = [SpecialRegister.new("x16"), SpecialRegister.new("x17"), SpecialRegister.new("x13")]
ARM64_EXTRA_FPRS = [SpecialRegister.new("q31")]

class RegisterID
    def arm64Operand(kind)
        case @name
        when 't0', 'a0', 'r0'
            arm64GPRName('x0', kind)
        when 't1', 'a1', 'r1'
            arm64GPRName('x1', kind)
        when 't2', 'a2'
            arm64GPRName('x2', kind)
        when 't3', 'a3'
            arm64GPRName('x3', kind)
        when 't4'
            arm64GPRName('x23', kind)
        when 't5'
            arm64GPRName('x5', kind)
        when 't6'
            arm64GPRName('x24', kind)
        when 't7'
            arm64GPRName('x6', kind)
        when 't8'
            arm64GPRName('x7', kind)
        when 'cfr'
            arm64GPRName('x29', kind)
        when 'csr1'
            arm64GPRName('x27', kind)
        when 'csr2'
            arm64GPRName('x28', kind)
        when 'sp'
            'sp'
        when 'lr'
            'lr'
        else
            raise "Bad register name #{@name} at #{codeOriginString}"
        end
    end
end

class FPRegisterID
    def arm64Operand(kind)
        case @name
        when 'ft0', 'fr'
            arm64FPRName('q0', kind)
        when 'ft1'
            arm64FPRName('q1', kind)
        when 'ft2'
            arm64FPRName('q2', kind)
        when 'ft3'
            arm64FPRName('q3', kind)
        when 'ft4'
            arm64FPRName('q4', kind)
        when 'ft5'
            arm64FPRName('q5', kind)
        else
            raise "Bad register name #{@name} at #{codeOriginString}"
        end
    end
end

class Immediate
    def arm64Operand(kind)
        raise "Invalid immediate #{value} at #{codeOriginString}" if value < 0 or value > 4095
        "\##{value}"
    end
end

class Address
    def arm64Operand(kind)
        raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value < -255 or offset.value > 4095
        "[#{base.arm64Operand(:ptr)}, \##{offset.value}]"
    end
    def arm64EmitLea(destination, kind)
        $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, \##{offset.value}"
    end
end

class BaseIndex
    def arm64Operand(kind)
        raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value != 0
        "[#{base.arm64Operand(:ptr)}, #{index.arm64Operand(:ptr)}, lsl \##{scaleShift}]"
    end

    def arm64EmitLea(destination, kind)
        $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, #{index.arm64Operand(kind)}, lsl \##{scaleShift}"
    end
end

class AbsoluteAddress
    def arm64Operand(kind)
        raise "Unconverted absolute address #{address.value} at #{codeOriginString}"
    end
end

# FIXME: We could support AbsoluteAddress for lea, but we don't.

# Actual lowering code follows.

def arm64LowerMalformedLoadStoreAddresses(list)
    newList = []

    def isAddressMalformed(operand)
        operand.is_a? Address and not (-255..4095).include? operand.offset.value
    end

    list.each {
        | node |
        if node.is_a? Instruction
            if node.opcode =~ /^store/ and isAddressMalformed(node.operands[1])
                address = node.operands[1]
                tmp = Tmp.new(codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
                newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0))], node.annotation)
            elsif node.opcode =~ /^load/ and isAddressMalformed(node.operands[0])
                address = node.operands[0]
                tmp = Tmp.new(codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
                newList << Instruction.new(node.codeOrigin, node.opcode, [BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0)), node.operands[1]], node.annotation)
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
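
# Illustrative sketch of the transformation above: a store such as
#
#     storep t0, 100000[cfr]
#
# whose offset falls outside the -255..4095 range becomes
#
#     move 100000, <tmp>
#     storep t0, [cfr, <tmp>, lsl #0]   (a BaseIndex with scale 1, offset 0)
#
# i.e. the out-of-range offset is materialized into a temporary and folded
# into the address as an index. (<tmp> is a placeholder; the real register is
# picked later by assignRegistersToTemporaries.)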

# Workaround for Cortex-A53 erratum (835769)
def arm64CortexA53Fix835769(list)
    newList = []
    lastOpcodeUnsafe = false

    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when /^store/, /^load/
                # List all macro instructions that can be lowered to a load, store or prefetch ARM64 assembly instruction
                lastOpcodeUnsafe = true
            when "muli", "mulp", "mulq", "smulli"
                # List all macro instructions that can be lowered to a 64-bit multiply-accumulate ARM64 assembly instruction
                # (defined as one of MADD, MSUB, SMADDL, SMSUBL, UMADDL or UMSUBL).
                if lastOpcodeUnsafe
                    newList << Instruction.new(node.codeOrigin, "nopCortexA53Fix835769", [])
                end
                lastOpcodeUnsafe = false
            else
                lastOpcodeUnsafe = false
            end
        end
        newList << node
    }
    newList
end
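
# For example, in a lowered sequence such as
#
#     loadq [cfr], t0
#     mulq t1, t2
#
# a "nopCortexA53Fix835769" placeholder is inserted between the load and the
# multiply; it later lowers to a literal nop guarded by #if CPU(ARM64_CORTEXA53)
# (see lowerARM64 below), breaking the load-followed-by-multiply-accumulate
# pattern that can trigger the erratum.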

class Sequence
    def getModifiedListARM64
        result = @list
        result = riscLowerNot(result)
        result = riscLowerSimpleBranchOps(result)
        result = riscLowerHardBranchOps64(result)
        result = riscLowerShiftOps(result)
        result = arm64LowerMalformedLoadStoreAddresses(result)
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            case node.opcode
            when "loadb", "loadbs", "storeb", /^bb/, /^btb/, /^cb/, /^tb/
                size = 1
            when "loadh", "loadhs"
                size = 2
            when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi",
                "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/,
                /^ci/, /^ti/, "addis", "subis", "mulis", "smulli", "leai"
                size = 4
            when "loadp", "storep", "loadq", "storeq", "loadd", "stored", "lshiftp", "lshiftq", "negp", "negq", "rshiftp", "rshiftq",
                "urshiftp", "urshiftq", "addp", "addq", "mulp", "mulq", "andp", "andq", "orp", "orq", "subp", "subq", "xorp", "xorq", "addd",
                "divd", "subd", "muld", "sqrtd", /^bp/, /^bq/, /^btp/, /^btq/, /^cp/, /^cq/, /^tp/, /^tq/, /^bd/,
                "jmp", "call", "leap", "leaq"
                size = 8
            else
                raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
            end

            if address.is_a? BaseIndex
                address.offset.value == 0 and
                    (node.opcode =~ /^lea/ or address.scale == 1 or address.scale == size)
            elsif address.is_a? Address
                (-255..4095).include? address.offset.value
            else
                false
            end
        }
        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"])
        result = riscLowerMalformedImmediates(result, 0..4095)
        result = riscLowerMisplacedAddresses(result)
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            case node.opcode
            when /^load/
                true
            when /^store/
                not (address.is_a? Address and address.offset.value < 0)
            when /^lea/
                true
            else
                raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
            end
        }
        result = riscLowerTest(result)
        result = assignRegistersToTemporaries(result, :gpr, ARM64_EXTRA_GPRS)
        result = assignRegistersToTemporaries(result, :fpr, ARM64_EXTRA_FPRS)
        result = arm64CortexA53Fix835769(result)
        return result
    end
end

def arm64Operands(operands, kinds)
    if kinds.is_a? Array
        raise "Mismatched operand lists: #{operands.inspect} and #{kinds.inspect}" if operands.size != kinds.size
    else
        kinds = operands.map { kinds }
    end
    (0...operands.size).map {
        | index |
        operands[index].arm64Operand(kinds[index])
    }.join(', ')
end

def arm64FlippedOperands(operands, kinds)
    if kinds.is_a? Array
        kinds = [kinds[-1]] + kinds[0..-2]
    end
    arm64Operands([operands[-1]] + operands[0..-2], kinds)
end
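
# Offlineasm treats the last operand as the destination, while ARM64 assembly
# wants the destination first. Flipping moves the last operand to the front,
# so arm64FlippedOperands([t1, t2], :int) yields "w2, w1".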

# TAC = three address code.
def arm64TACOperands(operands, kind)
    if operands.size == 3
        return arm64FlippedOperands(operands, kind)
    end

    raise unless operands.size == 2

    return operands[1].arm64Operand(kind) + ", " + arm64FlippedOperands(operands, kind)
end
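
# For two-operand instructions the destination doubles as the first source:
# arm64TACOperands([t1, t2], :int) yields "w2, w2, w1", so "subi t1, t2"
# (t2 -= t1) lowers to "sub w2, w2, w1".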

def emitARM64Add(opcode, operands, kind)
    if operands.size == 3
        raise unless operands[1].register?
        raise unless operands[2].register?

        if operands[0].immediate?
            # Adding 0 is a no-op (or a plain register move) unless the opcode
            # sets flags (its name ends in "s").
            if operands[0].value == 0 and opcode !~ /s$/
                unless operands[1] == operands[2]
                    $asm.puts "mov #{arm64FlippedOperands(operands[1..2], kind)}"
                end
            else
                $asm.puts "#{opcode} #{arm64Operands(operands.reverse, kind)}"
            end
            return
        end

        raise unless operands[0].register?
        $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}"
        return
    end

    raise unless operands.size == 2

    if operands[0].immediate? and operands[0].value == 0 and opcode !~ /s$/
        return
    end

    $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}"
end

def emitARM64Unflipped(opcode, operands, kind)
    $asm.puts "#{opcode} #{arm64Operands(operands, kind)}"
end

def emitARM64TAC(opcode, operands, kind)
    $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}"
end

def emitARM64(opcode, operands, kind)
    $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}"
end

def emitARM64Access(opcode, opcodeNegativeOffset, register, memory, kind)
    # Negative offsets require the unscaled-offset form (e.g. ldur instead of
    # ldr), which accepts a signed 9-bit immediate.
    if memory.is_a? Address and memory.offset.value < 0
        $asm.puts "#{opcodeNegativeOffset} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
        return
    end

    $asm.puts "#{opcode} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
end

def emitARM64Shift(opcodeRegs, opcodeImmediate, operands, kind)
    if operands.size == 3 and operands[1].immediate?
        magicNumbers = yield operands[1].value
        $asm.puts "#{opcodeImmediate} #{operands[2].arm64Operand(kind)}, #{operands[0].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
        return
    end

    if operands.size == 2 and operands[0].immediate?
        magicNumbers = yield operands[0].value
        $asm.puts "#{opcodeImmediate} #{operands[1].arm64Operand(kind)}, #{operands[1].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
        return
    end

    emitARM64TAC(opcodeRegs, operands, kind)
end
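
# Immediate shifts are encoded as bitfield-move instructions; the block passed
# in supplies the two immr/imms "magic numbers". For example "lshifti 3, t0"
# uses the lshifti block below, which yields [32 - 3, 31 - 3] = [29, 28], and
# so emits "ubfm w0, w0, #29, #28", the canonical encoding of lsl w0, w0, #3.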

def emitARM64Branch(opcode, operands, kind, branchOpcode)
    emitARM64Unflipped(opcode, operands[0..-2], kind)
    $asm.puts "#{branchOpcode} #{operands[-1].asmLabel}"
end

def emitARM64Compare(operands, kind, compareCode)
    emitARM64Unflipped("subs #{arm64GPRName('xzr', kind)}, ", operands[0..-2], kind)
    $asm.puts "csinc #{operands[-1].arm64Operand(:int)}, wzr, wzr, #{compareCode}"
end

def emitARM64MoveImmediate(value, target)
    first = true
    isNegative = value < 0
    [48, 32, 16, 0].each {
        | shift |
        currentValue = (value >> shift) & 0xffff
        next if currentValue == (isNegative ? 0xffff : 0) and (shift != 0 or !first)
        if first
            if isNegative
                $asm.puts "movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}"
            else
                $asm.puts "movz #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
            end
            first = false
        else
            $asm.puts "movk #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
        end
    }
end

class Instruction
    def lowerARM64
        $asm.comment codeOriginString
        $asm.annotation annotation if $enableInstrAnnotations

        case opcode
454 emitARM64Add("add", operands
, :int)
456 emitARM64Add("adds", operands
, :int)
458 emitARM64Add("add", operands
, :ptr)
460 emitARM64Add("adds", operands
, :ptr)
462 emitARM64Add("add", operands
, :ptr)
464 emitARM64TAC("and", operands
, :int)
466 emitARM64TAC("and", operands
, :ptr)
468 emitARM64TAC("and", operands
, :ptr)
470 emitARM64TAC("orr", operands
, :int)
472 emitARM64TAC("orr", operands
, :ptr)
474 emitARM64TAC("orr", operands
, :ptr)
476 emitARM64TAC("eor", operands
, :int)
478 emitARM64TAC("eor", operands
, :ptr)
480 emitARM64TAC("eor", operands
, :ptr)
482 emitARM64Shift("lslv", "ubfm", operands
, :int) {
484 [32 - value
, 31 - value
]
487 emitARM64Shift("lslv", "ubfm", operands
, :ptr) {
489 [64 - value
, 63 - value
]
492 emitARM64Shift("lslv", "ubfm", operands
, :ptr) {
494 [64 - value
, 63 - value
]
497 emitARM64Shift("asrv", "sbfm", operands
, :int) {
502 emitARM64Shift("asrv", "sbfm", operands
, :ptr) {
507 emitARM64Shift("asrv", "sbfm", operands
, :ptr) {
512 emitARM64Shift("lsrv", "ubfm", operands
, :int) {
517 emitARM64Shift("lsrv", "ubfm", operands
, :ptr) {
522 emitARM64Shift("lsrv", "ubfm", operands
, :ptr) {
527 $asm.puts
"madd #{arm64TACOperands(operands, :int)}, wzr"
529 $asm.puts
"madd #{arm64TACOperands(operands, :ptr)}, xzr"
531 $asm.puts
"madd #{arm64TACOperands(operands, :ptr)}, xzr"
533 emitARM64TAC("sub", operands
, :int)
535 emitARM64TAC("sub", operands
, :ptr)
537 emitARM64TAC("sub", operands
, :ptr)
539 emitARM64TAC("subs", operands
, :int)
541 $asm.puts
"sub #{operands[0].arm64Operand(:int)}, wzr, #{operands[0].arm64Operand(:int)}"
543 $asm.puts
"sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
545 $asm.puts
"sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
547 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :int)
549 emitARM64Access("ldrsw", "ldursw", operands
[1], operands
[0], :ptr)
551 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :ptr)
553 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :ptr)
555 emitARM64Unflipped("str", operands
, :int)
557 emitARM64Unflipped("str", operands
, :ptr)
559 emitARM64Unflipped("str", operands
, :ptr)
561 emitARM64Access("ldrb", "ldurb", operands
[1], operands
[0], :int)
563 emitARM64Access("ldrsb", "ldursb", operands
[1], operands
[0], :int)
565 emitARM64Unflipped("strb", operands
, :int)
567 emitARM64Access("ldrh", "ldurh", operands
[1], operands
[0], :int)
569 emitARM64Access("ldrsh", "ldursh", operands
[1], operands
[0], :int)
571 emitARM64Unflipped("strh", operands
, :int)
573 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :double)
575 emitARM64Unflipped("str", operands
, :double)
577 emitARM64TAC("fadd", operands
, :double)
579 emitARM64TAC("fdiv", operands
, :double)
581 emitARM64TAC("fsub", operands
, :double)
583 emitARM64TAC("fmul", operands
, :double)
585 emitARM64("fsqrt", operands
, :double)
587 emitARM64("scvtf", operands
, [:int, :double])
589 emitARM64Branch("fcmp", operands
, :double, "b.eq")
591 emitARM64Unflipped("fcmp", operands
[0..1], :double)
592 isUnordered
= LocalLabel
.unique("bdneq")
593 $asm.puts
"b.vs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
594 $asm.puts
"b.ne #{operands[2].asmLabel}"
595 isUnordered
.lower("ARM64")
597 emitARM64Branch("fcmp", operands
, :double, "b.gt")
599 emitARM64Branch("fcmp", operands
, :double, "b.ge")
601 emitARM64Branch("fcmp", operands
, :double, "b.mi")
603 emitARM64Branch("fcmp", operands
, :double, "b.ls")
605 emitARM64Unflipped("fcmp", operands
[0..1], :double)
606 $asm.puts
"b.vs #{operands[2].asmLabel}"
607 $asm.puts
"b.eq #{operands[2].asmLabel}"
609 emitARM64Branch("fcmp", operands
, :double, "b.ne")
611 emitARM64Branch("fcmp", operands
, :double, "b.hi")
613 emitARM64Branch("fcmp", operands
, :double, "b.pl")
615 emitARM64Branch("fcmp", operands
, :double, "b.lt")
617 emitARM64Branch("fcmp", operands
, :double, "b.le")
        when "btd2i"
            # FIXME: May be a good idea to just get rid of this instruction, since the interpreter
            # currently does not use it.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "td2i"
            emitARM64("fcvtzs", operands, [:double, :int])
        when "bcd2i"
            # FIXME: Remove this instruction, or use it and implement it. Currently it's not
            # used.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "movdz"
            # FIXME: Remove it or support it.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "pop"
            operands.each_slice(2) {
                | ops |
                # Note that the operands are in the reverse order of the case for push.
                # This is due to the fact that order matters for pushing and popping, and
                # on platforms that only push/pop one slot at a time they pop their
                # arguments in the reverse order that they were pushed. In order to remain
                # compatible with those platforms we assume here that that's what has been done.
                #
                # So for example, if we did push(A, B, C, D), we would then pop(D, C, B, A).
                # But since the ordering of arguments doesn't change on arm64 between the stp and ldp
                # instructions we need to flip flop the argument positions that were passed to us.
                $asm.puts "ldp #{ops[1].arm64Operand(:ptr)}, #{ops[0].arm64Operand(:ptr)}, [sp], #16"
            }
        when "push"
            operands.each_slice(2) {
                | ops |
                $asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
            }
        when "move"
            if operands[0].immediate?
                emitARM64MoveImmediate(operands[0].value, operands[1])
            else
                emitARM64("mov", operands, :ptr)
            end
        when "sxi2p"
            emitARM64("sxtw", operands, [:int, :ptr])
        when "sxi2q"
            emitARM64("sxtw", operands, [:int, :ptr])
        when "zxi2p"
            emitARM64("uxtw", operands, [:int, :ptr])
        when "zxi2q"
            emitARM64("uxtw", operands, [:int, :ptr])
        when "nop"
            $asm.puts "nop"
        when "bieq", "bbeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs wzr, ", operands, :int, "b.eq")
            end
        when "bpeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
            end
        when "bqeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
            end
690 when "bineq
", "bbneq
"
691 if operands[0].immediate? and operands[0].value == 0
692 $asm.puts "cbnz
#{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
693 elsif operands[1].immediate? and operands[1].value == 0
694 $asm.puts "cbnz
#{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
696 emitARM64Branch("subs wzr
, ", operands, :int, "b
.ne
")
699 if operands[0].immediate? and operands[0].value == 0
700 $asm.puts "cbnz
#{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
701 elsif operands[1].immediate? and operands[1].value == 0
702 $asm.puts "cbnz
#{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
704 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ne
")
707 if operands[0].immediate? and operands[0].value == 0
708 $asm.puts "cbnz
#{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
709 elsif operands[1].immediate? and operands[1].value == 0
710 $asm.puts "cbnz
#{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
712 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ne
")
715 emitARM64Branch("subs wzr
, ", operands, :int, "b
.hi
")
717 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hi
")
719 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hi
")
720 when "biaeq
", "bbaeq
"
721 emitARM64Branch("subs wzr
, ", operands, :int, "b
.hs
")
723 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hs
")
725 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hs
")
727 emitARM64Branch("subs wzr
, ", operands, :int, "b
.lo
")
729 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lo
")
731 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lo
")
732 when "bibeq
", "bbbeq
"
733 emitARM64Branch("subs wzr
, ", operands, :int, "b
.ls
")
735 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ls
")
737 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ls
")
739 emitARM64Branch("subs wzr
, ", operands, :int, "b
.gt
")
741 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.gt
")
743 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.gt
")
744 when "bigteq
", "bbgteq
"
745 emitARM64Branch("subs wzr
, ", operands, :int, "b
.ge
")
747 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ge
")
749 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ge
")
751 emitARM64Branch("subs wzr
, ", operands, :int, "b
.lt
")
753 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lt
")
755 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lt
")
756 when "bilteq
", "bblteq
"
757 emitARM64Branch("subs wzr
, ", operands, :int, "b
.le
")
759 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.le
")
761 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.le
")
        when "jmp"
            if operands[0].label?
                $asm.puts "b #{operands[0].asmLabel}"
            else
                emitARM64Unflipped("br", operands, :ptr)
            end
        when "call"
            if operands[0].label?
                $asm.puts "bl #{operands[0].asmLabel}"
            else
                emitARM64Unflipped("blr", operands, :ptr)
            end
        when "break"
            $asm.puts "brk \#0"
        when "ret"
            $asm.puts "ret"
        when "cieq", "cbeq"
            emitARM64Compare(operands, :int, "ne")
        when "cpeq"
            emitARM64Compare(operands, :ptr, "ne")
        when "cqeq"
            emitARM64Compare(operands, :ptr, "ne")
        when "cineq", "cbneq"
            emitARM64Compare(operands, :int, "eq")
        when "cpneq"
            emitARM64Compare(operands, :ptr, "eq")
        when "cqneq"
            emitARM64Compare(operands, :ptr, "eq")
        when "cia", "cba"
            emitARM64Compare(operands, :int, "ls")
        when "cpa"
            emitARM64Compare(operands, :ptr, "ls")
        when "cqa"
            emitARM64Compare(operands, :ptr, "ls")
        when "ciaeq", "cbaeq"
            emitARM64Compare(operands, :int, "lo")
        when "cpaeq"
            emitARM64Compare(operands, :ptr, "lo")
        when "cqaeq"
            emitARM64Compare(operands, :ptr, "lo")
        when "cib", "cbb"
            emitARM64Compare(operands, :int, "hs")
        when "cpb"
            emitARM64Compare(operands, :ptr, "hs")
        when "cqb"
            emitARM64Compare(operands, :ptr, "hs")
        when "cibeq", "cbbeq"
            emitARM64Compare(operands, :int, "hi")
        when "cpbeq"
            emitARM64Compare(operands, :ptr, "hi")
        when "cqbeq"
            emitARM64Compare(operands, :ptr, "hi")
        when "cilt", "cblt"
            emitARM64Compare(operands, :int, "ge")
        when "cplt"
            emitARM64Compare(operands, :ptr, "ge")
        when "cqlt"
            emitARM64Compare(operands, :ptr, "ge")
        when "cilteq", "cblteq"
            emitARM64Compare(operands, :int, "gt")
        when "cplteq"
            emitARM64Compare(operands, :ptr, "gt")
        when "cqlteq"
            emitARM64Compare(operands, :ptr, "gt")
        when "cigt", "cbgt"
            emitARM64Compare(operands, :int, "le")
        when "cpgt"
            emitARM64Compare(operands, :ptr, "le")
        when "cqgt"
            emitARM64Compare(operands, :ptr, "le")
        when "cigteq", "cbgteq"
            emitARM64Compare(operands, :int, "lt")
        when "cpgteq"
            emitARM64Compare(operands, :ptr, "lt")
        when "cqgteq"
            emitARM64Compare(operands, :ptr, "lt")
        when "peek"
            $asm.puts "ldr #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
        when "poke"
            $asm.puts "str #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
        when "fp2d"
            emitARM64("fmov", operands, [:ptr, :double])
        when "fq2d"
            emitARM64("fmov", operands, [:ptr, :double])
        when "fd2p"
            emitARM64("fmov", operands, [:double, :ptr])
        when "fd2q"
            emitARM64("fmov", operands, [:double, :ptr])
        when "bo"
            $asm.puts "b.vs #{operands[0].asmLabel}"
        when "bs"
            $asm.puts "b.mi #{operands[0].asmLabel}"
        when "bz"
            $asm.puts "b.eq #{operands[0].asmLabel}"
        when "bnz"
            $asm.puts "b.ne #{operands[0].asmLabel}"
        when "leai"
            operands[0].arm64EmitLea(operands[1], :int)
        when "leap"
            operands[0].arm64EmitLea(operands[1], :ptr)
        when "leaq"
            operands[0].arm64EmitLea(operands[1], :ptr)
        when "smulli"
            $asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
        when "pcrtoaddr"
            $asm.puts "adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}"
        when "nopCortexA53Fix835769"
            $asm.putStr("#if CPU(ARM64_CORTEXA53)")
            $asm.puts "nop"
            $asm.putStr("#endif")
        else
            lowerDefault
        end
    end
end