# offlineasm/arm64.rb — ARM64 backend for the JavaScriptCore offline assembler
# (apple/javascriptcore, mirrored at git.saurik.com)
1 # Copyright (C) 2011, 2012, 2014 Apple Inc. All rights reserved.
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
30 # x<number> => GPR. This is both the generic name of the register, and the name used
31 # to indicate that the register is used in 64-bit mode.
32 # w<number> => GPR in 32-bit mode. This is the low 32-bits of the GPR. If it is
33 # mutated then the high 32-bit part of the register is zero filled.
34 # q<number> => FPR. This is the generic name of the register.
35 # d<number> => FPR used as an IEEE 64-bit binary floating point number (i.e. double).
37 # GPR conventions, to match the baseline JIT:
39 # x0 => return value, cached result, first argument, t0, a0, r0
45 # x9 => (nonArgGPR1 in baseline)
46 # x13 => scratch (unused in baseline)
51 # x27 => csr1 (tagTypeNumber)
52 # x28 => csr2 (tagMask)
57 # FPR conentions, to match the baseline JIT:
63 # q4 => ft4 (unused in baseline)
64 # q5 => ft5 (unused in baseline)
67 def arm64GPRName(name
, kind
)
68 raise "bad GPR name #{name}" unless name
=~
/^x/
76 raise "Wrong kind: #{kind}"
80 def arm64FPRName(name
, kind
)
81 raise "bad FPR kind #{kind}" unless kind
== :double
82 raise "bad FPR name #{name}" unless name
=~
/^q/
87 def arm64Operand(kind
)
90 arm64GPRName(@name, kind
)
92 arm64FPRName(@name, kind
)
94 raise "Bad name: #{@name}"
# Registers handed to assignRegistersToTemporaries to back offlineasm
# temporaries. x13 is scratch (unused in the baseline JIT — see the register
# conventions above); x16/x17 and q31 are presumably free for the same
# reason — confirm against the baseline JIT's register allocation.
ARM64_EXTRA_GPRS = %w[x16 x17 x13].map { |reg| SpecialRegister.new(reg) }
ARM64_EXTRA_FPRS = %w[q31].map { |reg| SpecialRegister.new(reg) }
103 def arm64Operand(kind
)
105 when 't0', 'a0', 'r0'
106 arm64GPRName('x0', kind
)
107 when 't1', 'a1', 'r1'
108 arm64GPRName('x1', kind
)
110 arm64GPRName('x2', kind
)
112 arm64GPRName('x3', kind
)
114 arm64GPRName('x23', kind
)
116 arm64GPRName('x5', kind
)
118 arm64GPRName('x24', kind
)
120 arm64GPRName('x6', kind
)
122 arm64GPRName('x7', kind
)
124 arm64GPRName('x29', kind
)
126 arm64GPRName('x27', kind
)
128 arm64GPRName('x28', kind
)
134 raise "Bad register name #{@name} at #{codeOriginString}"
140 def arm64Operand(kind
)
143 arm64FPRName('q0', kind
)
145 arm64FPRName('q1', kind
)
147 arm64FPRName('q2', kind
)
149 arm64FPRName('q3', kind
)
151 arm64FPRName('q4', kind
)
153 arm64FPRName('q5', kind
)
154 else "Bad register name #{@name} at #{codeOriginString}"
160 def arm64Operand(kind
)
161 raise "Invalid immediate #{value} at #{codeOriginString}" if value
< 0 or value
> 4095
# Formats this base+offset address as an ARM64 memory operand,
# e.g. "[x0, #8]". Offsets outside -255..4095 should already have been
# eliminated by riscLowerMalformedAddresses.
def arm64Operand(kind)
    unless (-255..4095).include? offset.value
        raise "Invalid offset #{offset.value} at #{codeOriginString}"
    end
    "[#{base.arm64Operand(:ptr)}, \##{offset.value}]"
end
# Emits an "add" that materializes base + offset into the destination
# register (the lea lowering for a simple Address).
def arm64EmitLea(destination, kind)
    dest = destination.arm64Operand(kind)
    src = base.arm64Operand(kind)
    $asm.puts "add #{dest}, #{src}, \##{offset.value}"
end
# Formats this base+index address as a scaled ARM64 memory operand,
# e.g. "[x0, x1, lsl #3]". A scaled base-index operand cannot also carry
# an immediate offset, so lowering must have folded any offset away.
def arm64Operand(kind)
    raise "Invalid offset #{offset.value} at #{codeOriginString}" unless offset.value == 0
    "[#{base.arm64Operand(:ptr)}, #{index.arm64Operand(:ptr)}, lsl \##{scaleShift}]"
end
# Emits an "add" with a shifted index operand to materialize
# base + (index << scaleShift) into the destination register.
def arm64EmitLea(destination, kind)
    dest = destination.arm64Operand(kind)
    $asm.puts "add #{dest}, #{base.arm64Operand(kind)}, #{index.arm64Operand(kind)}, lsl \##{scaleShift}"
end
class AbsoluteAddress
    # Absolute addresses must be converted away before code generation on
    # ARM64; reaching this point means lowering failed to eliminate one.
    def arm64Operand(kind)
        raise "Unconverted absolute address #{address.value} at #{codeOriginString}"
    end

    # FIXME: We could support AbsoluteAddress for lea, but we don't.
end
197 # Actual lowering code follows.
201 def getModifiedListARM64
203 result
= riscLowerNot(result
)
204 result
= riscLowerSimpleBranchOps(result
)
205 result
= riscLowerHardBranchOps64(result
)
206 result
= riscLowerShiftOps(result
)
207 result
= riscLowerMalformedAddresses(result
) {
210 when "loadb", "loadbs", "storeb", /^bb/, /^btb/, /^cb/, /^tb/
212 when "loadh", "loadhs"
214 when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi",
215 "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/,
216 /^ci/, /^ti/, "addis", "subis", "mulis", "smulli", "leai"
218 when "loadp", "storep", "loadq", "storeq", "loadd", "stored", "lshiftp", "lshiftq", "negp", "negq", "rshiftp", "rshiftq",
219 "urshiftp", "urshiftq", "addp", "addq", "mulp", "mulq", "andp", "andq", "orp", "orq", "subp", "subq", "xorp", "xorq", "addd",
220 "divd", "subd", "muld", "sqrtd", /^bp/, /^bq/, /^btp/, /^btq/, /^cp/, /^cq/, /^tp/, /^tq/, /^bd/,
221 "jmp", "call", "leap", "leaq"
224 raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
227 if address
.is_a
? BaseIndex
228 address
.offset
.value
== 0 and
229 (node
.opcode
=~
/^lea/ or address
.scale
== 1 or address
.scale
== size
)
230 elsif address
.is_a
? Address
231 (-255..4095).include? address
.offset
.value
236 result
= riscLowerMisplacedImmediates(result
, ["storeb", "storei", "storep", "storeq"])
237 result
= riscLowerMalformedImmediates(result
, 0..4095)
238 result
= riscLowerMisplacedAddresses(result
)
239 result
= riscLowerMalformedAddresses(result
) {
245 not (address
.is_a
? Address
and address
.offset
.value
< 0)
249 raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
252 result
= riscLowerTest(result
)
253 result
= assignRegistersToTemporaries(result
, :gpr, ARM64_EXTRA_GPRS
)
254 result
= assignRegistersToTemporaries(result
, :fpr, ARM64_EXTRA_FPRS
)
259 def arm64Operands(operands
, kinds
)
261 raise "Mismatched operand lists: #{operands.inspect} and #{kinds.inspect}" if operands
.size !
= kinds
.size
263 kinds
= operands
.map
{ kinds
}
265 (0...operands
.size
).map
{
267 operands
[index
].arm64Operand(kinds
[index
])
271 def arm64FlippedOperands(operands
, kinds
)
273 kinds
= [kinds
[-1]] + kinds
[0..-2]
275 arm64Operands([operands
[-1]] + operands
[0..-2], kinds
)
# TAC = three address code.
# Converts an offlineasm operand list into ARM64 three-address order.
# A three-operand list is simply flipped (destination moved first); a
# two-operand list repeats the destination so "op dst, src" becomes the
# three-address "dst, dst, src" form.
def arm64TACOperands(operands, kind)
    return arm64FlippedOperands(operands, kind) if operands.size == 3

    raise unless operands.size == 2

    operands[1].arm64Operand(kind) + ", " + arm64FlippedOperands(operands, kind)
end
289 def emitARM64Add(opcode
, operands
, kind
)
290 if operands
.size
== 3
291 raise unless operands
[1].register
?
292 raise unless operands
[2].register
?
294 if operands
[0].immediate
?
295 if operands
[0].value
== 0 and flag !~
/s$/
296 unless operands
[1] == operands
[2]
297 $asm.puts
"mov #{arm64FlippedOperands(operands[1..2], kind)}"
300 $asm.puts
"#{opcode} #{arm64Operands(operands.reverse, kind)}"
305 raise unless operands
[0].register
?
306 $asm.puts
"#{opcode} #{arm64FlippedOperands(operands, kind)}"
310 raise unless operands
.size
== 2
312 if operands
[0].immediate
? and operands
[0].value
== 0 and opcode !~
/s$/
316 $asm.puts
"#{opcode} #{arm64TACOperands(operands, kind)}"
# Emits opcode with the operands kept in offlineasm order (no
# destination-first flip, unlike emitARM64).
def emitARM64Unflipped(opcode, operands, kind)
    formatted = arm64Operands(operands, kind)
    $asm.puts "#{opcode} #{formatted}"
end
# Emits opcode in three-address form (see arm64TACOperands).
def emitARM64TAC(opcode, operands, kind)
    formatted = arm64TACOperands(operands, kind)
    $asm.puts "#{opcode} #{formatted}"
end
# Emits opcode with operands flipped into ARM64 destination-first order.
def emitARM64(opcode, operands, kind)
    formatted = arm64FlippedOperands(operands, kind)
    $asm.puts "#{opcode} #{formatted}"
end
# Emits a load/store, selecting between the regular opcode and the
# negative-offset variant (e.g. "ldr" vs "ldur" at the call sites):
# a negative Address offset requires the alternate encoding.
def emitARM64Access(opcode, opcodeNegativeOffset, register, memory, kind)
    if memory.is_a? Address and memory.offset.value < 0
        $asm.puts "#{opcodeNegativeOffset} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
    else
        $asm.puts "#{opcode} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
    end
end
# Emits a shift. Register-amount shifts use opcodeRegs (e.g. "lslv") in
# three-address form. Immediate shifts use opcodeImmediate (a bitfield-move
# opcode such as "ubfm"/"sbfm" at the call sites); the caller's block maps
# the shift amount to the opcode's two immediate fields.
def emitARM64Shift(opcodeRegs, opcodeImmediate, operands, kind)
    if operands.size == 3 and operands[1].immediate?
        magicNumbers = yield operands[1].value
        $asm.puts "#{opcodeImmediate} #{operands[2].arm64Operand(kind)}, #{operands[0].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
    elsif operands.size == 2 and operands[0].immediate?
        # Two-operand immediate form shifts the destination in place.
        magicNumbers = yield operands[0].value
        $asm.puts "#{opcodeImmediate} #{operands[1].arm64Operand(kind)}, #{operands[1].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
    else
        emitARM64TAC(opcodeRegs, operands, kind)
    end
end
# Emits a flag-setting instruction over all but the last operand, then a
# conditional branch to the label given as the last operand.
def emitARM64Branch(opcode, operands, kind, branchOpcode)
    comparisonOperands = operands[0..-2]
    emitARM64Unflipped(opcode, comparisonOperands, kind)
    $asm.puts "#{branchOpcode} #{operands[-1].asmLabel}"
end
# Materializes a comparison result (0 or 1) into the last operand:
# "subs" against the zero register sets the flags, then "csinc" with
# compareCode — the INVERSE of the desired condition (the call sites pass
# "ne" for cieq, "eq" for cineq, etc.) — writes 1 when the desired
# condition holds and 0 otherwise.
def emitARM64Compare(operands, kind, compareCode)
    sourceOperands = operands[0..-2]
    emitARM64Unflipped("subs #{arm64GPRName('xzr', kind)}, ", sourceOperands, kind)
    $asm.puts "csinc #{operands[-1].arm64Operand(:int)}, wzr, wzr, #{compareCode}"
end
366 def emitARM64MoveImmediate(value
, target
)
368 isNegative
= value
< 0
369 [48, 32, 16, 0].each
{
371 currentValue
= (value
>> shift
) & 0xffff
372 next if currentValue
== (isNegative
? 0xffff : 0) and shift !
= 0
375 $asm.puts
"movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}"
377 $asm.puts
"movz #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
381 $asm.puts
"movk #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
388 $asm.comment codeOriginString
389 $asm.annotation annotation
if $enableInstrAnnotations
393 emitARM64Add("add", operands
, :int)
395 emitARM64Add("adds", operands
, :int)
397 emitARM64Add("add", operands
, :ptr)
399 emitARM64Add("adds", operands
, :ptr)
401 emitARM64Add("add", operands
, :ptr)
403 emitARM64TAC("and", operands
, :int)
405 emitARM64TAC("and", operands
, :ptr)
407 emitARM64TAC("and", operands
, :ptr)
409 emitARM64TAC("orr", operands
, :int)
411 emitARM64TAC("orr", operands
, :ptr)
413 emitARM64TAC("orr", operands
, :ptr)
415 emitARM64TAC("eor", operands
, :int)
417 emitARM64TAC("eor", operands
, :ptr)
419 emitARM64TAC("eor", operands
, :ptr)
421 emitARM64Shift("lslv", "ubfm", operands
, :int) {
423 [32 - value
, 31 - value
]
426 emitARM64Shift("lslv", "ubfm", operands
, :ptr) {
428 [64 - value
, 63 - value
]
431 emitARM64Shift("lslv", "ubfm", operands
, :ptr) {
433 [64 - value
, 63 - value
]
436 emitARM64Shift("asrv", "sbfm", operands
, :int) {
441 emitARM64Shift("asrv", "sbfm", operands
, :ptr) {
446 emitARM64Shift("asrv", "sbfm", operands
, :ptr) {
451 emitARM64Shift("lsrv", "ubfm", operands
, :int) {
456 emitARM64Shift("lsrv", "ubfm", operands
, :ptr) {
461 emitARM64Shift("lsrv", "ubfm", operands
, :ptr) {
466 $asm.puts
"madd #{arm64TACOperands(operands, :int)}, wzr"
468 $asm.puts
"madd #{arm64TACOperands(operands, :ptr)}, xzr"
470 $asm.puts
"madd #{arm64TACOperands(operands, :ptr)}, xzr"
472 emitARM64TAC("sub", operands
, :int)
474 emitARM64TAC("sub", operands
, :ptr)
476 emitARM64TAC("sub", operands
, :ptr)
478 emitARM64TAC("subs", operands
, :int)
480 $asm.puts
"sub #{operands[0].arm64Operand(:int)}, wzr, #{operands[0].arm64Operand(:int)}"
482 $asm.puts
"sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
484 $asm.puts
"sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
486 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :int)
488 emitARM64Access("ldrsw", "ldursw", operands
[1], operands
[0], :ptr)
490 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :ptr)
492 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :ptr)
494 emitARM64Unflipped("str", operands
, :int)
496 emitARM64Unflipped("str", operands
, :ptr)
498 emitARM64Unflipped("str", operands
, :ptr)
500 emitARM64Access("ldrb", "ldurb", operands
[1], operands
[0], :int)
502 emitARM64Access("ldrsb", "ldursb", operands
[1], operands
[0], :int)
504 emitARM64Unflipped("strb", operands
, :int)
506 emitARM64Access("ldrh", "ldurh", operands
[1], operands
[0], :int)
508 emitARM64Access("ldrsh", "ldursh", operands
[1], operands
[0], :int)
510 emitARM64Unflipped("strh", operands
, :int)
512 emitARM64Access("ldr", "ldur", operands
[1], operands
[0], :double)
514 emitARM64Unflipped("str", operands
, :double)
516 emitARM64TAC("fadd", operands
, :double)
518 emitARM64TAC("fdiv", operands
, :double)
520 emitARM64TAC("fsub", operands
, :double)
522 emitARM64TAC("fmul", operands
, :double)
524 emitARM64("fsqrt", operands
, :double)
526 emitARM64("scvtf", operands
, [:int, :double])
528 emitARM64Branch("fcmp", operands
, :double, "b.eq")
530 emitARM64Unflipped("fcmp", operands
[0..1], :double)
531 isUnordered
= LocalLabel
.unique("bdneq")
532 $asm.puts
"b.vs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
533 $asm.puts
"b.ne #{operands[2].asmLabel}"
534 isUnordered
.lower("ARM64")
536 emitARM64Branch("fcmp", operands
, :double, "b.gt")
538 emitARM64Branch("fcmp", operands
, :double, "b.ge")
540 emitARM64Branch("fcmp", operands
, :double, "b.mi")
542 emitARM64Branch("fcmp", operands
, :double, "b.ls")
544 emitARM64Unflipped("fcmp", operands
[0..1], :double)
545 $asm.puts
"b.vs #{operands[2].asmLabel}"
546 $asm.puts
"b.eq #{operands[2].asmLabel}"
548 emitARM64Branch("fcmp", operands
, :double, "b.ne")
550 emitARM64Branch("fcmp", operands
, :double, "b.hi")
552 emitARM64Branch("fcmp", operands
, :double, "b.pl")
554 emitARM64Branch("fcmp", operands
, :double, "b.lt")
556 emitARM64Branch("fcmp", operands
, :double, "b.le")
558 # FIXME: May be a good idea to just get rid of this instruction, since the interpreter
559 # currently does not use it.
560 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
562 emitARM64("fcvtzs", operands
, [:double, :int])
564 # FIXME: Remove this instruction, or use it and implement it. Currently it's not
566 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
568 # FIXME: Remove it or support it.
569 raise "ARM64 does not support this opcode yet, #{codeOriginString}"
571 operands
.each_slice(2) {
573 # Note that the operands are in the reverse order of the case for push.
574 # This is due to the fact that order matters for pushing and popping, and
575 # on platforms that only push/pop one slot at a time they pop their
576 # arguments in the reverse order that they were pushed. In order to remain
577 # compatible with those platforms we assume here that that's what has been done.
579 # So for example, if we did push(A, B, C, D), we would then pop(D, C, B, A).
580 # But since the ordering of arguments doesn't change on arm64 between the stp and ldp
581 # instructions we need to flip flop the argument positions that were passed to us.
582 $asm.puts
"ldp #{ops[1].arm64Operand(:ptr)}, #{ops[0].arm64Operand(:ptr)}, [sp], #16"
585 operands
.each_slice(2) {
587 $asm.puts
"stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
590 $asm.puts "ldp x29
, x30
, [sp
], #16"
592 $asm.puts
"stp x29, x30, [sp, #-16]!"
593 when "popCalleeSaves
"
594 $asm.puts "ldp x28
, x27
, [sp
], #16"
595 $asm.puts
"ldp x26, x25, [sp], #16"
596 $asm.puts
"ldp x24, x23, [sp], #16"
597 $asm.puts
"ldp x22, x21, [sp], #16"
598 $asm.puts
"ldp x20, x19, [sp], #16"
599 when "pushCalleeSaves"
600 $asm.puts
"stp x20, x19, [sp, #-16]!"
601 $asm.puts "stp x22
, x21
, [sp
, #-16]!"
602 $asm.puts
"stp x24, x23, [sp, #-16]!"
603 $asm.puts "stp x26
, x25
, [sp
, #-16]!"
604 $asm.puts
"stp x28, x27, [sp, #-16]!"
606 if operands[0].immediate?
607 emitARM64MoveImmediate(operands[0].value, operands[1])
609 emitARM64("mov
", operands, :ptr)
612 emitARM64("sxtw
", operands, [:int, :ptr])
614 emitARM64("sxtw
", operands, [:int, :ptr])
616 emitARM64("uxtw
", operands, [:int, :ptr])
618 emitARM64("uxtw
", operands, [:int, :ptr])
622 if operands[0].immediate? and operands[0].value == 0
623 $asm.puts "cbz
#{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
624 elsif operands[1].immediate? and operands[1].value == 0
625 $asm.puts "cbz
#{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
627 emitARM64Branch("subs wzr
, ", operands, :int, "b
.eq
")
630 if operands[0].immediate? and operands[0].value == 0
631 $asm.puts "cbz
#{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
632 elsif operands[1].immediate? and operands[1].value == 0
633 $asm.puts "cbz
#{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
635 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.eq
")
638 if operands[0].immediate? and operands[0].value == 0
639 $asm.puts "cbz
#{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
640 elsif operands[1].immediate? and operands[1].value == 0
641 $asm.puts "cbz
#{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
643 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.eq
")
645 when "bineq
", "bbneq
"
646 if operands[0].immediate? and operands[0].value == 0
647 $asm.puts "cbnz
#{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
648 elsif operands[1].immediate? and operands[1].value == 0
649 $asm.puts "cbnz
#{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
651 emitARM64Branch("subs wzr
, ", operands, :int, "b
.ne
")
654 if operands[0].immediate? and operands[0].value == 0
655 $asm.puts "cbnz
#{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
656 elsif operands[1].immediate? and operands[1].value == 0
657 $asm.puts "cbnz
#{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
659 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ne
")
662 if operands[0].immediate? and operands[0].value == 0
663 $asm.puts "cbnz
#{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
664 elsif operands[1].immediate? and operands[1].value == 0
665 $asm.puts "cbnz
#{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
667 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ne
")
670 emitARM64Branch("subs wzr
, ", operands, :int, "b
.hi
")
672 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hi
")
674 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hi
")
675 when "biaeq
", "bbaeq
"
676 emitARM64Branch("subs wzr
, ", operands, :int, "b
.hs
")
678 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hs
")
680 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.hs
")
682 emitARM64Branch("subs wzr
, ", operands, :int, "b
.lo
")
684 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lo
")
686 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lo
")
687 when "bibeq
", "bbbeq
"
688 emitARM64Branch("subs wzr
, ", operands, :int, "b
.ls
")
690 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ls
")
692 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ls
")
694 emitARM64Branch("subs wzr
, ", operands, :int, "b
.gt
")
696 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.gt
")
698 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.gt
")
699 when "bigteq
", "bbgteq
"
700 emitARM64Branch("subs wzr
, ", operands, :int, "b
.ge
")
702 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ge
")
704 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.ge
")
706 emitARM64Branch("subs wzr
, ", operands, :int, "b
.lt
")
708 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lt
")
710 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.lt
")
711 when "bilteq
", "bblteq
"
712 emitARM64Branch("subs wzr
, ", operands, :int, "b
.le
")
714 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.le
")
716 emitARM64Branch("subs xzr
, ", operands, :ptr, "b
.le
")
718 if operands[0].label?
719 $asm.puts "b
#{operands[0].asmLabel}"
721 emitARM64Unflipped("br
", operands, :ptr)
724 if operands[0].label?
725 $asm.puts "bl
#{operands[0].asmLabel}"
727 emitARM64Unflipped("blr
", operands, :ptr)
734 emitARM64Compare(operands
, :int, "ne")
736 emitARM64Compare(operands
, :ptr, "ne")
738 emitARM64Compare(operands
, :ptr, "ne")
739 when "cineq", "cbneq"
740 emitARM64Compare(operands
, :int, "eq")
742 emitARM64Compare(operands
, :ptr, "eq")
744 emitARM64Compare(operands
, :ptr, "eq")
746 emitARM64Compare(operands
, :int, "ls")
748 emitARM64Compare(operands
, :ptr, "ls")
750 emitARM64Compare(operands
, :ptr, "ls")
751 when "ciaeq", "cbaeq"
752 emitARM64Compare(operands
, :int, "lo")
754 emitARM64Compare(operands
, :ptr, "lo")
756 emitARM64Compare(operands
, :ptr, "lo")
758 emitARM64Compare(operands
, :int, "hs")
760 emitARM64Compare(operands
, :ptr, "hs")
762 emitARM64Compare(operands
, :ptr, "hs")
763 when "cibeq", "cbbeq"
764 emitARM64Compare(operands
, :int, "hi")
766 emitARM64Compare(operands
, :ptr, "hi")
768 emitARM64Compare(operands
, :ptr, "hi")
770 emitARM64Compare(operands
, :int, "ge")
772 emitARM64Compare(operands
, :ptr, "ge")
774 emitARM64Compare(operands
, :ptr, "ge")
775 when "cilteq", "cblteq"
776 emitARM64Compare(operands
, :int, "gt")
778 emitARM64Compare(operands
, :ptr, "gt")
780 emitARM64Compare(operands
, :ptr, "gt")
782 emitARM64Compare(operands
, :int, "le")
784 emitARM64Compare(operands
, :ptr, "le")
786 emitARM64Compare(operands
, :ptr, "le")
787 when "cigteq", "cbgteq"
788 emitARM64Compare(operands
, :int, "lt")
790 emitARM64Compare(operands
, :ptr, "lt")
792 emitARM64Compare(operands
, :ptr, "lt")
794 $asm.puts
"ldr #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
796 $asm.puts
"str #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
798 emitARM64("fmov", operands
, [:ptr, :double])
800 emitARM64("fmov", operands
, [:ptr, :double])
802 emitARM64("fmov", operands
, [:double, :ptr])
804 emitARM64("fmov", operands
, [:double, :ptr])
806 $asm.puts
"b.vs #{operands[0].asmLabel}"
808 $asm.puts
"b.mi #{operands[0].asmLabel}"
810 $asm.puts
"b.eq #{operands[0].asmLabel}"
812 $asm.puts
"b.ne #{operands[0].asmLabel}"
814 operands
[0].arm64EmitLea(operands
[1], :int)
816 operands
[0].arm64EmitLea(operands
[1], :ptr)
818 operands
[0].arm64EmitLea(operands
[1], :ptr)
820 $asm.puts
"smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
824 $asm.puts
"adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}"