-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2014 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
require "ast"
require "opt"
require "risc"
-require "risc_arm64"
# Naming conventions:
#
#
# x0 => return value, cached result, first argument, t0, a0, r0
# x1 => t1, a1, r1
-# x2 => t2
+# x2 => t2, a2
+# x3 => a3
+# x5 => t4
+# x6 => t6
+# x7 => t7
# x9 => (nonArgGPR1 in baseline)
-# x10 => t4 (unused in baseline)
-# x11 => t5 (unused in baseline)
-# x12 => t6 (unused in baseline)
# x13 => scratch (unused in baseline)
# x16 => scratch
# x17 => scratch
# x23 => t3
-# x25 => cfr
-# x26 => timeout check (i.e. not touched by LLInt)
+# x24 => t5
# x27 => csr1 (tagTypeNumber)
# x28 => csr2 (tagMask)
+# x29 => cfr
# sp => sp
# lr => lr
#
arm64GPRName('x0', kind)
when 't1', 'a1', 'r1'
arm64GPRName('x1', kind)
- when 't2'
+ when 't2', 'a2'
arm64GPRName('x2', kind)
+ when 'a3'
+ arm64GPRName('x3', kind)
when 't3'
arm64GPRName('x23', kind)
when 't4'
- arm64GPRName('x10', kind)
+ arm64GPRName('x5', kind)
when 't5'
- arm64GPRName('x11', kind)
+ arm64GPRName('x24', kind)
when 't6'
- arm64GPRName('x12', kind)
+ arm64GPRName('x6', kind)
+ when 't7'
+ arm64GPRName('x7', kind)
when 'cfr'
- arm64GPRName('x25', kind)
+ arm64GPRName('x29', kind)
when 'csr1'
arm64GPRName('x27', kind)
when 'csr2'
when 'sp'
'sp'
when 'lr'
- 'lr'
+ 'x30'
else
raise "Bad register name #{@name} at #{codeOriginString}"
end
end
end
-# FIXME: we could support AbsoluteAddress for lea, but we don't.
+# FIXME: We could support AbsoluteAddress for lea, but we don't.
#
# Actual lowering code follows.
class Instruction
def lowerARM64
$asm.comment codeOriginString
+ $asm.annotation annotation if $enableInstrAnnotations
+
case opcode
when 'addi'
emitARM64Add("add", operands, :int)
when "bdltequn"
emitARM64Branch("fcmp", operands, :double, "b.le")
when "btd2i"
- # FIXME: may be a good idea to just get rid of this instruction, since the interpreter
+ # FIXME: May be a good idea to just get rid of this instruction, since the interpreter
# currently does not use it.
raise "ARM64 does not support this opcode yet, #{codeOriginString}"
when "td2i"
emitARM64("fcvtzs", operands, [:double, :int])
when "bcd2i"
- # FIXME: remove this instruction, or use it and implement it. Currently it's not
+ # FIXME: Remove this instruction, or use it and implement it. Currently it's not
# used.
raise "ARM64 does not support this opcode yet, #{codeOriginString}"
when "movdz"
- # FIXME: remove it or support it.
+ # FIXME: Remove it or support it.
raise "ARM64 does not support this opcode yet, #{codeOriginString}"
when "pop"
- emitARM64Unflipped("pop", operands, :ptr)
+ operands.each_slice(2) {
+ | ops |
+ # Note that the operands are in the reverse order of the case for push.
+ # This is due to the fact that order matters for pushing and popping, and
+ # on platforms that only push/pop one slot at a time they pop their
+ # arguments in the reverse order that they were pushed. In order to remain
+ # compatible with those platforms we assume here that that's what has been done.
+
+ # So for example, if we did push(A, B, C, D), we would then pop(D, C, B, A).
+ # But since the ordering of arguments doesn't change on arm64 between the stp and ldp
+ # instructions we need to flip flop the argument positions that were passed to us.
+ $asm.puts "ldp #{ops[1].arm64Operand(:ptr)}, #{ops[0].arm64Operand(:ptr)}, [sp], #16"
+ }
when "push"
- emitARM64Unflipped("push", operands, :ptr)
+ operands.each_slice(2) {
+ | ops |
+ $asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
+ }
+ when "popLRAndFP"
+ $asm.puts "ldp x29, x30, [sp], #16"
+ when "pushLRAndFP"
+ $asm.puts "stp x29, x30, [sp, #-16]!"
+ when "popCalleeSaves"
+ $asm.puts "ldp x28, x27, [sp], #16"
+ $asm.puts "ldp x26, x25, [sp], #16"
+ $asm.puts "ldp x24, x23, [sp], #16"
+ $asm.puts "ldp x22, x21, [sp], #16"
+ $asm.puts "ldp x20, x19, [sp], #16"
+ when "pushCalleeSaves"
+ $asm.puts "stp x20, x19, [sp, #-16]!"
+ $asm.puts "stp x22, x21, [sp, #-16]!"
+ $asm.puts "stp x24, x23, [sp, #-16]!"
+ $asm.puts "stp x26, x25, [sp, #-16]!"
+ $asm.puts "stp x28, x27, [sp, #-16]!"
when "move"
if operands[0].immediate?
emitARM64MoveImmediate(operands[0].value, operands[1])
emitARM64("mov", operands, :ptr)
end
when "sxi2p"
- emitARM64("sxtw", operands, :ptr)
+ emitARM64("sxtw", operands, [:int, :ptr])
when "sxi2q"
- emitARM64("sxtw", operands, :ptr)
+ emitARM64("sxtw", operands, [:int, :ptr])
when "zxi2p"
- emitARM64("uxtw", operands, :ptr)
+ emitARM64("uxtw", operands, [:int, :ptr])
when "zxi2q"
- emitARM64("uxtw", operands, :ptr)
+ emitARM64("uxtw", operands, [:int, :ptr])
when "nop"
$asm.puts "nop"
when "bieq", "bbeq"
operands[0].arm64EmitLea(operands[1], :ptr)
when "smulli"
$asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
+ when "memfence"
+ $asm.puts "dmb sy"
+ when "pcrtoaddr"
+ $asm.puts "adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}"
else
- raise "Unhandled opcode #{opcode} at #{codeOriginString}"
+ lowerDefault
end
end
end