# Copyright (C) 2011, 2012, 2014 Apple Inc. All rights reserved.
# Copyright (C) 2014 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

require "ast"
require "opt"
require "risc"

# Naming conventions:
#
# x<number> => GPR. This is both the generic name of the register, and the name used
#              to indicate that the register is used in 64-bit mode.
# w<number> => GPR in 32-bit mode. This is the low 32 bits of the GPR. If it is
#              mutated then the high 32-bit part of the register is zero filled.
# q<number> => FPR. This is the generic name of the register.
# d<number> => FPR used as an IEEE 64-bit binary floating point number (i.e. double).
#
# GPR conventions, to match the baseline JIT:
#
#  x0 => return value, cached result, first argument, t0, a0, r0
#  x1 => t1, a1, r1
#  x2 => t2, a2
#  x3 => a3
#  x5 => t4
#  x6 => t6
#  x9 => (nonArgGPR1 in baseline)
# x13 => scratch (unused in baseline)
# x16 => scratch
# x17 => scratch
# x23 => t3
# x24 => t5
# x27 => csr1 (tagTypeNumber)
# x28 => csr2 (tagMask)
# x29 => cfr
#  sp => sp
#  lr => lr
#
# FPR conventions, to match the baseline JIT:
#
#  q0 => ft0
#  q1 => ft1
#  q2 => ft2
#  q3 => ft3
#  q4 => ft4 (unused in baseline)
#  q5 => ft5 (unused in baseline)
# q31 => scratch
def arm64GPRName(name, kind)
    raise "bad GPR name #{name}" unless name =~ /^x/
    number = name[1..-1]
    case kind
    when :int
        "w" + number
    when :ptr
        "x" + number
    else
        raise "Wrong kind: #{kind}"
    end
end

def arm64FPRName(name, kind)
    raise "bad FPR kind #{kind}" unless kind == :double
    raise "bad FPR name #{name}" unless name =~ /^q/
    "d" + name[1..-1]
end

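# A minimal illustration of the helpers above (register x3 chosen arbitrarily):
#
#   arm64GPRName("x3", :int)      # => "w3"
#   arm64GPRName("x3", :ptr)      # => "x3"
#   arm64FPRName("q31", :double)  # => "d31"
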
class SpecialRegister
    def arm64Operand(kind)
        case @name
        when /^x/
            arm64GPRName(@name, kind)
        when /^q/
            arm64FPRName(@name, kind)
        else
            raise "Bad name: #{@name}"
        end
    end
end

ARM64_EXTRA_GPRS = [SpecialRegister.new("x16"), SpecialRegister.new("x17"), SpecialRegister.new("x13")]
ARM64_EXTRA_FPRS = [SpecialRegister.new("q31")]

class RegisterID
    def arm64Operand(kind)
        case @name
        when 't0', 'a0', 'r0'
            arm64GPRName('x0', kind)
        when 't1', 'a1', 'r1'
            arm64GPRName('x1', kind)
        when 't2', 'a2'
            arm64GPRName('x2', kind)
        when 'a3'
            arm64GPRName('x3', kind)
        when 't3'
            arm64GPRName('x23', kind)
        when 't4'
            arm64GPRName('x5', kind)
        when 't5'
            arm64GPRName('x24', kind)
        when 't6'
            arm64GPRName('x6', kind)
        when 't7'
            arm64GPRName('x7', kind)
        when 'cfr'
            arm64GPRName('x29', kind)
        when 'csr1'
            arm64GPRName('x27', kind)
        when 'csr2'
            arm64GPRName('x28', kind)
        when 'sp'
            'sp'
        when 'lr'
            'x30'
        else
            raise "Bad register name #{@name} at #{codeOriginString}"
        end
    end
end

class FPRegisterID
    def arm64Operand(kind)
        case @name
        when 'ft0'
            arm64FPRName('q0', kind)
        when 'ft1'
            arm64FPRName('q1', kind)
        when 'ft2'
            arm64FPRName('q2', kind)
        when 'ft3'
            arm64FPRName('q3', kind)
        when 'ft4'
            arm64FPRName('q4', kind)
        when 'ft5'
            arm64FPRName('q5', kind)
        else
            raise "Bad register name #{@name} at #{codeOriginString}"
        end
    end
end

class Immediate
    def arm64Operand(kind)
        raise "Invalid immediate #{value} at #{codeOriginString}" if value < 0 or value > 4095
        "\##{value}"
    end
end

class Address
    def arm64Operand(kind)
        raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value < -255 or offset.value > 4095
        "[#{base.arm64Operand(:ptr)}, \##{offset.value}]"
    end

    def arm64EmitLea(destination, kind)
        $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, \##{offset.value}"
    end
end

class BaseIndex
    def arm64Operand(kind)
        raise "Invalid offset #{offset.value} at #{codeOriginString}" if offset.value != 0
        "[#{base.arm64Operand(:ptr)}, #{index.arm64Operand(:ptr)}, lsl \##{scaleShift}]"
    end

    def arm64EmitLea(destination, kind)
        $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, #{index.arm64Operand(kind)}, lsl \##{scaleShift}"
    end
end

class AbsoluteAddress
    def arm64Operand(kind)
        raise "Unconverted absolute address #{address.value} at #{codeOriginString}"
    end
end

# FIXME: We could support AbsoluteAddress for lea, but we don't.

#
# Actual lowering code follows.
#

def arm64LowerMalformedLoadStoreAddresses(list)
    newList = []

    def isAddressMalformed(operand)
        operand.is_a? Address and not (-255..4095).include? operand.offset.value
    end

    list.each {
        | node |
        if node.is_a? Instruction
            if node.opcode =~ /^store/ and isAddressMalformed(node.operands[1])
                address = node.operands[1]
                tmp = Tmp.new(codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
                newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0))], node.annotation)
            elsif node.opcode =~ /^load/ and isAddressMalformed(node.operands[0])
                address = node.operands[0]
                tmp = Tmp.new(codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
                newList << Instruction.new(node.codeOrigin, node.opcode, [BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0)), node.operands[1]], node.annotation)
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
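
# For instance (illustrative offlineasm), "storep t1, 65536[t2]" has an offset
# outside the encodable -255..4095 range, so it becomes "move 65536, tmp"
# followed by the same store through a BaseIndex of t2 and tmp with scale 1
# and offset 0, which the backend can emit directly.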

# Workaround for Cortex-A53 erratum (835769)
def arm64CortexA53Fix835769(list)
    newList = []
    lastOpcodeUnsafe = false

    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when /^store/, /^load/
                # List all macro instructions that can be lowered to a load, store or prefetch ARM64 assembly instruction
                lastOpcodeUnsafe = true
            when "muli", "mulp", "mulq", "smulli"
                # List all macro instructions that can be lowered to a 64-bit multiply-accumulate ARM64 assembly instruction
                # (defined as one of MADD, MSUB, SMADDL, SMSUBL, UMADDL or UMSUBL).
                if lastOpcodeUnsafe
                    newList << Instruction.new(node.codeOrigin, "nopCortexA53Fix835769", [])
                end
                lastOpcodeUnsafe = false
            else
                lastOpcodeUnsafe = false
            end
        end
        newList << node
    }
    newList
end
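
# So an adjacent pair like (illustrative offlineasm)
#
#   loadp 0[t0], t1
#   muli t1, t2
#
# gets a conditionally-assembled nop inserted between the load and the
# multiply-accumulate, which is enough to avoid the erratum trigger.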

class Sequence
    def getModifiedListARM64
        result = @list
        result = riscLowerNot(result)
        result = riscLowerSimpleBranchOps(result)
        result = riscLowerHardBranchOps64(result)
        result = riscLowerShiftOps(result)
        result = arm64LowerMalformedLoadStoreAddresses(result)
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            case node.opcode
            when "loadb", "loadbs", "storeb", /^bb/, /^btb/, /^cb/, /^tb/
                size = 1
            when "loadh", "loadhs"
                size = 2
            when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi",
                "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/,
                /^ci/, /^ti/, "addis", "subis", "mulis", "smulli", "leai"
                size = 4
            when "loadp", "storep", "loadq", "storeq", "loadd", "stored", "lshiftp", "lshiftq", "negp", "negq", "rshiftp", "rshiftq",
                "urshiftp", "urshiftq", "addp", "addq", "mulp", "mulq", "andp", "andq", "orp", "orq", "subp", "subq", "xorp", "xorq", "addd",
                "divd", "subd", "muld", "sqrtd", /^bp/, /^bq/, /^btp/, /^btq/, /^cp/, /^cq/, /^tp/, /^tq/, /^bd/,
                "jmp", "call", "leap", "leaq"
                size = 8
            else
                raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
            end

            if address.is_a? BaseIndex
                address.offset.value == 0 and
                    (node.opcode =~ /^lea/ or address.scale == 1 or address.scale == size)
            elsif address.is_a? Address
                (-255..4095).include? address.offset.value
            else
                false
            end
        }
        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"])
        result = riscLowerMalformedImmediates(result, 0..4095)
        result = riscLowerMisplacedAddresses(result)
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            case node.opcode
            when /^load/
                true
            when /^store/
                not (address.is_a? Address and address.offset.value < 0)
            when /^lea/
                true
            else
                raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
            end
        }
        result = riscLowerTest(result)
        result = assignRegistersToTemporaries(result, :gpr, ARM64_EXTRA_GPRS)
        result = assignRegistersToTemporaries(result, :fpr, ARM64_EXTRA_FPRS)
        result = arm64CortexA53Fix835769(result)
        return result
    end
end
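
# Note on the pipeline in getModifiedListARM64 above: each
# riscLowerMalformedAddresses block returns a boolean saying whether the
# operand is already encodable for the given access size (e.g. a BaseIndex
# scale must be 1 or match the access size, and an Address offset must lie in
# -255..4095); operands that fail the check are rewritten through a temporary.
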
def arm64Operands(operands, kinds)
    if kinds.is_a? Array
        raise "Mismatched operand lists: #{operands.inspect} and #{kinds.inspect}" if operands.size != kinds.size
    else
        kinds = operands.map{ kinds }
    end
    (0...operands.size).map {
        | index |
        operands[index].arm64Operand(kinds[index])
    }.join(', ')
end

def arm64FlippedOperands(operands, kinds)
    if kinds.is_a? Array
        kinds = [kinds[-1]] + kinds[0..-2]
    end
    arm64Operands([operands[-1]] + operands[0..-2], kinds)
end
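
# offlineasm places the destination operand last, while ARM64 assembly places
# it first; the flip above rotates it around. Illustratively, for registers
# a, b, dest: arm64FlippedOperands([a, b, dest], :ptr) => "<dest>, <a>, <b>".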

# TAC = three address code.
def arm64TACOperands(operands, kind)
    if operands.size == 3
        return arm64FlippedOperands(operands, kind)
    end

    raise unless operands.size == 2

    return operands[1].arm64Operand(kind) + ", " + arm64FlippedOperands(operands, kind)
end
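
# In the two-operand form the destination doubles as the left source operand,
# so (illustratively) "addi t1, t0" lowers through here to "add w0, w0, w1".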

def emitARM64Add(opcode, operands, kind)
    if operands.size == 3
        raise unless operands[1].register?
        raise unless operands[2].register?

        if operands[0].immediate?
            if operands[0].value == 0 and opcode !~ /s$/
                unless operands[1] == operands[2]
                    $asm.puts "mov #{arm64FlippedOperands(operands[1..2], kind)}"
                end
            else
                $asm.puts "#{opcode} #{arm64Operands(operands.reverse, kind)}"
            end
            return
        end

        raise unless operands[0].register?
        $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}"
        return
    end

    raise unless operands.size == 2

    if operands[0].immediate? and operands[0].value == 0 and opcode !~ /s$/
        return
    end

    $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}"
end

def emitARM64Unflipped(opcode, operands, kind)
    $asm.puts "#{opcode} #{arm64Operands(operands, kind)}"
end

def emitARM64TAC(opcode, operands, kind)
    $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}"
end

def emitARM64(opcode, operands, kind)
    $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}"
end

def emitARM64Access(opcode, opcodeNegativeOffset, register, memory, kind)
    if memory.is_a? Address and memory.offset.value < 0
        $asm.puts "#{opcodeNegativeOffset} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
        return
    end

    $asm.puts "#{opcode} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
end
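
# ARM64's plain ldr/str immediate form takes an unsigned scaled offset, while
# negative offsets need the unscaled ldur/stur family; that is why every load
# has an opcodeNegativeOffset twin. Illustratively, "loadp -8[cfr], t0" comes
# out as "ldur x0, [x29, #-8]" rather than "ldr".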

def emitARM64Shift(opcodeRegs, opcodeImmediate, operands, kind)
    if operands.size == 3 and operands[1].immediate?
        magicNumbers = yield operands[1].value
        $asm.puts "#{opcodeImmediate} #{operands[2].arm64Operand(kind)}, #{operands[0].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
        return
    end

    if operands.size == 2 and operands[0].immediate?
        magicNumbers = yield operands[0].value
        $asm.puts "#{opcodeImmediate} #{operands[1].arm64Operand(kind)}, #{operands[1].arm64Operand(kind)}, \##{magicNumbers[0]}, \##{magicNumbers[1]}"
        return
    end

    emitARM64TAC(opcodeRegs, operands, kind)
end
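
# The "magic numbers" yielded by the caller are the ubfm/sbfm immr and imms
# fields that encode the shift. For example, "lshifti 3, t0" uses the
# [32 - value, 31 - value] block further below and assembles to
# "ubfm w0, w0, #29, #28", which is the encoding of lsl w0, w0, #3.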

def emitARM64Branch(opcode, operands, kind, branchOpcode)
    emitARM64Unflipped(opcode, operands[0..-2], kind)
    $asm.puts "#{branchOpcode} #{operands[-1].asmLabel}"
end

def emitARM64Compare(operands, kind, compareCode)
    emitARM64Unflipped("subs #{arm64GPRName('xzr', kind)}, ", operands[0..-2], kind)
    $asm.puts "csinc #{operands[-1].arm64Operand(:int)}, wzr, wzr, #{compareCode}"
end
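
# Note that compareCode is the *inverted* condition: csinc picks wzr (0) when
# that condition holds and wzr + 1 (1) otherwise. So, illustratively,
# "cieq t0, t1, t2" emits "subs wzr, w0, w1" followed by
# "csinc w2, wzr, wzr, ne", leaving 1 in w2 exactly when w0 == w1.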

def emitARM64MoveImmediate(value, target)
    first = true
    isNegative = value < 0
    [48, 32, 16, 0].each {
        | shift |
        currentValue = (value >> shift) & 0xffff
        next if currentValue == (isNegative ? 0xffff : 0) and (shift != 0 or !first)
        if first
            if isNegative
                $asm.puts "movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}"
            else
                $asm.puts "movz #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
            end
            first = false
        else
            $asm.puts "movk #{target.arm64Operand(:ptr)}, \##{currentValue}, lsl \##{shift}"
        end
    }
end
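
# Worked example: "move 65537, t0" (0x10001) emits
#
#   movz x0, #1, lsl #16
#   movk x0, #1, lsl #0
#
# while "move -2, t0" collapses to a single "movn x0, #1, lsl #0", because
# halfwords that are all ones (negative values) or all zeroes are skipped.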

class Instruction
    def lowerARM64
        $asm.comment codeOriginString
        $asm.annotation annotation if $enableInstrAnnotations

        case opcode
        when 'addi'
            emitARM64Add("add", operands, :int)
        when 'addis'
            emitARM64Add("adds", operands, :int)
        when 'addp'
            emitARM64Add("add", operands, :ptr)
        when 'addps'
            emitARM64Add("adds", operands, :ptr)
        when 'addq'
            emitARM64Add("add", operands, :ptr)
        when "andi"
            emitARM64TAC("and", operands, :int)
        when "andp"
            emitARM64TAC("and", operands, :ptr)
        when "andq"
            emitARM64TAC("and", operands, :ptr)
        when "ori"
            emitARM64TAC("orr", operands, :int)
        when "orp"
            emitARM64TAC("orr", operands, :ptr)
        when "orq"
            emitARM64TAC("orr", operands, :ptr)
        when "xori"
            emitARM64TAC("eor", operands, :int)
        when "xorp"
            emitARM64TAC("eor", operands, :ptr)
        when "xorq"
            emitARM64TAC("eor", operands, :ptr)
        when "lshifti"
            emitARM64Shift("lslv", "ubfm", operands, :int) {
                | value |
                [32 - value, 31 - value]
            }
        when "lshiftp"
            emitARM64Shift("lslv", "ubfm", operands, :ptr) {
                | value |
                [64 - value, 63 - value]
            }
        when "lshiftq"
            emitARM64Shift("lslv", "ubfm", operands, :ptr) {
                | value |
                [64 - value, 63 - value]
            }
        when "rshifti"
            emitARM64Shift("asrv", "sbfm", operands, :int) {
                | value |
                [value, 31]
            }
        when "rshiftp"
            emitARM64Shift("asrv", "sbfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        when "rshiftq"
            emitARM64Shift("asrv", "sbfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        when "urshifti"
            emitARM64Shift("lsrv", "ubfm", operands, :int) {
                | value |
                [value, 31]
            }
        when "urshiftp"
            emitARM64Shift("lsrv", "ubfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        when "urshiftq"
            emitARM64Shift("lsrv", "ubfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        when "muli"
            $asm.puts "madd #{arm64TACOperands(operands, :int)}, wzr"
        when "mulp"
            $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr"
        when "mulq"
            $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr"
        when "subi"
            emitARM64TAC("sub", operands, :int)
        when "subp"
            emitARM64TAC("sub", operands, :ptr)
        when "subq"
            emitARM64TAC("sub", operands, :ptr)
        when "subis"
            emitARM64TAC("subs", operands, :int)
        when "negi"
            $asm.puts "sub #{operands[0].arm64Operand(:int)}, wzr, #{operands[0].arm64Operand(:int)}"
        when "negp"
            $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
        when "negq"
            $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
        when "loadi"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :int)
        when "loadis"
            emitARM64Access("ldrsw", "ldursw", operands[1], operands[0], :ptr)
        when "loadp"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr)
        when "loadq"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr)
        when "storei"
            emitARM64Unflipped("str", operands, :int)
        when "storep"
            emitARM64Unflipped("str", operands, :ptr)
        when "storeq"
            emitARM64Unflipped("str", operands, :ptr)
        when "loadb"
            emitARM64Access("ldrb", "ldurb", operands[1], operands[0], :int)
        when "loadbs"
            emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :int)
        when "storeb"
            emitARM64Unflipped("strb", operands, :int)
        when "loadh"
            emitARM64Access("ldrh", "ldurh", operands[1], operands[0], :int)
        when "loadhs"
            emitARM64Access("ldrsh", "ldursh", operands[1], operands[0], :int)
        when "storeh"
            emitARM64Unflipped("strh", operands, :int)
        when "loadd"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :double)
        when "stored"
            emitARM64Unflipped("str", operands, :double)
        when "addd"
            emitARM64TAC("fadd", operands, :double)
        when "divd"
            emitARM64TAC("fdiv", operands, :double)
        when "subd"
            emitARM64TAC("fsub", operands, :double)
        when "muld"
            emitARM64TAC("fmul", operands, :double)
        when "sqrtd"
            emitARM64("fsqrt", operands, :double)
        when "ci2d"
            emitARM64("scvtf", operands, [:int, :double])
        when "bdeq"
            emitARM64Branch("fcmp", operands, :double, "b.eq")
        when "bdneq"
            emitARM64Unflipped("fcmp", operands[0..1], :double)
            isUnordered = LocalLabel.unique("bdneq")
            $asm.puts "b.vs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
            $asm.puts "b.ne #{operands[2].asmLabel}"
            isUnordered.lower("ARM64")
        when "bdgt"
            emitARM64Branch("fcmp", operands, :double, "b.gt")
        when "bdgteq"
            emitARM64Branch("fcmp", operands, :double, "b.ge")
        when "bdlt"
            emitARM64Branch("fcmp", operands, :double, "b.mi")
        when "bdlteq"
            emitARM64Branch("fcmp", operands, :double, "b.ls")
        when "bdequn"
            emitARM64Unflipped("fcmp", operands[0..1], :double)
            $asm.puts "b.vs #{operands[2].asmLabel}"
            $asm.puts "b.eq #{operands[2].asmLabel}"
        when "bdnequn"
            emitARM64Branch("fcmp", operands, :double, "b.ne")
        when "bdgtun"
            emitARM64Branch("fcmp", operands, :double, "b.hi")
        when "bdgtequn"
            emitARM64Branch("fcmp", operands, :double, "b.pl")
        when "bdltun"
            emitARM64Branch("fcmp", operands, :double, "b.lt")
        when "bdltequn"
            emitARM64Branch("fcmp", operands, :double, "b.le")
        when "btd2i"
            # FIXME: May be a good idea to just get rid of this instruction, since the interpreter
            # currently does not use it.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "td2i"
            emitARM64("fcvtzs", operands, [:double, :int])
        when "bcd2i"
            # FIXME: Remove this instruction, or use it and implement it. Currently it's not used.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "movdz"
            # FIXME: Remove it or support it.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "pop"
            operands.each_slice(2) {
                | ops |
                # Note that the operands are in the reverse order of the case for push.
                # This is because order matters for pushing and popping, and on
                # platforms that only push/pop one slot at a time the arguments are
                # popped in the reverse order that they were pushed. In order to remain
                # compatible with those platforms we assume here that that's what has
                # been done.
                #
                # So for example, if we did push(A, B, C, D), we would then pop(D, C, B, A).
                # But since the ordering of arguments doesn't change on ARM64 between the
                # stp and ldp instructions, we need to swap the argument positions that
                # were passed to us.
                $asm.puts "ldp #{ops[1].arm64Operand(:ptr)}, #{ops[0].arm64Operand(:ptr)}, [sp], #16"
            }
        when "push"
            operands.each_slice(2) {
                | ops |
                $asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
            }
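        # Illustratively, push(t0, t1) emits "stp x0, x1, [sp, #-16]!", and the
        # matching pop(t1, t0) emits "ldp x0, x1, [sp], #16", restoring each
        # register from the slot it was pushed to.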
        when "move"
            if operands[0].immediate?
                emitARM64MoveImmediate(operands[0].value, operands[1])
            else
                emitARM64("mov", operands, :ptr)
            end
        when "sxi2p"
            emitARM64("sxtw", operands, [:int, :ptr])
        when "sxi2q"
            emitARM64("sxtw", operands, [:int, :ptr])
        when "zxi2p"
            emitARM64("uxtw", operands, [:int, :ptr])
        when "zxi2q"
            emitARM64("uxtw", operands, [:int, :ptr])
        when "nop"
            $asm.puts "nop"
        when "bieq", "bbeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs wzr, ", operands, :int, "b.eq")
            end
        when "bpeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
            end
        when "bqeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
            end
        when "bineq", "bbneq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbnz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbnz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs wzr, ", operands, :int, "b.ne")
            end
        when "bpneq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne")
            end
        when "bqneq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne")
            end
        when "bia", "bba"
            emitARM64Branch("subs wzr, ", operands, :int, "b.hi")
        when "bpa"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi")
        when "bqa"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi")
        when "biaeq", "bbaeq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.hs")
        when "bpaeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs")
        when "bqaeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs")
        when "bib", "bbb"
            emitARM64Branch("subs wzr, ", operands, :int, "b.lo")
        when "bpb"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo")
        when "bqb"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo")
        when "bibeq", "bbbeq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.ls")
        when "bpbeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls")
        when "bqbeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls")
        when "bigt", "bbgt"
            emitARM64Branch("subs wzr, ", operands, :int, "b.gt")
        when "bpgt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt")
        when "bqgt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt")
        when "bigteq", "bbgteq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.ge")
        when "bpgteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge")
        when "bqgteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge")
        when "bilt", "bblt"
            emitARM64Branch("subs wzr, ", operands, :int, "b.lt")
        when "bplt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt")
        when "bqlt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt")
        when "bilteq", "bblteq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.le")
        when "bplteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.le")
        when "bqlteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.le")
        when "jmp"
            if operands[0].label?
                $asm.puts "b #{operands[0].asmLabel}"
            else
                emitARM64Unflipped("br", operands, :ptr)
            end
        when "call"
            if operands[0].label?
                $asm.puts "bl #{operands[0].asmLabel}"
            else
                emitARM64Unflipped("blr", operands, :ptr)
            end
        when "break"
            $asm.puts "brk \#0"
        when "ret"
            $asm.puts "ret"
        when "cieq", "cbeq"
            emitARM64Compare(operands, :int, "ne")
        when "cpeq"
            emitARM64Compare(operands, :ptr, "ne")
        when "cqeq"
            emitARM64Compare(operands, :ptr, "ne")
        when "cineq", "cbneq"
            emitARM64Compare(operands, :int, "eq")
        when "cpneq"
            emitARM64Compare(operands, :ptr, "eq")
        when "cqneq"
            emitARM64Compare(operands, :ptr, "eq")
        when "cia", "cba"
            emitARM64Compare(operands, :int, "ls")
        when "cpa"
            emitARM64Compare(operands, :ptr, "ls")
        when "cqa"
            emitARM64Compare(operands, :ptr, "ls")
        when "ciaeq", "cbaeq"
            emitARM64Compare(operands, :int, "lo")
        when "cpaeq"
            emitARM64Compare(operands, :ptr, "lo")
        when "cqaeq"
            emitARM64Compare(operands, :ptr, "lo")
        when "cib", "cbb"
            emitARM64Compare(operands, :int, "hs")
        when "cpb"
            emitARM64Compare(operands, :ptr, "hs")
        when "cqb"
            emitARM64Compare(operands, :ptr, "hs")
        when "cibeq", "cbbeq"
            emitARM64Compare(operands, :int, "hi")
        when "cpbeq"
            emitARM64Compare(operands, :ptr, "hi")
        when "cqbeq"
            emitARM64Compare(operands, :ptr, "hi")
        when "cilt", "cblt"
            emitARM64Compare(operands, :int, "ge")
        when "cplt"
            emitARM64Compare(operands, :ptr, "ge")
        when "cqlt"
            emitARM64Compare(operands, :ptr, "ge")
        when "cilteq", "cblteq"
            emitARM64Compare(operands, :int, "gt")
        when "cplteq"
            emitARM64Compare(operands, :ptr, "gt")
        when "cqlteq"
            emitARM64Compare(operands, :ptr, "gt")
        when "cigt", "cbgt"
            emitARM64Compare(operands, :int, "le")
        when "cpgt"
            emitARM64Compare(operands, :ptr, "le")
        when "cqgt"
            emitARM64Compare(operands, :ptr, "le")
        when "cigteq", "cbgteq"
            emitARM64Compare(operands, :int, "lt")
        when "cpgteq"
            emitARM64Compare(operands, :ptr, "lt")
        when "cqgteq"
            emitARM64Compare(operands, :ptr, "lt")
        when "peek"
            $asm.puts "ldr #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
        when "poke"
            $asm.puts "str #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
        when "fp2d"
            emitARM64("fmov", operands, [:ptr, :double])
        when "fq2d"
            emitARM64("fmov", operands, [:ptr, :double])
        when "fd2p"
            emitARM64("fmov", operands, [:double, :ptr])
        when "fd2q"
            emitARM64("fmov", operands, [:double, :ptr])
        when "bo"
            $asm.puts "b.vs #{operands[0].asmLabel}"
        when "bs"
            $asm.puts "b.mi #{operands[0].asmLabel}"
        when "bz"
            $asm.puts "b.eq #{operands[0].asmLabel}"
        when "bnz"
            $asm.puts "b.ne #{operands[0].asmLabel}"
        when "leai"
            operands[0].arm64EmitLea(operands[1], :int)
        when "leap"
            operands[0].arm64EmitLea(operands[1], :ptr)
        when "leaq"
            operands[0].arm64EmitLea(operands[1], :ptr)
        when "smulli"
            $asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
        when "memfence"
            $asm.puts "dmb sy"
        when "pcrtoaddr"
            $asm.puts "adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}"
        when "nopCortexA53Fix835769"
            $asm.putStr("#if CPU(ARM64_CORTEXA53)")
            $asm.puts "nop"
            $asm.putStr("#endif")
        else
            lowerDefault
        end
    end
end