# Copyright (C) 2013 Apple Inc. All rights reserved.
# Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS, INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS, INC. OR ITS
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
doubleOperand = sh4Operand
raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^dr/
"fr" + ($~.post_match.to_i).to_s
doubleOperand = sh4Operand
raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^dr/
"fr" + ($~.post_match.to_i + 1).to_s
class SpecialRegister < NoChildren
SH4_TMP_GPRS = [ SpecialRegister.new("r3"), SpecialRegister.new("r11"), SpecialRegister.new("r13") ]
SH4_TMP_FPRS = [ SpecialRegister.new("dr10") ]
87 raise "Bad register #{name} for SH4 at #{codeOriginString}"
108 raise "Bad register #{name} for SH4 at #{codeOriginString}"
115 raise "Invalid immediate #{value} at #{codeOriginString}" if value
< -128 or value
> 127
122 raise "Bad offset #{offset.value} at #{codeOriginString}" if offset
.value
< 0 or offset
.value
> 60
124 "@#{base.sh4Operand}"
126 "@(#{offset.value}, #{base.sh4Operand})"
def sh4OperandPostInc
raise "Bad offset #{offset.value} for post inc at #{codeOriginString}" unless offset.value == 0
"@#{base.sh4Operand}+"
raise "Bad offset #{offset.value} for pre dec at #{codeOriginString}" unless offset.value == 0
"@-#{base.sh4Operand}"
raise "Unconverted base index at #{codeOriginString}"
class AbsoluteAddress
raise "Unconverted absolute address at #{codeOriginString}"
class SubImmediates < Node
"#{@left.sh4Operand} - #{@right.sh4Operand}"
class ConstPool < Node
def initialize(codeOrigin, entries, size)
raise "Invalid size #{size} for ConstPool" unless size == 16 or size == 32
"#{size}: #{entries}"
$asm.puts ".balign 2"
$asm.puts ".balign 4"
$asm.puts ".word #{e.value}"
$asm.puts ".long #{e.value}"
class ConstPoolEntry < Node
attr_reader :labelref
def initialize(codeOrigin, value, size)
raise "Invalid size #{size} for ConstPoolEntry" unless size == 16 or size == 32
@label = LocalLabel.unique("constpool#{size}")
@labelref = LocalLabelReference.new(codeOrigin, label)
"#{value} (#{size} @ #{label})"
other.is_a? ConstPoolEntry and other.value == @value
# Lowering of shift ops for SH4. For example:
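# (Illustrative sketch inferred from the lowering code below; the register
# names t0/t1 and the temporary are not from the original source. A shift by
# a register count such as "rshifti t1, t0" is rewritten roughly into:
#     move 31, tmp
#     andi t1, tmp
#     negi tmp, tmp
#     shad tmp, t0
# The count is masked to 5 bits, negated for right shifts, and a dynamic
# "sh{a,l}d" is used; small immediate counts map to the "sh{a,l}{l,r}x" forms.)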
def sh4LowerShiftOps(list)
if node.is_a? Instruction
when "ulshifti", "ulshiftp", "urshifti", "urshiftp", "lshifti", "lshiftp", "rshifti", "rshiftp"
if node.opcode[0, 1] == "u"
direction = node.opcode[1, 1]
direction = node.opcode[0, 1]
if node.operands[0].is_a? Immediate
maskedImm = Immediate.new(node.operands[0].codeOrigin, node.operands[0].value & 31)
if maskedImm.value == 0
# There is nothing to do here.
elsif maskedImm.value == 1 or (type == "l" and [2, 8, 16].include? maskedImm.value)
newList << Instruction.new(node.codeOrigin, "sh#{type}#{direction}x", [maskedImm, node.operands[1]])
tmp = Tmp.new(node.codeOrigin, :gpr)
newList << Instruction.new(node.codeOrigin, "move", [maskedImm, tmp])
newList << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, -1 * maskedImm.value), tmp])
newList << Instruction.new(node.codeOrigin, "sh#{type}d", [tmp, node.operands[1]])
tmp = Tmp.new(node.codeOrigin, :gpr)
newList << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, 31), tmp])
newList << Instruction.new(node.codeOrigin, "andi", [node.operands[0], tmp])
newList << Instruction.new(node.codeOrigin, "negi", [tmp, tmp])
newList << Instruction.new(node.codeOrigin, "sh#{type}d", [tmp, node.operands[1]])
# Lowering of simple branch ops for SH4. For example:
# baddis foo, bar, baz
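# (Illustrative sketch inferred from the code below, not from the original
# comment: the combined arithmetic-and-branch op is split into the plain
# arithmetic op followed by a separate branch on its result, roughly
#     addis foo, bar
#     bs bar, baz
# when bar is a register; a memory destination instead goes through leap,
# loadi, the arithmetic op, storei, and then the branch on a temporary.)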
def sh4LowerSimpleBranchOps(list)
if node.is_a? Instruction
annotation = node.annotation
when /^b(addi|subi|ori|addp)/
raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
if node.operands[1].is_a? RegisterID or node.operands[1].is_a? SpecialRegister
newList << Instruction.new(node.codeOrigin, op, node.operands[0..1])
newList << Instruction.new(node.codeOrigin, "bs", node.operands[1..2])
tmpVal = Tmp.new(node.codeOrigin, :gpr)
tmpPtr = Tmp.new(node.codeOrigin, :gpr)
addr = Address.new(node.codeOrigin, tmpPtr, Immediate.new(node.codeOrigin, 0))
newList << Instruction.new(node.codeOrigin, "leap", [node.operands[1], tmpPtr])
newList << Instruction.new(node.codeOrigin, "loadi", [addr, tmpVal])
newList << Instruction.new(node.codeOrigin, op, [node.operands[0], tmpVal])
newList << Instruction.new(node.codeOrigin, "storei", [tmpVal, addr])
newList << Instruction.new(node.codeOrigin, "bs", [tmpVal, node.operands[2]])
raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
newList << Instruction.new(node.codeOrigin, op, node.operands[0..1])
newList << Instruction.new(node.codeOrigin, "btinz", node.operands[1..2])
when "bmulio", "bmulpo"
raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
tmp1 = Tmp.new(node.codeOrigin, :gpr)
tmp2 = Tmp.new(node.codeOrigin, :gpr)
newList << Instruction.new(node.codeOrigin, node.opcode, [tmp1, tmp2].concat(node.operands))
# Lowering of double accesses for SH4. For example:
# loadd [foo, bar, 8], baz
# becomes:
# leap [foo, bar, 8], tmp
# loaddReversedAndIncrementAddress [tmp], baz
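# (Added note, inferred from the emission code further down: the double is
# moved as two 32-bit fmov.s transfers using post-increment (loads) or
# pre-decrement (stores) addressing, which is why the base-index address is
# first materialized into a plain register with leap; see the
# loaddReversedAndIncrementAddress / storedReversedAndDecrementAddress cases.)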
def sh4LowerDoubleAccesses(list)
if node.is_a? Instruction
tmp = Tmp.new(codeOrigin, :gpr)
addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
newList << Instruction.new(codeOrigin, "leap", [node.operands[0], tmp])
newList << Instruction.new(node.codeOrigin, "loaddReversedAndIncrementAddress", [addr, node.operands[1]], node.annotation)
tmp = Tmp.new(codeOrigin, :gpr)
addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
newList << Instruction.new(codeOrigin, "leap", [node.operands[1].withOffset(8), tmp])
newList << Instruction.new(node.codeOrigin, "storedReversedAndDecrementAddress", [node.operands[0], addr], node.annotation)
# Lowering of double specials for SH4.
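# (Illustrative sketch inferred from the code below; fa/fb/target are not from
# the original source. SH4 has no unordered double compares, so an unordered
# branch such as "bdltun fa, fb, target" is rewritten roughly as:
#     bdnan fa, target
#     bdnan fb, target
#     bdlt fa, fb, target
# i.e. branch to the target if either operand is NaN, then do the ordered
# compare. The ordered "bdneq"/"bdgteq"/"bdlteq" forms instead guard the
# branch with bdnan jumps to a local out label, so the branch is not taken
# when an operand is NaN.)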
def sh4LowerDoubleSpecials(list)
if node.is_a? Instruction
when "bdltun", "bdgtun"
# Handle specific floating point unordered opcodes.
newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], node.operands[2]])
newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], node.operands[2]])
newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
when "bdnequn", "bdgtequn", "bdltequn"
newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
when "bdneq", "bdgteq", "bdlteq"
# Handle specific floating point ordered opcodes.
outlabel = LocalLabel.unique("out_#{node.opcode}")
outref = LocalLabelReference.new(codeOrigin, outlabel)
newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], outref])
newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], outref])
newList << Instruction.new(codeOrigin, node.opcode, node.operands)
# Lowering of misplaced labels for SH4.
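# (Illustrative sketch inferred from the code below; the label name is
# hypothetical. An instruction that uses a label reference as an operand
# (other than "mova") gets the label materialized into a temporary GPR first,
# e.g.
#     move _some_label, tmp
# and the original instruction is re-emitted with tmp in place of the label.)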
def sh4LowerMisplacedLabels(list)
if node.is_a? Instruction
operands = node.operands
if operand.is_a? LabelReference and node.opcode != "mova"
tmp = Tmp.new(operand.codeOrigin, :gpr)
newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp])
newOperands << operand
newList << Instruction.new(node.codeOrigin, node.opcode, newOperands, node.annotation)
# Lowering of misplaced special registers for SH4. For example:
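# (Illustrative sketch inferred from the code below; t0 is not from the
# original source. The SH4 link register "pr" cannot be used as a general
# operand, so "move pr, t0" becomes "stspr t0", "move t0, pr" becomes
# "ldspr t0", and loads or stores that involve pr go through a temporary GPR
# combined with ldspr/stspr.)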
def sh4LowerMisplacedSpecialRegisters(list)
if node.is_a? Instruction
if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr"
newList << Instruction.new(codeOrigin, "stspr", [node.operands[1]])
elsif node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "pr"
newList << Instruction.new(codeOrigin, "ldspr", [node.operands[0]])
when "loadi", "loadis", "loadp"
if node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "pr"
tmp = Tmp.new(codeOrigin, :gpr)
newList << Instruction.new(codeOrigin, node.opcode, [node.operands[0], tmp])
newList << Instruction.new(codeOrigin, "ldspr", [tmp])
when "storei", "storep"
if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr"
tmp = Tmp.new(codeOrigin, :gpr)
newList << Instruction.new(codeOrigin, "stspr", [tmp])
newList << Instruction.new(codeOrigin, node.opcode, [tmp, node.operands[1]])
# Group immediate values outside -128..127 range into constant pools for SH4.
# These constant pools will be placed behind non-return opcodes jmp and ret, for example:
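# (Illustrative sketch inferred from the code below; the value and register
# are hypothetical. A "move" of an immediate that does not fit in signed
# 8 bits, e.g. "move 0x12345, t0", is rewritten as a move from a
# ConstPoolEntry; entries are deduplicated, emitted later as ".word" (16-bit
# values) or ".long" (32-bit values and labels) in a pool dumped after the
# next jmp/ret or at an explicit "flushcp" point, and loaded PC-relative with
# mov.w / mov.l.)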
def sh4LowerConstPool(list)
if node.is_a? Instruction
when "jmp", "ret", "flushcp"
if node.opcode == "flushcp"
outlabel = LocalLabel.unique("flushcp")
newList << Instruction.new(codeOrigin, "jmp", [LocalLabelReference.new(codeOrigin, outlabel)])
if not currentPool16.empty?
newList << ConstPool.new(codeOrigin, currentPool16, 16)
if not currentPool32.empty?
newList << ConstPool.new(codeOrigin, currentPool32, 32)
if node.opcode == "flushcp"
if node.operands[0].is_a? Immediate and not (-128..127).include? node.operands[0].value
if (-32768..32767).include? node.operands[0].value
currentPool16.each { |e|
if e.value == node.operands[0].value
poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].value, 16)
currentPool16 << poolEntry
currentPool32.each { |e|
if e.value == node.operands[0].value
poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].value, 32)
currentPool32 << poolEntry
newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
elsif node.operands[0].is_a? LabelReference
currentPool32.each { |e|
if e.value == node.operands[0].asmLabel
poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].asmLabel, 32)
currentPool32 << poolEntry
newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
elsif node.operands[0].is_a? SubImmediates
poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].sh4Operand, 32)
currentPool32 << poolEntry
newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
if not currentPool16.empty?
newList << ConstPool.new(codeOrigin, currentPool16, 16)
if not currentPool32.empty?
newList << ConstPool.new(codeOrigin, currentPool32, 32)
# Lowering of argument setup for SH4.
# This phase avoids argument register trampling. For example, if a0 == t4:
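# (Illustrative sketch inferred from the code below; t5 is just an example
# source register. With a0 == t4, a naive expansion of "setargs t5, t4" would
# clobber the second argument while loading the first, so the phase emits
# "move t4, a1" before "move t5, a0"; when the operands are exactly a1 and a0
# it swaps them with three xori instructions instead.)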
def sh4LowerArgumentSetup(list)
a0 = RegisterID.forName(codeOrigin, "a0")
a1 = RegisterID.forName(codeOrigin, "a1")
a2 = RegisterID.forName(codeOrigin, "a2")
a3 = RegisterID.forName(codeOrigin, "a3")
if node.is_a? Instruction
if node.operands.size == 2
if node.operands[1].sh4Operand != a0.sh4Operand
newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
elsif node.operands[0].sh4Operand != a1.sh4Operand
newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
# As (operands[0] == a1) and (operands[1] == a0), we just need to swap a0 and a1.
newList << Instruction.new(codeOrigin, "xori", [a0, a1])
newList << Instruction.new(codeOrigin, "xori", [a1, a0])
newList << Instruction.new(codeOrigin, "xori", [a0, a1])
elsif node.operands.size == 4
# FIXME: We just raise an error if something is likely to go wrong for now.
# It would be better to implement a recovering algorithm.
if (node.operands[0].sh4Operand == a1.sh4Operand) or
    (node.operands[0].sh4Operand == a2.sh4Operand) or
    (node.operands[0].sh4Operand == a3.sh4Operand) or
    (node.operands[1].sh4Operand == a0.sh4Operand) or
    (node.operands[1].sh4Operand == a2.sh4Operand) or
    (node.operands[1].sh4Operand == a3.sh4Operand) or
    (node.operands[2].sh4Operand == a0.sh4Operand) or
    (node.operands[2].sh4Operand == a1.sh4Operand) or
    (node.operands[2].sh4Operand == a3.sh4Operand) or
    (node.operands[3].sh4Operand == a0.sh4Operand) or
    (node.operands[3].sh4Operand == a1.sh4Operand) or
    (node.operands[3].sh4Operand == a2.sh4Operand)
raise "Potential argument register trampling detected."
newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
newList << Instruction.new(codeOrigin, "move", [node.operands[2], a2])
newList << Instruction.new(codeOrigin, "move", [node.operands[3], a3])
raise "Invalid operands number (#{node.operands.size}) for setargs"
def getModifiedListSH4
# Verify that we will only see instructions and labels.
unless node.is_a? Instruction or
    node.is_a? LocalLabel or
raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
result = sh4LowerShiftOps(result)
result = sh4LowerSimpleBranchOps(result)
result = riscLowerMalformedAddresses(result) {
if address.is_a? Address
when "btbz", "btbnz", "cbeq", "bbeq", "bbneq", "bbb", "loadb", "storeb"
(0..15).include? address.offset.value and
    ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
    (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
(0..30).include? address.offset.value and
    ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
    (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
(0..60).include? address.offset.value
result = sh4LowerDoubleAccesses(result)
result = sh4LowerDoubleSpecials(result)
result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "muli", "mulp", "andi", "ori", "xori",
    "cbeq", "cieq", "cpeq", "cineq", "cpneq", "cib", "baddio", "bsubio", "bmulio", "baddis",
    "bbeq", "bbneq", "bbb", "bieq", "bpeq", "bineq", "bpneq", "bia", "bpa", "biaeq", "bpaeq", "bib", "bpb",
    "bigteq", "bpgteq", "bilt", "bplt", "bigt", "bpgt", "bilteq", "bplteq", "btiz", "btpz", "btinz", "btpnz", "btbz", "btbnz"])
result = riscLowerMalformedImmediates(result, -128..127)
result = riscLowerMisplacedAddresses(result)
result = sh4LowerMisplacedLabels(result)
result = sh4LowerMisplacedSpecialRegisters(result)
result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_GPRS)
result = assignRegistersToTemporaries(result, :fpr, SH4_TMP_FPRS)
result = sh4LowerConstPool(result)
result = sh4LowerArgumentSetup(result)
def sh4Operands(operands)
operands.map {|v| v.sh4Operand}.join(", ")
def emitSH4Branch(sh4opcode, operand)
raise "Invalid operand #{operand}" unless operand.is_a? RegisterID or operand.is_a? SpecialRegister
$asm.puts "#{sh4opcode} @#{operand.sh4Operand}"
def emitSH4ShiftImm(val, operand, direction)
$asm.puts "shl#{direction}16 #{operand.sh4Operand}"
$asm.puts "shl#{direction}8 #{operand.sh4Operand}"
$asm.puts "shl#{direction}2 #{operand.sh4Operand}"
$asm.puts "shl#{direction} #{operand.sh4Operand}"
def emitSH4BranchIfT(dest, neg)
outlabel = LocalLabel.unique("branchIfT")
sh4opcode = neg ? "bt" : "bf"
$asm.puts "#{sh4opcode} #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
if dest.is_a? LocalLabelReference
$asm.puts "bra #{dest.asmLabel}"
emitSH4Branch("jmp", dest)
outlabel.lower("SH4")
def emitSH4IntCompare(cmpOpcode, operands)
$asm.puts "cmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
def emitSH4CondBranch(cmpOpcode, neg, operands)
emitSH4IntCompare(cmpOpcode, operands)
emitSH4BranchIfT(operands[2], neg)
def emitSH4CompareSet(cmpOpcode, neg, operands)
emitSH4IntCompare(cmpOpcode, operands)
$asm.puts "movt #{operands[2].sh4Operand}"
outlabel = LocalLabel.unique("compareSet")
$asm.puts "mov #0, #{operands[2].sh4Operand}"
$asm.puts "bt #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
$asm.puts "mov #1, #{operands[2].sh4Operand}"
outlabel.lower("SH4")
def emitSH4BranchIfNaN(operands)
raise "Invalid operands number (#{operands.size})" unless operands.size == 2
$asm.puts "fcmp/eq #{sh4Operands([operands[0], operands[0]])}"
$asm.puts "bf #{operands[1].asmLabel}"
def emitSH4DoubleCondBranch(cmpOpcode, neg, operands)
$asm.puts "fcmp/gt #{sh4Operands([operands[0], operands[1]])}"
$asm.puts "fcmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
emitSH4BranchIfT(operands[2], neg)
$asm.comment codeOriginString
if operands.size == 3
if operands[0].sh4Operand == operands[2].sh4Operand
$asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
elsif operands[1].sh4Operand == operands[2].sh4Operand
$asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
$asm.puts "mov #{sh4Operands([operands[0], operands[2]])}"
$asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
$asm.puts "add #{sh4Operands(operands)}"
if operands.size == 3
if operands[1].is_a? Immediate
$asm.puts "mov #{sh4Operands([Immediate.new(codeOrigin, -1 * operands[1].value), operands[2]])}"
$asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
elsif operands[1].sh4Operand == operands[2].sh4Operand
$asm.puts "neg #{sh4Operands([operands[2], operands[2]])}"
$asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
$asm.puts "mov #{sh4Operands([operands[0], operands[2]])}"
$asm.puts "sub #{sh4Operands([operands[1], operands[2]])}"
if operands[0].is_a? Immediate
$asm.puts "add #{sh4Operands([Immediate.new(codeOrigin, -1 * operands[0].value), operands[1]])}"
$asm.puts "sub #{sh4Operands(operands)}"
$asm.puts "mul.l #{sh4Operands(operands[0..1])}"
$asm.puts "sts macl, #{operands[-1].sh4Operand}"
if operands.size == 2
$asm.puts "neg #{sh4Operands(operands)}"
$asm.puts "neg #{sh4Operands([operands[0], operands[0]])}"
896 when "andi", "andp", "ori", "orp", "xori", "xorp"
897 raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands
.size
== 2
898 sh4opcode
= opcode
[0..-2]
899 $asm.puts
"#{sh4opcode} #{sh4Operands(operands)}"
900 when "shllx", "shlrx"
901 raise "Unhandled parameters for opcode #{opcode}" unless operands
[0].is_a
? Immediate
902 if operands
[0].value
== 1
903 $asm.puts
"shl#{opcode[3, 1]} #{operands[1].sh4Operand}"
905 $asm.puts
"shl#{opcode[3, 1]}#{operands[0].value} #{operands[1].sh4Operand}"
907 when "shalx", "sharx"
908 raise "Unhandled parameters for opcode #{opcode}" unless operands
[0].is_a
? Immediate
and operands
[0].value
== 1
909 $asm.puts
"sha#{opcode[3, 1]} #{operands[1].sh4Operand}"
911 $asm.puts
"#{opcode} #{sh4Operands(operands)}"
912 when "loaddReversedAndIncrementAddress"
913 # As we are little endian, we don't use "fmov @Rm, DRn" here.
914 $asm.puts
"fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleLo}"
915 $asm.puts
"fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleHi}"
916 when "storedReversedAndDecrementAddress"
917 # As we are little endian, we don't use "fmov DRm, @Rn" here.
918 $asm.puts
"fmov.s #{operands[0].sh4SingleHi}, #{operands[1].sh4OperandPreDec}"
919 $asm.puts
"fmov.s #{operands[0].sh4SingleLo}, #{operands[1].sh4OperandPreDec}"
921 $asm.puts
"lds #{operands[0].sh4Operand}, fpul"
922 $asm.puts
"float fpul, #{operands[1].sh4Operand}"
924 $asm.puts
"lds #{operands[0].sh4Operand}, fpul"
925 $asm.puts
"fsts fpul, #{operands[2].sh4SingleLo}"
926 $asm.puts
"lds #{operands[1].sh4Operand}, fpul"
927 $asm.puts
"fsts fpul, #{operands[2].sh4SingleHi}"
929 $asm.puts
"flds #{operands[0].sh4SingleLo}, fpul"
930 $asm.puts
"sts fpul, #{operands[1].sh4Operand}"
931 $asm.puts
"flds #{operands[0].sh4SingleHi}, fpul"
932 $asm.puts
"sts fpul, #{operands[2].sh4Operand}"
933 when "addd", "subd", "muld", "divd"
934 sh4opcode
= opcode
[0..-2]
935 $asm.puts
"f#{sh4opcode} #{sh4Operands(operands)}"
937 $asm.puts
"ftrc #{operands[0].sh4Operand}, fpul"
938 $asm.puts
"sts fpul, #{operands[1].sh4Operand}"
939 $asm.puts
"float fpul, #{SH4_TMP_FPRS[0].sh4Operand}"
940 $asm.puts
"fcmp/eq #{sh4Operands([operands[0], SH4_TMP_FPRS[0]])}"
941 $asm.puts
"bf #{operands[2].asmLabel}"
942 $asm.puts
"tst #{sh4Operands([operands[1], operands[1]])}"
943 $asm.puts
"bt #{operands[2].asmLabel}"
945 emitSH4BranchIfNaN(operands
)
947 emitSH4DoubleCondBranch("eq", true, operands
)
949 emitSH4DoubleCondBranch("lt", true, operands
)
951 emitSH4DoubleCondBranch("lt", false, operands
)
953 emitSH4DoubleCondBranch("gt", true, operands
)
955 emitSH4DoubleCondBranch("gt", false, operands
)
956 when "baddio", "baddpo", "bsubio", "bsubpo"
957 raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands
.size
== 3
958 $asm.puts
"#{opcode[1, 3]}v #{sh4Operands([operands[0], operands[1]])}"
959 $asm.puts
"bt #{operands[2].asmLabel}"
960 when "bmulio", "bmulpo"
961 raise "Invalid operands number (#{operands.size})" unless operands
.size
== 5
962 $asm.puts
"dmuls.l #{sh4Operands([operands[2], operands[3]])}"
963 $asm.puts
"sts macl, #{operands[3].sh4Operand}"
964 $asm.puts
"cmp/pz #{operands[3].sh4Operand}"
965 $asm.puts
"movt #{operands[1].sh4Operand}"
966 $asm.puts
"add #-1, #{operands[1].sh4Operand}"
967 $asm.puts
"sts mach, #{operands[0].sh4Operand}"
968 $asm.puts
"cmp/eq #{sh4Operands([operands[0], operands[1]])}"
969 $asm.puts
"bf #{operands[4].asmLabel}"
970 when "btiz", "btpz", "btbz", "btinz", "btpnz", "btbnz"
971 if operands
.size
== 3
972 $asm.puts
"tst #{sh4Operands([operands[0], operands[1]])}"
974 if operands
[0].sh4Operand
== "r0"
975 $asm.puts
"cmp/eq #0, r0"
977 $asm.puts
"tst #{sh4Operands([operands[0], operands[0]])}"
980 emitSH4BranchIfT(operands
[-1], (opcode
[-2, 2] == "nz"))
981 when "cieq", "cpeq", "cbeq"
982 emitSH4CompareSet("eq", false, operands
)
983 when "cineq", "cpneq", "cbneq"
984 emitSH4CompareSet("eq", true, operands
)
985 when "cib", "cpb", "cbb"
986 emitSH4CompareSet("hs", true, operands
)
987 when "bieq", "bpeq", "bbeq"
988 emitSH4CondBranch("eq", false, operands
)
989 when "bineq", "bpneq", "bbneq"
990 emitSH4CondBranch("eq", true, operands
)
991 when "bib", "bpb", "bbb"
992 emitSH4CondBranch("hs", true, operands
)
993 when "bia", "bpa", "bba"
994 emitSH4CondBranch("hi", false, operands
)
995 when "bibeq", "bpbeq"
996 emitSH4CondBranch("hi", true, operands
)
997 when "biaeq", "bpaeq"
998 emitSH4CondBranch("hs", false, operands
)
999 when "bigteq", "bpgteq", "bbgteq"
1000 emitSH4CondBranch("ge", false, operands
)
1001 when "bilt", "bplt", "bblt"
1002 emitSH4CondBranch("ge", true, operands
)
1003 when "bigt", "bpgt", "bbgt"
1004 emitSH4CondBranch("gt", false, operands
)
1005 when "bilteq", "bplteq", "bblteq"
1006 emitSH4CondBranch("gt", true, operands
)
$asm.puts "cmp/pz #{operands[0].sh4Operand}"
$asm.puts "bf #{operands[1].asmLabel}"
if operands[0].is_a? LocalLabelReference
$asm.puts "bsr #{operands[0].asmLabel}"
elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
emitSH4Branch("jsr", operands[0])
raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
if operands[0].is_a? LocalLabelReference
$asm.puts "bra #{operands[0].asmLabel}"
elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
emitSH4Branch("jmp", operands[0])
raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
$asm.puts "mov.b #{sh4Operands(operands)}"
$asm.puts "extu.b #{sh4Operands([operands[1], operands[1]])}"
$asm.puts "mov.b #{sh4Operands(operands)}"
$asm.puts "mov.w #{sh4Operands(operands)}"
$asm.puts "extu.w #{sh4Operands([operands[1], operands[1]])}"
when "loadi", "loadis", "loadp", "storei", "storep"
$asm.puts "mov.l #{sh4Operands(operands)}"
$asm.puts ".balign 4" # As balign directive is in a code section, fill value is 'nop' instruction.
$asm.puts "mova #{sh4Operands(operands)}"
if operands[0].is_a? ConstPoolEntry
if operands[0].size == 16
$asm.puts "mov.w #{operands[0].labelref.asmLabel}, #{operands[1].sh4Operand}"
$asm.puts "mov.l #{operands[0].labelref.asmLabel}, #{operands[1].sh4Operand}"
elsif operands[0].sh4Operand != operands[1].sh4Operand
$asm.puts "mov #{sh4Operands(operands)}"
if operands[0].is_a? BaseIndex
$asm.puts "mov #{sh4Operands([biop.index, operands[1]])}"
if biop.scaleShift > 0
emitSH4ShiftImm(biop.scaleShift, operands[1], "l")
$asm.puts "add #{sh4Operands([biop.base, operands[1]])}"
if biop.offset.value != 0
$asm.puts "add #{sh4Operands([biop.offset, operands[1]])}"
elsif operands[0].is_a? Address
if operands[0].base != operands[1]
$asm.puts "mov #{sh4Operands([operands[0].base, operands[1]])}"
if operands[0].offset.value != 0
$asm.puts "add #{sh4Operands([operands[0].offset, operands[1]])}"
raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
$asm.puts "lds #{sh4Operands(operands)}, pr"
$asm.puts "sts pr, #{sh4Operands(operands)}"
if operands[0].sh4Operand == "pr"
$asm.puts "lds.l @r15+, #{sh4Operands(operands)}"
$asm.puts "mov.l @r15+, #{sh4Operands(operands)}"
if operands[0].sh4Operand == "pr"
$asm.puts "sts.l #{sh4Operands(operands)}, @-r15"
$asm.puts "mov.l #{sh4Operands(operands)}, @-r15"
when "popCalleeSaves"
$asm.puts "mov.l @r15+, r8"
$asm.puts "mov.l @r15+, r9"
$asm.puts "mov.l @r15+, r10"
$asm.puts "mov.l @r15+, r11"
$asm.puts "mov.l @r15+, r13"
when "pushCalleeSaves"
$asm.puts "mov.l r13, @-r15"
$asm.puts "mov.l r11, @-r15"
$asm.puts "mov.l r10, @-r15"
$asm.puts "mov.l r9, @-r15"
$asm.puts "mov.l r8, @-r15"
# This special opcode always generates an illegal instruction exception.
$asm.puts ".word 0xfffd"