/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #ifndef MacroAssembler_h
27 #define MacroAssembler_h
29 #include <wtf/Platform.h>
33 #include "X86Assembler.h"
37 class MacroAssembler
{
39 X86Assembler m_assembler
;
42 static const X86::RegisterID scratchRegister
= X86::r11
;
46 typedef X86::RegisterID RegisterID
;
48 // Note: do not rely on values in this enum, these will change (to 0..3).
66 size_t size() { return m_assembler
.size(); }
67 void* copyCode(ExecutablePool
* allocator
)
69 return m_assembler
.executableCopy(allocator
);
75 // Describes a simple base-offset address.
77 explicit Address(RegisterID base
, int32_t offset
= 0)
89 // This class is used for explicit 'load' and 'store' operations
90 // (as opposed to situations in which a memory operand is provided
91 // to a generic operation, such as an integer arithmetic instruction).
93 // In the case of a load (or store) operation we want to permit
94 // addresses to be implicitly constructed, e.g. the two calls:
96 // load32(Address(addrReg), destReg);
97 // load32(addrReg, destReg);
99 // Are equivalent, and the explicit wrapping of the Address in the former
101 struct ImplicitAddress
{
102 ImplicitAddress(RegisterID base
)
108 ImplicitAddress(Address address
)
110 , offset(address
.offset
)
120 // Describes a complex addressing mode.
122 BaseIndex(RegisterID base
, RegisterID index
, Scale scale
, int32_t offset
= 0)
// Describes a memory operand given by a pointer. For regular load & store
// operations an unwrapped void* will be used, rather than using this.
struct AbsoluteAddress {
    explicit AbsoluteAddress(void* ptr)
        : m_ptr(ptr)
    {
    }

    void* m_ptr;
};
155 // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
156 // patched after the code has been generated.
158 friend class MacroAssembler
;
159 friend class PatchBuffer
;
166 DataLabelPtr(MacroAssembler
* masm
)
167 : m_label(masm
->m_assembler
.label())
171 static void patch(void* address
, void* value
)
173 X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address
), reinterpret_cast<intptr_t>(value
));
177 X86Assembler::JmpDst m_label
;
182 // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
183 // patched after the code has been generated.
185 friend class MacroAssembler
;
186 friend class PatchBuffer
;
193 DataLabel32(MacroAssembler
* masm
)
194 : m_label(masm
->m_assembler
.label())
198 static void patch(void* address
, int32_t value
)
200 X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address
), value
);
204 X86Assembler::JmpDst m_label
;
209 // A Label records a point in the generated instruction stream, typically such that
210 // it may be used as a destination for a jump.
213 friend class MacroAssembler
;
214 friend class PatchBuffer
;
221 Label(MacroAssembler
* masm
)
222 : m_label(masm
->m_assembler
.label())
226 // FIXME: transitionary method, while we replace JmpSrces with Jumps.
227 operator X86Assembler::JmpDst()
233 X86Assembler::JmpDst m_label
;
239 // A jump object is a reference to a jump instruction that has been planted
240 // into the code buffer - it is typically used to link the jump, setting the
241 // relative offset such that when executed it will jump to the desired
244 // Jump objects retain a pointer to the assembler for syntactic purposes -
245 // to allow the jump object to be able to link itself, e.g.:
247 // Jump forwardsBranch = jne32(Imm32(0), reg1);
249 // forwardsBranch.link();
251 // Jumps may also be linked to a Label.
253 friend class PatchBuffer
;
254 friend class MacroAssembler
;
261 // FIXME: transitionary method, while we replace JmpSrces with Jumps.
262 Jump(X86Assembler::JmpSrc jmp
)
267 void link(MacroAssembler
* masm
)
269 masm
->m_assembler
.link(m_jmp
, masm
->m_assembler
.label());
272 void linkTo(Label label
, MacroAssembler
* masm
)
274 masm
->m_assembler
.link(m_jmp
, label
.m_label
);
277 // FIXME: transitionary method, while we replace JmpSrces with Jumps.
278 operator X86Assembler::JmpSrc()
283 static void patch(void* address
, void* destination
)
285 X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address
), destination
);
289 X86Assembler::JmpSrc m_jmp
;
294 // A JumpList is a set of Jump objects.
295 // All jumps in the set will be linked to the same destination.
297 friend class PatchBuffer
;
300 void link(MacroAssembler
* masm
)
302 size_t size
= m_jumps
.size();
303 for (size_t i
= 0; i
< size
; ++i
)
304 m_jumps
[i
].link(masm
);
308 void linkTo(Label label
, MacroAssembler
* masm
)
310 size_t size
= m_jumps
.size();
311 for (size_t i
= 0; i
< size
; ++i
)
312 m_jumps
[i
].linkTo(label
, masm
);
316 void append(Jump jump
)
318 m_jumps
.append(jump
);
321 void append(JumpList
& other
)
323 m_jumps
.append(other
.m_jumps
.begin(), other
.m_jumps
.size());
328 return !m_jumps
.size();
332 Vector
<Jump
, 16> m_jumps
;
338 // This class assists in linking code generated by the macro assembler, once code generation
339 // has been completed, and the code has been copied to is final location in memory. At this
340 // time pointers to labels within the code may be resolved, and relative offsets to external
341 // addresses may be fixed.
344 // * Jump objects may be linked to external targets,
345 // * The address of Jump objects may taken, such that it can later be relinked.
346 // * The return address of a Jump object representing a call may be acquired.
347 // * The address of a Label pointing into the code may be resolved.
348 // * The value referenced by a DataLabel may be fixed.
350 // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
351 // address of calls, as opposed to a point that can be used to later relink a Jump -
352 // possibly wrap the later up in an object that can do just that).
355 PatchBuffer(void* code
)
360 void link(Jump jump
, void* target
)
362 X86Assembler::link(m_code
, jump
.m_jmp
, target
);
365 void link(JumpList list
, void* target
)
367 for (unsigned i
= 0; i
< list
.m_jumps
.size(); ++i
)
368 X86Assembler::link(m_code
, list
.m_jumps
[i
], target
);
371 void* addressOf(Jump jump
)
373 return X86Assembler::getRelocatedAddress(m_code
, jump
.m_jmp
);
376 void* addressOf(Label label
)
378 return X86Assembler::getRelocatedAddress(m_code
, label
.m_label
);
381 void* addressOf(DataLabelPtr label
)
383 return X86Assembler::getRelocatedAddress(m_code
, label
.m_label
);
386 void* addressOf(DataLabel32 label
)
388 return X86Assembler::getRelocatedAddress(m_code
, label
.m_label
);
391 void setPtr(DataLabelPtr label
, void* value
)
393 X86Assembler::patchAddress(m_code
, label
.m_label
, value
);
403 // A pointer sized immediate operand to an instruction - this is wrapped
404 // in a class requiring explicit construction in order to differentiate
405 // from pointers used as absolute addresses to memory operations
407 explicit ImmPtr(void* value
)
414 return reinterpret_cast<intptr_t>(m_value
);
423 // A 32bit immediate operand to an instruction - this is wrapped in a
424 // class requiring explicit construction in order to prevent RegisterIDs
425 // (which are implemented as an enum) from accidentally being passed as
428 explicit Imm32(int32_t value
)
434 explicit Imm32(ImmPtr ptr
)
435 : m_value(ptr
.asIntptr())
443 // Integer arithmetic operations:
445 // Operations are typically two operand - operation(source, srcDst)
446 // For many operations the source may be an Imm32, the srcDst operand
447 // may often be a memory location (explictly described using an Address
450 void addPtr(RegisterID src
, RegisterID dest
)
453 m_assembler
.addq_rr(src
, dest
);
459 void addPtr(Imm32 imm
, RegisterID srcDest
)
462 m_assembler
.addq_ir(imm
.m_value
, srcDest
);
468 void addPtr(ImmPtr imm
, RegisterID dest
)
471 move(imm
, scratchRegister
);
472 m_assembler
.addq_rr(scratchRegister
, dest
);
474 add32(Imm32(imm
), dest
);
478 void addPtr(Imm32 imm
, RegisterID src
, RegisterID dest
)
480 m_assembler
.leal_mr(imm
.m_value
, src
, dest
);
483 void add32(RegisterID src
, RegisterID dest
)
485 m_assembler
.addl_rr(src
, dest
);
488 void add32(Imm32 imm
, Address address
)
490 m_assembler
.addl_im(imm
.m_value
, address
.offset
, address
.base
);
493 void add32(Imm32 imm
, RegisterID dest
)
495 m_assembler
.addl_ir(imm
.m_value
, dest
);
498 void add32(Imm32 imm
, AbsoluteAddress address
)
501 move(ImmPtr(address
.m_ptr
), scratchRegister
);
502 add32(imm
, Address(scratchRegister
));
504 m_assembler
.addl_im(imm
.m_value
, address
.m_ptr
);
508 void add32(Address src
, RegisterID dest
)
510 m_assembler
.addl_mr(src
.offset
, src
.base
, dest
);
513 void andPtr(RegisterID src
, RegisterID dest
)
516 m_assembler
.andq_rr(src
, dest
);
522 void andPtr(Imm32 imm
, RegisterID srcDest
)
525 m_assembler
.andq_ir(imm
.m_value
, srcDest
);
531 void and32(RegisterID src
, RegisterID dest
)
533 m_assembler
.andl_rr(src
, dest
);
536 void and32(Imm32 imm
, RegisterID dest
)
538 m_assembler
.andl_ir(imm
.m_value
, dest
);
541 void lshift32(Imm32 imm
, RegisterID dest
)
543 m_assembler
.shll_i8r(imm
.m_value
, dest
);
546 void lshift32(RegisterID shift_amount
, RegisterID dest
)
548 // On x86 we can only shift by ecx; if asked to shift by another register we'll
549 // need rejig the shift amount into ecx first, and restore the registers afterwards.
550 if (shift_amount
!= X86::ecx
) {
551 swap(shift_amount
, X86::ecx
);
553 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
554 if (dest
== shift_amount
)
555 m_assembler
.shll_CLr(X86::ecx
);
556 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
557 else if (dest
== X86::ecx
)
558 m_assembler
.shll_CLr(shift_amount
);
559 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
561 m_assembler
.shll_CLr(dest
);
563 swap(shift_amount
, X86::ecx
);
565 m_assembler
.shll_CLr(dest
);
568 // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
569 // For now, this operation has specific register requirements, and the three register must
570 // be unique. It is unfortunate to expose this in the MacroAssembler interface, however
571 // given the complexity to fix, the fact that it is not uncommmon for processors to have
572 // specific register requirements on this operation (e.g. Mips result in 'hi'), or to not
573 // support a hardware divide at all, it may not be
574 void mod32(RegisterID divisor
, RegisterID dividend
, RegisterID remainder
)
577 #pragma unused(dividend,remainder)
579 ASSERT((dividend
== X86::eax
) && (remainder
== X86::edx
));
580 ASSERT((dividend
!= divisor
) && (remainder
!= divisor
));
584 m_assembler
.idivl_r(divisor
);
587 void mul32(RegisterID src
, RegisterID dest
)
589 m_assembler
.imull_rr(src
, dest
);
592 void mul32(Imm32 imm
, RegisterID src
, RegisterID dest
)
594 m_assembler
.imull_i32r(src
, imm
.m_value
, dest
);
597 void not32(RegisterID srcDest
)
599 m_assembler
.notl_r(srcDest
);
602 void orPtr(RegisterID src
, RegisterID dest
)
605 m_assembler
.orq_rr(src
, dest
);
611 void orPtr(ImmPtr imm
, RegisterID dest
)
614 move(imm
, scratchRegister
);
615 m_assembler
.orq_rr(scratchRegister
, dest
);
617 or32(Imm32(imm
), dest
);
621 void orPtr(Imm32 imm
, RegisterID dest
)
624 m_assembler
.orq_ir(imm
.m_value
, dest
);
630 void or32(RegisterID src
, RegisterID dest
)
632 m_assembler
.orl_rr(src
, dest
);
635 void or32(Imm32 imm
, RegisterID dest
)
637 m_assembler
.orl_ir(imm
.m_value
, dest
);
640 void rshiftPtr(RegisterID shift_amount
, RegisterID dest
)
643 // On x86 we can only shift by ecx; if asked to shift by another register we'll
644 // need rejig the shift amount into ecx first, and restore the registers afterwards.
645 if (shift_amount
!= X86::ecx
) {
646 swap(shift_amount
, X86::ecx
);
648 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
649 if (dest
== shift_amount
)
650 m_assembler
.sarq_CLr(X86::ecx
);
651 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
652 else if (dest
== X86::ecx
)
653 m_assembler
.sarq_CLr(shift_amount
);
654 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
656 m_assembler
.sarq_CLr(dest
);
658 swap(shift_amount
, X86::ecx
);
660 m_assembler
.sarq_CLr(dest
);
662 rshift32(shift_amount
, dest
);
666 void rshiftPtr(Imm32 imm
, RegisterID dest
)
669 m_assembler
.sarq_i8r(imm
.m_value
, dest
);
675 void rshift32(RegisterID shift_amount
, RegisterID dest
)
677 // On x86 we can only shift by ecx; if asked to shift by another register we'll
678 // need rejig the shift amount into ecx first, and restore the registers afterwards.
679 if (shift_amount
!= X86::ecx
) {
680 swap(shift_amount
, X86::ecx
);
682 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
683 if (dest
== shift_amount
)
684 m_assembler
.sarl_CLr(X86::ecx
);
685 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
686 else if (dest
== X86::ecx
)
687 m_assembler
.sarl_CLr(shift_amount
);
688 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
690 m_assembler
.sarl_CLr(dest
);
692 swap(shift_amount
, X86::ecx
);
694 m_assembler
.sarl_CLr(dest
);
697 void rshift32(Imm32 imm
, RegisterID dest
)
699 m_assembler
.sarl_i8r(imm
.m_value
, dest
);
702 void subPtr(RegisterID src
, RegisterID dest
)
705 m_assembler
.subq_rr(src
, dest
);
711 void subPtr(Imm32 imm
, RegisterID dest
)
714 m_assembler
.subq_ir(imm
.m_value
, dest
);
720 void subPtr(ImmPtr imm
, RegisterID dest
)
723 move(imm
, scratchRegister
);
724 m_assembler
.subq_rr(scratchRegister
, dest
);
726 sub32(Imm32(imm
), dest
);
730 void sub32(RegisterID src
, RegisterID dest
)
732 m_assembler
.subl_rr(src
, dest
);
735 void sub32(Imm32 imm
, RegisterID dest
)
737 m_assembler
.subl_ir(imm
.m_value
, dest
);
740 void sub32(Imm32 imm
, Address address
)
742 m_assembler
.subl_im(imm
.m_value
, address
.offset
, address
.base
);
745 void sub32(Imm32 imm
, AbsoluteAddress address
)
748 move(ImmPtr(address
.m_ptr
), scratchRegister
);
749 sub32(imm
, Address(scratchRegister
));
751 m_assembler
.subl_im(imm
.m_value
, address
.m_ptr
);
755 void sub32(Address src
, RegisterID dest
)
757 m_assembler
.subl_mr(src
.offset
, src
.base
, dest
);
760 void xorPtr(RegisterID src
, RegisterID dest
)
763 m_assembler
.xorq_rr(src
, dest
);
769 void xorPtr(Imm32 imm
, RegisterID srcDest
)
772 m_assembler
.xorq_ir(imm
.m_value
, srcDest
);
778 void xor32(RegisterID src
, RegisterID dest
)
780 m_assembler
.xorl_rr(src
, dest
);
783 void xor32(Imm32 imm
, RegisterID srcDest
)
785 m_assembler
.xorl_ir(imm
.m_value
, srcDest
);
789 // Memory access operations:
791 // Loads are of the form load(address, destination) and stores of the form
792 // store(source, address). The source for a store may be an Imm32. Address
793 // operand objects to loads and store will be implicitly constructed if a
794 // register is passed.
796 void loadPtr(ImplicitAddress address
, RegisterID dest
)
799 m_assembler
.movq_mr(address
.offset
, address
.base
, dest
);
801 load32(address
, dest
);
805 DataLabel32
loadPtrWithAddressOffsetPatch(Address address
, RegisterID dest
)
808 m_assembler
.movq_mr_disp32(address
.offset
, address
.base
, dest
);
809 return DataLabel32(this);
811 m_assembler
.movl_mr_disp32(address
.offset
, address
.base
, dest
);
812 return DataLabel32(this);
816 void loadPtr(BaseIndex address
, RegisterID dest
)
819 m_assembler
.movq_mr(address
.offset
, address
.base
, address
.index
, address
.scale
, dest
);
821 load32(address
, dest
);
825 void loadPtr(void* address
, RegisterID dest
)
828 if (dest
== X86::eax
)
829 m_assembler
.movq_mEAX(address
);
831 move(X86::eax
, dest
);
832 m_assembler
.movq_mEAX(address
);
833 swap(X86::eax
, dest
);
836 load32(address
, dest
);
840 void load32(ImplicitAddress address
, RegisterID dest
)
842 m_assembler
.movl_mr(address
.offset
, address
.base
, dest
);
845 void load32(BaseIndex address
, RegisterID dest
)
847 m_assembler
.movl_mr(address
.offset
, address
.base
, address
.index
, address
.scale
, dest
);
850 void load32(void* address
, RegisterID dest
)
853 if (dest
== X86::eax
)
854 m_assembler
.movl_mEAX(address
);
856 move(X86::eax
, dest
);
857 m_assembler
.movl_mEAX(address
);
858 swap(X86::eax
, dest
);
861 m_assembler
.movl_mr(address
, dest
);
865 void load16(BaseIndex address
, RegisterID dest
)
867 m_assembler
.movzwl_mr(address
.offset
, address
.base
, address
.index
, address
.scale
, dest
);
870 void storePtr(RegisterID src
, ImplicitAddress address
)
873 m_assembler
.movq_rm(src
, address
.offset
, address
.base
);
875 store32(src
, address
);
879 DataLabel32
storePtrWithAddressOffsetPatch(RegisterID src
, Address address
)
882 m_assembler
.movq_rm_disp32(src
, address
.offset
, address
.base
);
883 return DataLabel32(this);
885 m_assembler
.movl_rm_disp32(src
, address
.offset
, address
.base
);
886 return DataLabel32(this);
890 void storePtr(RegisterID src
, BaseIndex address
)
893 m_assembler
.movq_rm(src
, address
.offset
, address
.base
, address
.index
, address
.scale
);
895 store32(src
, address
);
899 void storePtr(ImmPtr imm
, ImplicitAddress address
)
902 move(imm
, scratchRegister
);
903 storePtr(scratchRegister
, address
);
905 m_assembler
.movl_i32m(imm
.asIntptr(), address
.offset
, address
.base
);
909 #if !PLATFORM(X86_64)
910 void storePtr(ImmPtr imm
, void* address
)
912 store32(Imm32(imm
), address
);
916 DataLabelPtr
storePtrWithPatch(Address address
)
919 m_assembler
.movq_i64r(0, scratchRegister
);
920 DataLabelPtr
label(this);
921 storePtr(scratchRegister
, address
);
924 m_assembler
.movl_i32m(0, address
.offset
, address
.base
);
925 return DataLabelPtr(this);
929 void store32(RegisterID src
, ImplicitAddress address
)
931 m_assembler
.movl_rm(src
, address
.offset
, address
.base
);
934 void store32(RegisterID src
, BaseIndex address
)
936 m_assembler
.movl_rm(src
, address
.offset
, address
.base
, address
.index
, address
.scale
);
939 void store32(Imm32 imm
, ImplicitAddress address
)
941 m_assembler
.movl_i32m(imm
.m_value
, address
.offset
, address
.base
);
944 void store32(Imm32 imm
, void* address
)
947 move(X86::eax
, scratchRegister
);
949 m_assembler
.movl_EAXm(address
);
950 move(scratchRegister
, X86::eax
);
952 m_assembler
.movl_i32m(imm
.m_value
, address
);
957 // Stack manipulation operations:
959 // The ABI is assumed to provide a stack abstraction to memory,
960 // containing machine word sized units of data. Push and pop
961 // operations add and remove a single register sized unit of data
962 // to or from the stack. Peek and poke operations read or write
963 // values on the stack, without moving the current stack position.
965 void pop(RegisterID dest
)
967 m_assembler
.pop_r(dest
);
970 void push(RegisterID src
)
972 m_assembler
.push_r(src
);
975 void push(Address address
)
977 m_assembler
.push_m(address
.offset
, address
.base
);
982 m_assembler
.push_i32(imm
.m_value
);
987 addPtr(Imm32(sizeof(void*)), X86::esp
);
990 void peek(RegisterID dest
, int index
= 0)
992 loadPtr(Address(X86::esp
, (index
* sizeof(void *))), dest
);
995 void poke(RegisterID src
, int index
= 0)
997 storePtr(src
, Address(X86::esp
, (index
* sizeof(void *))));
1000 void poke(Imm32 value
, int index
= 0)
1002 store32(value
, Address(X86::esp
, (index
* sizeof(void *))));
1005 void poke(ImmPtr imm
, int index
= 0)
1007 storePtr(imm
, Address(X86::esp
, (index
* sizeof(void *))));
1010 // Register move operations:
1012 // Move values in registers.
1014 void move(Imm32 imm
, RegisterID dest
)
1016 // Note: on 64-bit the Imm32 value is zero extended into the register, it
1017 // may be useful to have a separate version that sign extends the value?
1019 m_assembler
.xorl_rr(dest
, dest
);
1021 m_assembler
.movl_i32r(imm
.m_value
, dest
);
1024 void move(RegisterID src
, RegisterID dest
)
1026 // Note: on 64-bit this is is a full register move; perhaps it would be
1027 // useful to have separate move32 & movePtr, with move32 zero extending?
1028 #if PLATFORM(X86_64)
1029 m_assembler
.movq_rr(src
, dest
);
1031 m_assembler
.movl_rr(src
, dest
);
1035 void move(ImmPtr imm
, RegisterID dest
)
1037 #if PLATFORM(X86_64)
1038 if (CAN_SIGN_EXTEND_U32_64(imm
.asIntptr()))
1039 m_assembler
.movl_i32r(static_cast<int32_t>(imm
.asIntptr()), dest
);
1041 m_assembler
.movq_i64r(imm
.asIntptr(), dest
);
1043 m_assembler
.movl_i32r(imm
.asIntptr(), dest
);
1047 void swap(RegisterID reg1
, RegisterID reg2
)
1049 #if PLATFORM(X86_64)
1050 m_assembler
.xchgq_rr(reg1
, reg2
);
1052 m_assembler
.xchgl_rr(reg1
, reg2
);
1056 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
1058 #if PLATFORM(X86_64)
1059 m_assembler
.movsxd_rr(src
, dest
);
1066 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
1068 #if PLATFORM(X86_64)
1069 m_assembler
.movl_rr(src
, dest
);
1077 // Forwards / external control flow operations:
1079 // This set of jump and conditional branch operations return a Jump
1080 // object which may linked at a later point, allow forwards jump,
1081 // or jumps that will require external linkage (after the code has been
1084 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1085 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
1086 // used (representing the names 'below' and 'above').
1088 // Operands to the comparision are provided in the expected order, e.g.
1089 // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
1090 // treated as a signed 32bit value, is less than or equal to 5.
1092 // jz and jnz test whether the first operand is equal to zero, and take
1093 // an optional second operand of a mask under which to perform the test.
1096 void compareImm32ForBranch(RegisterID left
, int32_t right
)
1098 m_assembler
.cmpl_ir(right
, left
);
1101 void compareImm32ForBranchEquality(RegisterID reg
, int32_t imm
)
1104 m_assembler
.testl_rr(reg
, reg
);
1106 m_assembler
.cmpl_ir(imm
, reg
);
1109 void compareImm32ForBranchEquality(Address address
, int32_t imm
)
1111 m_assembler
.cmpl_im(imm
, address
.offset
, address
.base
);
1114 void testImm32(RegisterID reg
, Imm32 mask
)
1116 // if we are only interested in the low seven bits, this can be tested with a testb
1117 if (mask
.m_value
== -1)
1118 m_assembler
.testl_rr(reg
, reg
);
1119 else if ((mask
.m_value
& ~0x7f) == 0)
1120 m_assembler
.testb_i8r(mask
.m_value
, reg
);
1122 m_assembler
.testl_i32r(mask
.m_value
, reg
);
1125 void testImm32(Address address
, Imm32 mask
)
1127 if (mask
.m_value
== -1)
1128 m_assembler
.cmpl_im(0, address
.offset
, address
.base
);
1130 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
);
1133 void testImm32(BaseIndex address
, Imm32 mask
)
1135 if (mask
.m_value
== -1)
1136 m_assembler
.cmpl_im(0, address
.offset
, address
.base
, address
.index
, address
.scale
);
1138 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
, address
.index
, address
.scale
);
1141 #if PLATFORM(X86_64)
1142 void compareImm64ForBranch(RegisterID left
, int32_t right
)
1144 m_assembler
.cmpq_ir(right
, left
);
1147 void compareImm64ForBranchEquality(RegisterID reg
, int32_t imm
)
1150 m_assembler
.testq_rr(reg
, reg
);
1152 m_assembler
.cmpq_ir(imm
, reg
);
1155 void testImm64(RegisterID reg
, Imm32 mask
)
1157 // if we are only interested in the low seven bits, this can be tested with a testb
1158 if (mask
.m_value
== -1)
1159 m_assembler
.testq_rr(reg
, reg
);
1160 else if ((mask
.m_value
& ~0x7f) == 0)
1161 m_assembler
.testb_i8r(mask
.m_value
, reg
);
1163 m_assembler
.testq_i32r(mask
.m_value
, reg
);
1166 void testImm64(Address address
, Imm32 mask
)
1168 if (mask
.m_value
== -1)
1169 m_assembler
.cmpq_im(0, address
.offset
, address
.base
);
1171 m_assembler
.testq_i32m(mask
.m_value
, address
.offset
, address
.base
);
1174 void testImm64(BaseIndex address
, Imm32 mask
)
1176 if (mask
.m_value
== -1)
1177 m_assembler
.cmpq_im(0, address
.offset
, address
.base
, address
.index
, address
.scale
);
1179 m_assembler
.testq_i32m(mask
.m_value
, address
.offset
, address
.base
, address
.index
, address
.scale
);
1184 Jump
ja32(RegisterID left
, Imm32 right
)
1186 compareImm32ForBranch(left
, right
.m_value
);
1187 return Jump(m_assembler
.ja());
1190 Jump
jaePtr(RegisterID left
, RegisterID right
)
1192 #if PLATFORM(X86_64)
1193 m_assembler
.cmpq_rr(right
, left
);
1194 return Jump(m_assembler
.jae());
1196 return jae32(left
, right
);
1200 Jump
jaePtr(RegisterID reg
, ImmPtr ptr
)
1202 #if PLATFORM(X86_64)
1203 intptr_t imm
= ptr
.asIntptr();
1204 if (CAN_SIGN_EXTEND_32_64(imm
)) {
1205 compareImm64ForBranch(reg
, imm
);
1206 return Jump(m_assembler
.jae());
1208 move(ptr
, scratchRegister
);
1209 return jaePtr(reg
, scratchRegister
);
1212 return jae32(reg
, Imm32(ptr
));
1216 Jump
jae32(RegisterID left
, RegisterID right
)
1218 m_assembler
.cmpl_rr(right
, left
);
1219 return Jump(m_assembler
.jae());
1222 Jump
jae32(RegisterID left
, Imm32 right
)
1224 compareImm32ForBranch(left
, right
.m_value
);
1225 return Jump(m_assembler
.jae());
1228 Jump
jae32(RegisterID left
, Address right
)
1230 m_assembler
.cmpl_mr(right
.offset
, right
.base
, left
);
1231 return Jump(m_assembler
.jae());
1234 Jump
jae32(Address left
, RegisterID right
)
1236 m_assembler
.cmpl_rm(right
, left
.offset
, left
.base
);
1237 return Jump(m_assembler
.jae());
1240 Jump
jbPtr(RegisterID left
, RegisterID right
)
1242 #if PLATFORM(X86_64)
1243 m_assembler
.cmpq_rr(right
, left
);
1244 return Jump(m_assembler
.jb());
1246 return jb32(left
, right
);
1250 Jump
jbPtr(RegisterID reg
, ImmPtr ptr
)
1252 #if PLATFORM(X86_64)
1253 intptr_t imm
= ptr
.asIntptr();
1254 if (CAN_SIGN_EXTEND_32_64(imm
)) {
1255 compareImm64ForBranch(reg
, imm
);
1256 return Jump(m_assembler
.jb());
1258 move(ptr
, scratchRegister
);
1259 return jbPtr(reg
, scratchRegister
);
1262 return jb32(reg
, Imm32(ptr
));
1266 Jump
jb32(RegisterID left
, RegisterID right
)
1268 m_assembler
.cmpl_rr(right
, left
);
1269 return Jump(m_assembler
.jb());
1272 Jump
jb32(RegisterID left
, Imm32 right
)
1274 compareImm32ForBranch(left
, right
.m_value
);
1275 return Jump(m_assembler
.jb());
1278 Jump
jb32(RegisterID left
, Address right
)
1280 m_assembler
.cmpl_mr(right
.offset
, right
.base
, left
);
1281 return Jump(m_assembler
.jb());
1284 Jump
jePtr(RegisterID op1
, RegisterID op2
)
1286 #if PLATFORM(X86_64)
1287 m_assembler
.cmpq_rr(op1
, op2
);
1288 return Jump(m_assembler
.je());
1290 return je32(op1
, op2
);
1294 Jump
jePtr(RegisterID reg
, Address address
)
1296 #if PLATFORM(X86_64)
1297 m_assembler
.cmpq_rm(reg
, address
.offset
, address
.base
);
1299 m_assembler
.cmpl_rm(reg
, address
.offset
, address
.base
);
1301 return Jump(m_assembler
.je());
1304 Jump
jePtr(RegisterID reg
, ImmPtr ptr
)
1306 #if PLATFORM(X86_64)
1307 intptr_t imm
= ptr
.asIntptr();
1308 if (CAN_SIGN_EXTEND_32_64(imm
)) {
1309 compareImm64ForBranchEquality(reg
, imm
);
1310 return Jump(m_assembler
.je());
1312 move(ptr
, scratchRegister
);
1313 return jePtr(scratchRegister
, reg
);
1316 return je32(reg
, Imm32(ptr
));
1320 Jump
jePtr(Address address
, ImmPtr imm
)
1322 #if PLATFORM(X86_64)
1323 move(imm
, scratchRegister
);
1324 return jePtr(scratchRegister
, address
);
1326 return je32(address
, Imm32(imm
));
1330 Jump
je32(RegisterID op1
, RegisterID op2
)
1332 m_assembler
.cmpl_rr(op1
, op2
);
1333 return Jump(m_assembler
.je());
1336 Jump
je32(Address op1
, RegisterID op2
)
1338 m_assembler
.cmpl_mr(op1
.offset
, op1
.base
, op2
);
1339 return Jump(m_assembler
.je());
1342 Jump
je32(RegisterID reg
, Imm32 imm
)
1344 compareImm32ForBranchEquality(reg
, imm
.m_value
);
1345 return Jump(m_assembler
.je());
1348 Jump
je32(Address address
, Imm32 imm
)
1350 compareImm32ForBranchEquality(address
, imm
.m_value
);
1351 return Jump(m_assembler
.je());
1354 Jump
je16(RegisterID op1
, BaseIndex op2
)
1356 m_assembler
.cmpw_rm(op1
, op2
.offset
, op2
.base
, op2
.index
, op2
.scale
);
1357 return Jump(m_assembler
.je());
1360 Jump
jg32(RegisterID left
, RegisterID right
)
1362 m_assembler
.cmpl_rr(right
, left
);
1363 return Jump(m_assembler
.jg());
1366 Jump
jg32(RegisterID reg
, Address address
)
1368 m_assembler
.cmpl_mr(address
.offset
, address
.base
, reg
);
1369 return Jump(m_assembler
.jg());
1372 Jump
jgePtr(RegisterID left
, RegisterID right
)
1374 #if PLATFORM(X86_64)
1375 m_assembler
.cmpq_rr(right
, left
);
1376 return Jump(m_assembler
.jge());
1378 return jge32(left
, right
);
1382 Jump
jgePtr(RegisterID reg
, ImmPtr ptr
)
1384 #if PLATFORM(X86_64)
1385 intptr_t imm
= ptr
.asIntptr();
1386 if (CAN_SIGN_EXTEND_32_64(imm
)) {
1387 compareImm64ForBranch(reg
, imm
);
1388 return Jump(m_assembler
.jge());
1390 move(ptr
, scratchRegister
);
1391 return jgePtr(reg
, scratchRegister
);
1394 return jge32(reg
, Imm32(ptr
));
1398 Jump
jge32(RegisterID left
, RegisterID right
)
1400 m_assembler
.cmpl_rr(right
, left
);
1401 return Jump(m_assembler
.jge());
1404 Jump
jge32(RegisterID left
, Imm32 right
)
1406 compareImm32ForBranch(left
, right
.m_value
);
1407 return Jump(m_assembler
.jge());
1410 Jump
jlPtr(RegisterID left
, RegisterID right
)
1412 #if PLATFORM(X86_64)
1413 m_assembler
.cmpq_rr(right
, left
);
1414 return Jump(m_assembler
.jl());
1416 return jl32(left
, right
);
1420 Jump
jlPtr(RegisterID reg
, ImmPtr ptr
)
1422 #if PLATFORM(X86_64)
1423 intptr_t imm
= ptr
.asIntptr();
1424 if (CAN_SIGN_EXTEND_32_64(imm
)) {
1425 compareImm64ForBranch(reg
, imm
);
1426 return Jump(m_assembler
.jl());
1428 move(ptr
, scratchRegister
);
1429 return jlPtr(reg
, scratchRegister
);
1432 return jl32(reg
, Imm32(ptr
));
1436 Jump
jl32(RegisterID left
, RegisterID right
)
1438 m_assembler
.cmpl_rr(right
, left
);
1439 return Jump(m_assembler
.jl());
1442 Jump
jl32(RegisterID left
, Imm32 right
)
1444 compareImm32ForBranch(left
, right
.m_value
);
1445 return Jump(m_assembler
.jl());
1448 Jump
jlePtr(RegisterID left
, RegisterID right
)
1450 #if PLATFORM(X86_64)
1451 m_assembler
.cmpq_rr(right
, left
);
1452 return Jump(m_assembler
.jle());
1454 return jle32(left
, right
);
1458 Jump
jlePtr(RegisterID reg
, ImmPtr ptr
)
1460 #if PLATFORM(X86_64)
1461 intptr_t imm
= ptr
.asIntptr();
1462 if (CAN_SIGN_EXTEND_32_64(imm
)) {
1463 compareImm64ForBranch(reg
, imm
);
1464 return Jump(m_assembler
.jle());
1466 move(ptr
, scratchRegister
);
1467 return jlePtr(reg
, scratchRegister
);
1470 return jle32(reg
, Imm32(ptr
));
1474 Jump
jle32(RegisterID left
, RegisterID right
)
1476 m_assembler
.cmpl_rr(right
, left
);
1477 return Jump(m_assembler
.jle());
1480 Jump
jle32(RegisterID left
, Imm32 right
)
1482 compareImm32ForBranch(left
, right
.m_value
);
1483 return Jump(m_assembler
.jle());
1486 Jump
jnePtr(RegisterID op1
, RegisterID op2
)
1488 #if PLATFORM(X86_64)
1489 m_assembler
.cmpq_rr(op1
, op2
);
1490 return Jump(m_assembler
.jne());
1492 return jne32(op1
, op2
);
1496 Jump
jnePtr(RegisterID reg
, Address address
)
1498 #if PLATFORM(X86_64)
1499 m_assembler
.cmpq_rm(reg
, address
.offset
, address
.base
);
1501 m_assembler
.cmpl_rm(reg
, address
.offset
, address
.base
);
1503 return Jump(m_assembler
.jne());
1506 Jump
jnePtr(RegisterID reg
, AbsoluteAddress address
)
1508 #if PLATFORM(X86_64)
1509 move(ImmPtr(address
.m_ptr
), scratchRegister
);
1510 return jnePtr(reg
, Address(scratchRegister
));
1512 m_assembler
.cmpl_rm(reg
, address
.m_ptr
);
1513 return Jump(m_assembler
.jne());
1517 Jump
jnePtr(RegisterID reg
, ImmPtr ptr
)
1519 #if PLATFORM(X86_64)
1520 intptr_t imm
= ptr
.asIntptr();
1521 if (CAN_SIGN_EXTEND_32_64(imm
)) {
1522 compareImm64ForBranchEquality(reg
, imm
);
1523 return Jump(m_assembler
.jne());
1525 move(ptr
, scratchRegister
);
1526 return jnePtr(scratchRegister
, reg
);
1529 return jne32(reg
, Imm32(ptr
));
1533 Jump
jnePtr(Address address
, ImmPtr imm
)
1535 #if PLATFORM(X86_64)
1536 move(imm
, scratchRegister
);
1537 return jnePtr(scratchRegister
, address
);
1539 return jne32(address
, Imm32(imm
));
1543 #if !PLATFORM(X86_64)
1544 Jump
jnePtr(AbsoluteAddress address
, ImmPtr imm
)
1546 m_assembler
.cmpl_im(imm
.asIntptr(), address
.m_ptr
);
1547 return Jump(m_assembler
.jne());
1551 Jump
jnePtrWithPatch(RegisterID reg
, DataLabelPtr
& dataLabel
, ImmPtr initialValue
= ImmPtr(0))
1553 #if PLATFORM(X86_64)
1554 m_assembler
.movq_i64r(initialValue
.asIntptr(), scratchRegister
);
1555 dataLabel
= DataLabelPtr(this);
1556 return jnePtr(scratchRegister
, reg
);
1558 m_assembler
.cmpl_ir_force32(initialValue
.asIntptr(), reg
);
1559 dataLabel
= DataLabelPtr(this);
1560 return Jump(m_assembler
.jne());
1564 Jump
jnePtrWithPatch(Address address
, DataLabelPtr
& dataLabel
, ImmPtr initialValue
= ImmPtr(0))
1566 #if PLATFORM(X86_64)
1567 m_assembler
.movq_i64r(initialValue
.asIntptr(), scratchRegister
);
1568 dataLabel
= DataLabelPtr(this);
1569 return jnePtr(scratchRegister
, address
);
1571 m_assembler
.cmpl_im_force32(initialValue
.asIntptr(), address
.offset
, address
.base
);
1572 dataLabel
= DataLabelPtr(this);
1573 return Jump(m_assembler
.jne());
1577 Jump
jne32(RegisterID op1
, RegisterID op2
)
1579 m_assembler
.cmpl_rr(op1
, op2
);
1580 return Jump(m_assembler
.jne());
1583 Jump
jne32(RegisterID reg
, Imm32 imm
)
1585 compareImm32ForBranchEquality(reg
, imm
.m_value
);
1586 return Jump(m_assembler
.jne());
1589 Jump
jne32(Address address
, Imm32 imm
)
1591 compareImm32ForBranchEquality(address
, imm
.m_value
);
1592 return Jump(m_assembler
.jne());
1595 Jump
jne32(Address address
, RegisterID reg
)
1597 m_assembler
.cmpl_rm(reg
, address
.offset
, address
.base
);
1598 return Jump(m_assembler
.jne());
1601 Jump
jnzPtr(RegisterID reg
, RegisterID mask
)
1603 #if PLATFORM(X86_64)
1604 m_assembler
.testq_rr(reg
, mask
);
1605 return Jump(m_assembler
.jne());
1607 return jnz32(reg
, mask
);
1611 Jump
jnzPtr(RegisterID reg
, Imm32 mask
= Imm32(-1))
1613 #if PLATFORM(X86_64)
1614 testImm64(reg
, mask
);
1615 return Jump(m_assembler
.jne());
1617 return jnz32(reg
, mask
);
1621 Jump
jnzPtr(RegisterID reg
, ImmPtr mask
)
1623 #if PLATFORM(X86_64)
1624 move(mask
, scratchRegister
);
1625 m_assembler
.testq_rr(scratchRegister
, reg
);
1626 return Jump(m_assembler
.jne());
1628 return jnz32(reg
, Imm32(mask
));
1632 Jump
jnzPtr(Address address
, Imm32 mask
= Imm32(-1))
1634 #if PLATFORM(X86_64)
1635 testImm64(address
, mask
);
1636 return Jump(m_assembler
.jne());
1638 return jnz32(address
, mask
);
1642 Jump
jnz32(RegisterID reg
, RegisterID mask
)
1644 m_assembler
.testl_rr(reg
, mask
);
1645 return Jump(m_assembler
.jne());
1648 Jump
jnz32(RegisterID reg
, Imm32 mask
= Imm32(-1))
1650 testImm32(reg
, mask
);
1651 return Jump(m_assembler
.jne());
1654 Jump
jnz32(Address address
, Imm32 mask
= Imm32(-1))
1656 testImm32(address
, mask
);
1657 return Jump(m_assembler
.jne());
1660 Jump
jzPtr(RegisterID reg
, RegisterID mask
)
1662 #if PLATFORM(X86_64)
1663 m_assembler
.testq_rr(reg
, mask
);
1664 return Jump(m_assembler
.je());
1666 return jz32(reg
, mask
);
1670 Jump
jzPtr(RegisterID reg
, Imm32 mask
= Imm32(-1))
1672 #if PLATFORM(X86_64)
1673 testImm64(reg
, mask
);
1674 return Jump(m_assembler
.je());
1676 return jz32(reg
, mask
);
1680 Jump
jzPtr(RegisterID reg
, ImmPtr mask
)
1682 #if PLATFORM(X86_64)
1683 move(mask
, scratchRegister
);
1684 m_assembler
.testq_rr(scratchRegister
, reg
);
1685 return Jump(m_assembler
.je());
1687 return jz32(reg
, Imm32(mask
));
1691 Jump
jzPtr(Address address
, Imm32 mask
= Imm32(-1))
1693 #if PLATFORM(X86_64)
1694 testImm64(address
, mask
);
1695 return Jump(m_assembler
.je());
1697 return jz32(address
, mask
);
1701 Jump
jzPtr(BaseIndex address
, Imm32 mask
= Imm32(-1))
1703 #if PLATFORM(X86_64)
1704 testImm64(address
, mask
);
1705 return Jump(m_assembler
.je());
1707 return jz32(address
, mask
);
1711 Jump
jz32(RegisterID reg
, RegisterID mask
)
1713 m_assembler
.testl_rr(reg
, mask
);
1714 return Jump(m_assembler
.je());
1717 Jump
jz32(RegisterID reg
, Imm32 mask
= Imm32(-1))
1719 testImm32(reg
, mask
);
1720 return Jump(m_assembler
.je());
1723 Jump
jz32(Address address
, Imm32 mask
= Imm32(-1))
1725 testImm32(address
, mask
);
1726 return Jump(m_assembler
.je());
1729 Jump
jz32(BaseIndex address
, Imm32 mask
= Imm32(-1))
1731 testImm32(address
, mask
);
1732 return Jump(m_assembler
.je());
1737 return Jump(m_assembler
.jmp());
1741 // Backwards, local control flow operations:
1743 // These operations provide a shorter notation for local
1744 // backwards branches, which may be both more convenient
1745 // for the user, and for the programmer, and for the
1746 // assembler (allowing shorter values to be used in
1747 // relative offsets).
1749 // The code sequence:
1751 // Label topOfLoop(this);
1753 // jne32(reg1, reg2, topOfLoop);
1755 // Is equivalent to the longer, potentially less efficient form:
1757 // Label topOfLoop(this);
1759 // jne32(reg1, reg2).linkTo(topOfLoop);
1761 void jae32(RegisterID left
, Address right
, Label target
)
1763 jae32(left
, right
).linkTo(target
, this);
1766 void je32(RegisterID op1
, Imm32 imm
, Label target
)
1768 je32(op1
, imm
).linkTo(target
, this);
1771 void je16(RegisterID op1
, BaseIndex op2
, Label target
)
1773 je16(op1
, op2
).linkTo(target
, this);
1776 void jl32(RegisterID left
, Imm32 right
, Label target
)
1778 jl32(left
, right
).linkTo(target
, this);
1781 void jle32(RegisterID left
, RegisterID right
, Label target
)
1783 jle32(left
, right
).linkTo(target
, this);
1786 void jnePtr(RegisterID op1
, ImmPtr imm
, Label target
)
1788 jnePtr(op1
, imm
).linkTo(target
, this);
1791 void jne32(RegisterID op1
, RegisterID op2
, Label target
)
1793 jne32(op1
, op2
).linkTo(target
, this);
1796 void jne32(RegisterID op1
, Imm32 imm
, Label target
)
1798 jne32(op1
, imm
).linkTo(target
, this);
1801 void jzPtr(RegisterID reg
, Label target
)
1803 jzPtr(reg
).linkTo(target
, this);
1806 void jump(Label target
)
1808 m_assembler
.link(m_assembler
.jmp(), target
.m_label
);
1811 void jump(RegisterID target
)
1813 m_assembler
.jmp_r(target
);
1816 // Address is a memory location containing the address to jump to
1817 void jump(Address address
)
1819 m_assembler
.jmp_m(address
.offset
, address
.base
);
1823 // Arithmetic control flow operations:
1825 // This set of conditional branch operations branch based
1826 // on the result of an arithmetic operation. The operation
1827 // is performed as normal, storing the result.
1829 // * jz operations branch if the result is zero.
1830 // * jo operations branch if the (signed) arithmetic
1831 // operation caused an overflow to occur.
1833 Jump
jnzSubPtr(Imm32 imm
, RegisterID dest
)
1836 return Jump(m_assembler
.jne());
1839 Jump
jnzSub32(Imm32 imm
, RegisterID dest
)
1842 return Jump(m_assembler
.jne());
1845 Jump
joAddPtr(RegisterID src
, RegisterID dest
)
1848 return Jump(m_assembler
.jo());
1851 Jump
joAdd32(RegisterID src
, RegisterID dest
)
1854 return Jump(m_assembler
.jo());
1857 Jump
joAdd32(Imm32 imm
, RegisterID dest
)
1860 return Jump(m_assembler
.jo());
1863 Jump
joMul32(RegisterID src
, RegisterID dest
)
1866 return Jump(m_assembler
.jo());
1869 Jump
joMul32(Imm32 imm
, RegisterID src
, RegisterID dest
)
1871 mul32(imm
, src
, dest
);
1872 return Jump(m_assembler
.jo());
1875 Jump
joSub32(RegisterID src
, RegisterID dest
)
1878 return Jump(m_assembler
.jo());
1881 Jump
joSub32(Imm32 imm
, RegisterID dest
)
1884 return Jump(m_assembler
.jo());
1887 Jump
jzSubPtr(Imm32 imm
, RegisterID dest
)
1890 return Jump(m_assembler
.je());
1893 Jump
jzSub32(Imm32 imm
, RegisterID dest
)
1896 return Jump(m_assembler
.je());
1900 // Miscellaneous operations:
1909 return Jump(m_assembler
.call());
1912 // FIXME: why does this return a Jump object? - it can't be linked.
1913 // This may be to get a reference to the return address of the call.
1915 // This should probably be handled by a separate label type to a regular
1916 // jump. Todo: add a CallLabel type, for the regular call - can be linked
1917 // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
1918 // Also add a CallReturnLabel type for this to return (just a more JmpDsty
1919 // form of label, can get the void* after the code has been linked, but can't
1920 // try to link it like a Jump object), and let the CallLabel be cast into a
1922 Jump
call(RegisterID target
)
1924 return Jump(m_assembler
.call(target
));
1934 m_assembler
.align(16);
1938 ptrdiff_t differenceBetween(Label from
, Jump to
)
1940 return X86Assembler::getDifferenceBetweenLabels(from
.m_label
, to
.m_jmp
);
1943 ptrdiff_t differenceBetween(Label from
, Label to
)
1945 return X86Assembler::getDifferenceBetweenLabels(from
.m_label
, to
.m_label
);
1948 ptrdiff_t differenceBetween(Label from
, DataLabelPtr to
)
1950 return X86Assembler::getDifferenceBetweenLabels(from
.m_label
, to
.m_label
);
1953 ptrdiff_t differenceBetween(Label from
, DataLabel32 to
)
1955 return X86Assembler::getDifferenceBetweenLabels(from
.m_label
, to
.m_label
);
1958 ptrdiff_t differenceBetween(DataLabelPtr from
, Jump to
)
1960 return X86Assembler::getDifferenceBetweenLabels(from
.m_label
, to
.m_jmp
);
1968 void sete32(RegisterID src
, RegisterID srcDest
)
1970 m_assembler
.cmpl_rr(srcDest
, src
);
1971 m_assembler
.sete_r(srcDest
);
1972 m_assembler
.movzbl_rr(srcDest
, srcDest
);
1975 void sete32(Imm32 imm
, RegisterID srcDest
)
1977 compareImm32ForBranchEquality(srcDest
, imm
.m_value
);
1978 m_assembler
.sete_r(srcDest
);
1979 m_assembler
.movzbl_rr(srcDest
, srcDest
);
1982 void setne32(RegisterID src
, RegisterID srcDest
)
1984 m_assembler
.cmpl_rr(srcDest
, src
);
1985 m_assembler
.setne_r(srcDest
);
1986 m_assembler
.movzbl_rr(srcDest
, srcDest
);
1989 void setne32(Imm32 imm
, RegisterID srcDest
)
1991 compareImm32ForBranchEquality(srcDest
, imm
.m_value
);
1992 m_assembler
.setne_r(srcDest
);
1993 m_assembler
.movzbl_rr(srcDest
, srcDest
);
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
2000 void setnz32(Address address
, Imm32 mask
, RegisterID dest
)
2002 testImm32(address
, mask
);
2003 m_assembler
.setnz_r(dest
);
2004 m_assembler
.movzbl_rr(dest
, dest
);
2007 void setz32(Address address
, Imm32 mask
, RegisterID dest
)
2009 testImm32(address
, mask
);
2010 m_assembler
.setz_r(dest
);
2011 m_assembler
.movzbl_rr(dest
, dest
);
2017 #endif // ENABLE(ASSEMBLER)
2019 #endif // MacroAssembler_h