/*
 * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

#if USE(MASM_PROBE)
#include <wtf/StdLibExtras.h>
#endif

#define REPTACH_OFFSET_CALL_R11 3
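
// REPTACH_OFFSET_CALL_R11 is the byte distance between the label recorded
// after the patchable movq that loads a call target into r11 and the label
// recorded after the indirect call that follows it, i.e. the length of the
// 3-byte `call *%r11` (41 ff d3) instruction. (Illustrative note; the
// ASSERTs on differenceBetween() below rely on this value.)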

namespace JSC {

inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
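
// For example, 0x000000007fffffff sign-extends from 32 bits unchanged, so it
// can be stored with a 32-bit immediate (see movq_i32m in store64 below);
// 0x0000000080000000 does not (it would sign-extend to 0xffffffff80000000),
// so it must be materialized through the scratch register instead.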

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load8;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(Address(scratchRegister), dest);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(reg, Address(scratchRegister));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        // dest now holds the pointer; a bare RegisterID converts to an
        // ImplicitAddress with offset 0, so this loads from *dest.
        load8(dest, dest);
    }

    void load32(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load32(dest, dest);
        }
    }
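
    // The eax special case above exploits the x86-64 moffs encoding
    // (opcode A1: mov eax, [64-bit absolute address]), the one mov form
    // that can load from a full 64-bit immediate address without first
    // materializing the pointer in a register. The same trick appears in
    // the eax paths of store32, load64, and store64 below.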

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        m_assembler.addsd_mr(0, scratchRegister, dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store32(imm, scratchRegister);
    }

    void store32(RegisterID source, void* address)
    {
        if (source == X86Registers::eax)
            m_assembler.movl_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store32(source, scratchRegister);
        }
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(imm, Address(scratchRegister));
    }

    void store8(RegisterID reg, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(reg, Address(scratchRegister));
    }

#if OS(WINDOWS)
    Call callWithSlowPathReturnType()
    {
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
        // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument.
        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16 byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
        // It is assumed that the parameters are already shifted to the right when entering this method.
        // Note: this implementation supports up to 3 parameters.

        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // In addition, we need to allocate 16 bytes for the return value.
        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // The first parameter register should contain a pointer to the stack allocated space for the return value.
        move(X86Registers::esp, X86Registers::ecx);
        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);

        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);

        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // Copy the return value into rax and rdx.
        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
        load64(Address(X86Registers::eax), X86Registers::eax);

        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }
#endif
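
    // Illustrative sketch (not from the original source) of the stack set up
    // above, with offsets relative to rsp after the 64-byte sub64:
    //
    //   [rsp +  0 .. 32)  shadow space for the 4 parameter registers
    //   [rsp + 32 .. 48)  16-byte slot for the return value (rcx points here)
    //   [rsp + 48 .. 56)  copy of rbp (stored at old rsp - 16 above)
    //   [rsp + 56 .. 64)  return address slot (not populated)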

    Call call()
    {
#if OS(WINDOWS)
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
        // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied.

        // Copy argument 5.
        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -4 * sizeof(int64_t)));

        // Copy argument 6.
        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -3 * sizeof(int64_t)));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
        // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
#if OS(WINDOWS)
        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }
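
    // Illustrative sketch (not from the original source): on entry, argument 5
    // lives at [rsp + 32] and argument 6 at [rsp + 40]. They are staged at
    // [rsp - 32] and [rsp - 24], which become [rsp + 32] and [rsp + 40] again,
    // from the callee's point of view, once the 64-byte sub64 has executed.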

    // Address is a memory location containing the address to jump to
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        jump(Address(scratchRegister));
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), scratchRegister);
        add32(src, Address(scratchRegister));
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void add64(Address src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, dest);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), scratchRegister);
        add64(Address(scratchRegister), dest);
    }

    void add64(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(srcDest);
        else
            m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(dest);
        else {
            move(imm, scratchRegister);
            add64(scratchRegister, dest);
        }
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add64(imm, Address(scratchRegister));
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void and64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void and64(TrustedImmPtr imm, RegisterID srcDest)
    {
        move(imm, scratchRegister);
        and64(scratchRegister, srcDest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shlq_i8r(imm.m_value, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.imulq_rr(src, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.negq_r(dest);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        or64(scratchRegister, dest);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or64(op2, dest);
        else {
            move(op2, dest);
            or64(op1, dest);
        }
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.rorq_i8r(imm.m_value, srcDst);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else
            m_assembler.subq_ir(imm.m_value, dest);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else {
            move(imm, scratchRegister);
            sub64(scratchRegister, dest);
        }
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xor64(RegisterID src, Address dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base);
    }

    void xor64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void load64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load64(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load64(dest, dest);
        }
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store64(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store64(src, scratchRegister);
        }
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (CAN_SIGN_EXTEND_32_64(imm.m_value))
            m_assembler.movq_i32m(static_cast<int>(imm.m_value), address.offset, address.base);
        else {
            move(imm, scratchRegister);
            store64(scratchRegister, address);
        }
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        move(imm, scratchRegister);
        m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
            m_assembler.testq_rr(left, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        }
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return branch64(cond, Address(scratchRegister), right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
    {
        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        set32(x86Condition(cond), dest);
    }

    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        m_assembler.testq_rr(reg, mask);
        set32(x86Condition(cond), dest);
    }
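
    // A note on the 0x7f cutoff above (explanatory, not from the original
    // source): testb sets the sign flag from bit 7 of the 8-bit result, so it
    // is only a flag-equivalent substitute for testq when the mask cannot set
    // that bit. For example, mask 0x40 takes the shorter testb encoding while
    // mask 0x80 falls through to testq.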

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, scratchRegister);
        return branchTest64(cond, scratchRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
    {
        m_assembler.testq_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul64(src, dest);
        if (cond != Overflow)
            m_assembler.testq_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub64(cond, src2, dest);
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        neg64(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), X86Registers::r11);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), X86Registers::r10);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movq_mr(address.offset, address.base, dest);
        return result;
    }

    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.m_value, dest);
        return DataLabelPtr(this);
    }
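
    // Both variants above emit a full movq with a 64-bit immediate, even for
    // the TrustedImm32 case, so every patchable site reserves the same
    // 10-byte footprint and can later be repatched with an arbitrary
    // pointer-sized value (explanatory note, inferred from the code).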

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister);
        dataLabel = DataLabel32(this);
        return branch32(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        store64(scratchRegister, address);
        return label;
    }

    using MacroAssemblerX86Common::branch8;
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right);
    }

    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_rr(src, dest);
    }

    static bool supportsFloatingPoint() { return true; }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
    }

    static bool haveScratchRegisterForBlinding() { return true; }
    static RegisterID scratchRegisterForBlinding() { return scratchRegister; }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
    {
        return startOfBranch32WithPatchOnRegister(label);
    }
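
    // A note on the byte counts above (explanatory, inferred from the code):
    // the patchable move into scratchRegister is a REX prefix (1 byte) plus
    // opcode (1 byte) plus an 8-byte immediate for the pointer variant or a
    // 4-byte immediate for the 32-bit variant, so the instruction starts
    // 10 (or 6) bytes before the recorded label.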

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
    {
        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, scratchRegister);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

#if USE(MASM_PROBE)
    // This function emits code to preserve the CPUState (e.g. registers),
    // call a user supplied probe function, and restore the CPUState before
    // continuing with other JIT generated code.
    //
    // The user supplied probe function will be called with a single pointer to
    // a ProbeContext struct (defined above) which contains, among other things,
    // the preserved CPUState. This allows the user probe function to inspect
    // the CPUState at that point in the JIT generated code.
    //
    // If the user probe function alters the register values in the ProbeContext,
    // the altered values will be loaded into the CPU registers when the probe
    // function returns.
    //
    // The ProbeContext is stack allocated and is only valid for the duration
    // of the call to the user probe function.

    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
#endif // USE(MASM_PROBE)

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

#if USE(MASM_PROBE)
    inline TrustedImm64 trustedImm64FromPtr(void* ptr)
    {
        return TrustedImm64(TrustedImmPtr(ptr));
    }

    inline TrustedImm64 trustedImm64FromPtr(ProbeFunction function)
    {
        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }

    inline TrustedImm64 trustedImm64FromPtr(void (*function)())
    {
        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }
#endif
};

#if USE(MASM_PROBE)

extern "C" void ctiMasmProbeTrampoline();

// What code is emitted for the probe?
// ==================================
// We want to keep the size of the emitted probe invocation code as compact as
// possible to minimize the perturbation to the JIT generated code. However,
// we also need to preserve the CPU registers and set up the ProbeContext to be
// passed to the user probe function.
//
// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
// work i.e. saving the CPUState (and setting up the ProbeContext), calling the
// user probe function, and restoring the CPUState before returning to JIT
// generated code.
//
// What values are in the saved registers?
// ======================================
// Conceptually, the saved registers should contain values as if the probe
// is not present in the JIT generated code. Hence, they should contain values
// that are expected at the start of the instruction immediately following the
// probe.
//
// Specifically, the saved stack pointer register will point to the stack
// position before we push the ProbeContext frame. The saved rip will point to
// the address of the instruction immediately following the probe.

inline void MacroAssemblerX86_64::probe(MacroAssemblerX86_64::ProbeFunction function, void* arg1, void* arg2)
{
    push(RegisterID::esp);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(arg2), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(arg1), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(function), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
    call(RegisterID::eax);
}
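
// Illustrative sketch (not from the original source) of what
// ctiMasmProbeTrampoline sees on entry, given the pushes above:
//
//   [rsp +  0]  return address into the JIT generated code
//   [rsp +  8]  probe function pointer
//   [rsp + 16]  arg1
//   [rsp + 24]  arg2
//   [rsp + 32]  saved rax (original value; rax is clobbered by the moves)
//   [rsp + 40]  saved rsp (its value before the probe sequence began)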
#endif // USE(MASM_PROBE)

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86_64_h