/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #ifndef MacroAssemblerX86_64_h
27 #define MacroAssemblerX86_64_h
29 #include <wtf/Platform.h>
31 #if ENABLE(ASSEMBLER) && PLATFORM(X86_64)
33 #include "MacroAssemblerX86Common.h"
35 #define REPTACH_OFFSET_CALL_R11 3
39 class MacroAssemblerX86_64
: public MacroAssemblerX86Common
{
41 static const X86::RegisterID scratchRegister
= X86::r11
;
44 static const Scale ScalePtr
= TimesEight
;
46 using MacroAssemblerX86Common::add32
;
47 using MacroAssemblerX86Common::and32
;
48 using MacroAssemblerX86Common::or32
;
49 using MacroAssemblerX86Common::sub32
;
50 using MacroAssemblerX86Common::load32
;
51 using MacroAssemblerX86Common::store32
;
52 using MacroAssemblerX86Common::call
;
53 using MacroAssemblerX86Common::loadDouble
;
54 using MacroAssemblerX86Common::convertInt32ToDouble
;
56 void add32(Imm32 imm
, AbsoluteAddress address
)
58 move(ImmPtr(address
.m_ptr
), scratchRegister
);
59 add32(imm
, Address(scratchRegister
));
62 void and32(Imm32 imm
, AbsoluteAddress address
)
64 move(ImmPtr(address
.m_ptr
), scratchRegister
);
65 and32(imm
, Address(scratchRegister
));
68 void or32(Imm32 imm
, AbsoluteAddress address
)
70 move(ImmPtr(address
.m_ptr
), scratchRegister
);
71 or32(imm
, Address(scratchRegister
));
74 void sub32(Imm32 imm
, AbsoluteAddress address
)
76 move(ImmPtr(address
.m_ptr
), scratchRegister
);
77 sub32(imm
, Address(scratchRegister
));
80 void load32(void* address
, RegisterID dest
)
83 m_assembler
.movl_mEAX(address
);
86 m_assembler
.movl_mEAX(address
);
91 void loadDouble(void* address
, FPRegisterID dest
)
93 move(ImmPtr(address
), scratchRegister
);
94 loadDouble(scratchRegister
, dest
);
97 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
99 move(Imm32(*static_cast<int32_t*>(src
.m_ptr
)), scratchRegister
);
100 m_assembler
.cvtsi2sd_rr(scratchRegister
, dest
);
103 void store32(Imm32 imm
, void* address
)
105 move(X86::eax
, scratchRegister
);
107 m_assembler
.movl_EAXm(address
);
108 move(scratchRegister
, X86::eax
);
113 DataLabelPtr label
= moveWithPatch(ImmPtr(0), scratchRegister
);
114 Call result
= Call(m_assembler
.call(scratchRegister
), Call::Linkable
);
115 ASSERT(differenceBetween(label
, result
) == REPTACH_OFFSET_CALL_R11
);
119 Call
tailRecursiveCall()
121 DataLabelPtr label
= moveWithPatch(ImmPtr(0), scratchRegister
);
122 Jump newJump
= Jump(m_assembler
.jmp_r(scratchRegister
));
123 ASSERT(differenceBetween(label
, newJump
) == REPTACH_OFFSET_CALL_R11
);
124 return Call::fromTailJump(newJump
);
127 Call
makeTailRecursiveCall(Jump oldJump
)
130 DataLabelPtr label
= moveWithPatch(ImmPtr(0), scratchRegister
);
131 Jump newJump
= Jump(m_assembler
.jmp_r(scratchRegister
));
132 ASSERT(differenceBetween(label
, newJump
) == REPTACH_OFFSET_CALL_R11
);
133 return Call::fromTailJump(newJump
);
137 void addPtr(RegisterID src
, RegisterID dest
)
139 m_assembler
.addq_rr(src
, dest
);
142 void addPtr(Imm32 imm
, RegisterID srcDest
)
144 m_assembler
.addq_ir(imm
.m_value
, srcDest
);
147 void addPtr(ImmPtr imm
, RegisterID dest
)
149 move(imm
, scratchRegister
);
150 m_assembler
.addq_rr(scratchRegister
, dest
);
153 void addPtr(Imm32 imm
, RegisterID src
, RegisterID dest
)
155 m_assembler
.leaq_mr(imm
.m_value
, src
, dest
);
158 void addPtr(Imm32 imm
, Address address
)
160 m_assembler
.addq_im(imm
.m_value
, address
.offset
, address
.base
);
163 void addPtr(Imm32 imm
, AbsoluteAddress address
)
165 move(ImmPtr(address
.m_ptr
), scratchRegister
);
166 addPtr(imm
, Address(scratchRegister
));
169 void andPtr(RegisterID src
, RegisterID dest
)
171 m_assembler
.andq_rr(src
, dest
);
174 void andPtr(Imm32 imm
, RegisterID srcDest
)
176 m_assembler
.andq_ir(imm
.m_value
, srcDest
);
179 void orPtr(RegisterID src
, RegisterID dest
)
181 m_assembler
.orq_rr(src
, dest
);
184 void orPtr(ImmPtr imm
, RegisterID dest
)
186 move(imm
, scratchRegister
);
187 m_assembler
.orq_rr(scratchRegister
, dest
);
190 void orPtr(Imm32 imm
, RegisterID dest
)
192 m_assembler
.orq_ir(imm
.m_value
, dest
);
195 void rshiftPtr(RegisterID shift_amount
, RegisterID dest
)
197 // On x86 we can only shift by ecx; if asked to shift by another register we'll
198 // need rejig the shift amount into ecx first, and restore the registers afterwards.
199 if (shift_amount
!= X86::ecx
) {
200 swap(shift_amount
, X86::ecx
);
202 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
203 if (dest
== shift_amount
)
204 m_assembler
.sarq_CLr(X86::ecx
);
205 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
206 else if (dest
== X86::ecx
)
207 m_assembler
.sarq_CLr(shift_amount
);
208 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
210 m_assembler
.sarq_CLr(dest
);
212 swap(shift_amount
, X86::ecx
);
214 m_assembler
.sarq_CLr(dest
);
217 void rshiftPtr(Imm32 imm
, RegisterID dest
)
219 m_assembler
.sarq_i8r(imm
.m_value
, dest
);
222 void subPtr(RegisterID src
, RegisterID dest
)
224 m_assembler
.subq_rr(src
, dest
);
227 void subPtr(Imm32 imm
, RegisterID dest
)
229 m_assembler
.subq_ir(imm
.m_value
, dest
);
232 void subPtr(ImmPtr imm
, RegisterID dest
)
234 move(imm
, scratchRegister
);
235 m_assembler
.subq_rr(scratchRegister
, dest
);
238 void xorPtr(RegisterID src
, RegisterID dest
)
240 m_assembler
.xorq_rr(src
, dest
);
243 void xorPtr(Imm32 imm
, RegisterID srcDest
)
245 m_assembler
.xorq_ir(imm
.m_value
, srcDest
);
249 void loadPtr(ImplicitAddress address
, RegisterID dest
)
251 m_assembler
.movq_mr(address
.offset
, address
.base
, dest
);
254 void loadPtr(BaseIndex address
, RegisterID dest
)
256 m_assembler
.movq_mr(address
.offset
, address
.base
, address
.index
, address
.scale
, dest
);
259 void loadPtr(void* address
, RegisterID dest
)
261 if (dest
== X86::eax
)
262 m_assembler
.movq_mEAX(address
);
264 move(X86::eax
, dest
);
265 m_assembler
.movq_mEAX(address
);
266 swap(X86::eax
, dest
);
270 DataLabel32
loadPtrWithAddressOffsetPatch(Address address
, RegisterID dest
)
272 m_assembler
.movq_mr_disp32(address
.offset
, address
.base
, dest
);
273 return DataLabel32(this);
276 void storePtr(RegisterID src
, ImplicitAddress address
)
278 m_assembler
.movq_rm(src
, address
.offset
, address
.base
);
281 void storePtr(RegisterID src
, BaseIndex address
)
283 m_assembler
.movq_rm(src
, address
.offset
, address
.base
, address
.index
, address
.scale
);
286 void storePtr(RegisterID src
, void* address
)
289 m_assembler
.movq_EAXm(address
);
292 m_assembler
.movq_EAXm(address
);
297 void storePtr(ImmPtr imm
, ImplicitAddress address
)
299 intptr_t ptr
= imm
.asIntptr();
300 if (CAN_SIGN_EXTEND_32_64(ptr
))
301 m_assembler
.movq_i32m(static_cast<int>(ptr
), address
.offset
, address
.base
);
303 move(imm
, scratchRegister
);
304 storePtr(scratchRegister
, address
);
308 DataLabel32
storePtrWithAddressOffsetPatch(RegisterID src
, Address address
)
310 m_assembler
.movq_rm_disp32(src
, address
.offset
, address
.base
);
311 return DataLabel32(this);
314 void movePtrToDouble(RegisterID src
, FPRegisterID dest
)
316 m_assembler
.movq_rr(src
, dest
);
319 void moveDoubleToPtr(FPRegisterID src
, RegisterID dest
)
321 m_assembler
.movq_rr(src
, dest
);
324 void setPtr(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
326 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
327 m_assembler
.testq_rr(left
, left
);
329 m_assembler
.cmpq_ir(right
.m_value
, left
);
330 m_assembler
.setCC_r(x86Condition(cond
), dest
);
331 m_assembler
.movzbl_rr(dest
, dest
);
334 Jump
branchPtr(Condition cond
, RegisterID left
, RegisterID right
)
336 m_assembler
.cmpq_rr(right
, left
);
337 return Jump(m_assembler
.jCC(x86Condition(cond
)));
340 Jump
branchPtr(Condition cond
, RegisterID left
, ImmPtr right
)
342 intptr_t imm
= right
.asIntptr();
343 if (CAN_SIGN_EXTEND_32_64(imm
)) {
345 m_assembler
.testq_rr(left
, left
);
347 m_assembler
.cmpq_ir(imm
, left
);
348 return Jump(m_assembler
.jCC(x86Condition(cond
)));
350 move(right
, scratchRegister
);
351 return branchPtr(cond
, left
, scratchRegister
);
355 Jump
branchPtr(Condition cond
, RegisterID left
, Address right
)
357 m_assembler
.cmpq_mr(right
.offset
, right
.base
, left
);
358 return Jump(m_assembler
.jCC(x86Condition(cond
)));
361 Jump
branchPtr(Condition cond
, AbsoluteAddress left
, RegisterID right
)
363 move(ImmPtr(left
.m_ptr
), scratchRegister
);
364 return branchPtr(cond
, Address(scratchRegister
), right
);
367 Jump
branchPtr(Condition cond
, Address left
, RegisterID right
)
369 m_assembler
.cmpq_rm(right
, left
.offset
, left
.base
);
370 return Jump(m_assembler
.jCC(x86Condition(cond
)));
373 Jump
branchPtr(Condition cond
, Address left
, ImmPtr right
)
375 move(right
, scratchRegister
);
376 return branchPtr(cond
, left
, scratchRegister
);
379 Jump
branchTestPtr(Condition cond
, RegisterID reg
, RegisterID mask
)
381 m_assembler
.testq_rr(reg
, mask
);
382 return Jump(m_assembler
.jCC(x86Condition(cond
)));
385 Jump
branchTestPtr(Condition cond
, RegisterID reg
, Imm32 mask
= Imm32(-1))
387 // if we are only interested in the low seven bits, this can be tested with a testb
388 if (mask
.m_value
== -1)
389 m_assembler
.testq_rr(reg
, reg
);
390 else if ((mask
.m_value
& ~0x7f) == 0)
391 m_assembler
.testb_i8r(mask
.m_value
, reg
);
393 m_assembler
.testq_i32r(mask
.m_value
, reg
);
394 return Jump(m_assembler
.jCC(x86Condition(cond
)));
397 Jump
branchTestPtr(Condition cond
, Address address
, Imm32 mask
= Imm32(-1))
399 if (mask
.m_value
== -1)
400 m_assembler
.cmpq_im(0, address
.offset
, address
.base
);
402 m_assembler
.testq_i32m(mask
.m_value
, address
.offset
, address
.base
);
403 return Jump(m_assembler
.jCC(x86Condition(cond
)));
406 Jump
branchTestPtr(Condition cond
, BaseIndex address
, Imm32 mask
= Imm32(-1))
408 if (mask
.m_value
== -1)
409 m_assembler
.cmpq_im(0, address
.offset
, address
.base
, address
.index
, address
.scale
);
411 m_assembler
.testq_i32m(mask
.m_value
, address
.offset
, address
.base
, address
.index
, address
.scale
);
412 return Jump(m_assembler
.jCC(x86Condition(cond
)));
416 Jump
branchAddPtr(Condition cond
, RegisterID src
, RegisterID dest
)
418 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
420 return Jump(m_assembler
.jCC(x86Condition(cond
)));
423 Jump
branchSubPtr(Condition cond
, Imm32 imm
, RegisterID dest
)
425 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
427 return Jump(m_assembler
.jCC(x86Condition(cond
)));
430 DataLabelPtr
moveWithPatch(ImmPtr initialValue
, RegisterID dest
)
432 m_assembler
.movq_i64r(initialValue
.asIntptr(), dest
);
433 return DataLabelPtr(this);
436 Jump
branchPtrWithPatch(Condition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, ImmPtr initialRightValue
= ImmPtr(0))
438 dataLabel
= moveWithPatch(initialRightValue
, scratchRegister
);
439 return branchPtr(cond
, left
, scratchRegister
);
442 Jump
branchPtrWithPatch(Condition cond
, Address left
, DataLabelPtr
& dataLabel
, ImmPtr initialRightValue
= ImmPtr(0))
444 dataLabel
= moveWithPatch(initialRightValue
, scratchRegister
);
445 return branchPtr(cond
, left
, scratchRegister
);
448 DataLabelPtr
storePtrWithPatch(ImmPtr initialValue
, ImplicitAddress address
)
450 DataLabelPtr label
= moveWithPatch(initialValue
, scratchRegister
);
451 storePtr(scratchRegister
, address
);
455 Label
loadPtrWithPatchToLEA(Address address
, RegisterID dest
)
458 loadPtr(address
, dest
);
462 bool supportsFloatingPoint() const { return true; }
463 // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
464 bool supportsFloatingPointTruncate() const { return true; }
467 friend class LinkBuffer
;
468 friend class RepatchBuffer
;
470 static void linkCall(void* code
, Call call
, FunctionPtr function
)
472 if (!call
.isFlagSet(Call::Near
))
473 X86Assembler::linkPointer(code
, X86Assembler::labelFor(call
.m_jmp
, -REPTACH_OFFSET_CALL_R11
), function
.value());
475 X86Assembler::linkCall(code
, call
.m_jmp
, function
.value());
478 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
480 X86Assembler::repatchPointer(call
.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11
).dataLocation(), destination
.executableAddress());
483 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
485 X86Assembler::repatchPointer(call
.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11
).dataLocation(), destination
.executableAddress());
492 #endif // ENABLE(ASSEMBLER)
494 #endif // MacroAssemblerX86_64_h