/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #ifndef MacroAssemblerX86_64_h
27 #define MacroAssemblerX86_64_h
29 #include <wtf/Platform.h>
31 #if ENABLE(ASSEMBLER) && CPU(X86_64)
33 #include "MacroAssemblerX86Common.h"
35 #define REPTACH_OFFSET_CALL_R11 3
39 class MacroAssemblerX86_64
: public MacroAssemblerX86Common
{
41 static const X86Registers::RegisterID scratchRegister
= X86Registers::r11
;
44 static const Scale ScalePtr
= TimesEight
;
46 using MacroAssemblerX86Common::add32
;
47 using MacroAssemblerX86Common::and32
;
48 using MacroAssemblerX86Common::or32
;
49 using MacroAssemblerX86Common::sub32
;
50 using MacroAssemblerX86Common::load32
;
51 using MacroAssemblerX86Common::store32
;
52 using MacroAssemblerX86Common::call
;
53 using MacroAssemblerX86Common::loadDouble
;
54 using MacroAssemblerX86Common::convertInt32ToDouble
;
56 void add32(Imm32 imm
, AbsoluteAddress address
)
58 move(ImmPtr(address
.m_ptr
), scratchRegister
);
59 add32(imm
, Address(scratchRegister
));
62 void and32(Imm32 imm
, AbsoluteAddress address
)
64 move(ImmPtr(address
.m_ptr
), scratchRegister
);
65 and32(imm
, Address(scratchRegister
));
68 void or32(Imm32 imm
, AbsoluteAddress address
)
70 move(ImmPtr(address
.m_ptr
), scratchRegister
);
71 or32(imm
, Address(scratchRegister
));
74 void sub32(Imm32 imm
, AbsoluteAddress address
)
76 move(ImmPtr(address
.m_ptr
), scratchRegister
);
77 sub32(imm
, Address(scratchRegister
));
80 void load32(void* address
, RegisterID dest
)
82 if (dest
== X86Registers::eax
)
83 m_assembler
.movl_mEAX(address
);
85 move(X86Registers::eax
, dest
);
86 m_assembler
.movl_mEAX(address
);
87 swap(X86Registers::eax
, dest
);
91 void loadDouble(void* address
, FPRegisterID dest
)
93 move(ImmPtr(address
), scratchRegister
);
94 loadDouble(scratchRegister
, dest
);
97 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
99 move(Imm32(*static_cast<int32_t*>(src
.m_ptr
)), scratchRegister
);
100 m_assembler
.cvtsi2sd_rr(scratchRegister
, dest
);
103 void store32(Imm32 imm
, void* address
)
105 move(X86Registers::eax
, scratchRegister
);
106 move(imm
, X86Registers::eax
);
107 m_assembler
.movl_EAXm(address
);
108 move(scratchRegister
, X86Registers::eax
);
113 DataLabelPtr label
= moveWithPatch(ImmPtr(0), scratchRegister
);
114 Call result
= Call(m_assembler
.call(scratchRegister
), Call::Linkable
);
115 ASSERT(differenceBetween(label
, result
) == REPTACH_OFFSET_CALL_R11
);
119 Call
tailRecursiveCall()
121 DataLabelPtr label
= moveWithPatch(ImmPtr(0), scratchRegister
);
122 Jump newJump
= Jump(m_assembler
.jmp_r(scratchRegister
));
123 ASSERT(differenceBetween(label
, newJump
) == REPTACH_OFFSET_CALL_R11
);
124 return Call::fromTailJump(newJump
);
127 Call
makeTailRecursiveCall(Jump oldJump
)
130 DataLabelPtr label
= moveWithPatch(ImmPtr(0), scratchRegister
);
131 Jump newJump
= Jump(m_assembler
.jmp_r(scratchRegister
));
132 ASSERT(differenceBetween(label
, newJump
) == REPTACH_OFFSET_CALL_R11
);
133 return Call::fromTailJump(newJump
);
137 void addPtr(RegisterID src
, RegisterID dest
)
139 m_assembler
.addq_rr(src
, dest
);
142 void addPtr(Imm32 imm
, RegisterID srcDest
)
144 m_assembler
.addq_ir(imm
.m_value
, srcDest
);
147 void addPtr(ImmPtr imm
, RegisterID dest
)
149 move(imm
, scratchRegister
);
150 m_assembler
.addq_rr(scratchRegister
, dest
);
153 void addPtr(Imm32 imm
, RegisterID src
, RegisterID dest
)
155 m_assembler
.leaq_mr(imm
.m_value
, src
, dest
);
158 void addPtr(Imm32 imm
, Address address
)
160 m_assembler
.addq_im(imm
.m_value
, address
.offset
, address
.base
);
163 void addPtr(Imm32 imm
, AbsoluteAddress address
)
165 move(ImmPtr(address
.m_ptr
), scratchRegister
);
166 addPtr(imm
, Address(scratchRegister
));
169 void andPtr(RegisterID src
, RegisterID dest
)
171 m_assembler
.andq_rr(src
, dest
);
174 void andPtr(Imm32 imm
, RegisterID srcDest
)
176 m_assembler
.andq_ir(imm
.m_value
, srcDest
);
179 void orPtr(RegisterID src
, RegisterID dest
)
181 m_assembler
.orq_rr(src
, dest
);
184 void orPtr(ImmPtr imm
, RegisterID dest
)
186 move(imm
, scratchRegister
);
187 m_assembler
.orq_rr(scratchRegister
, dest
);
190 void orPtr(Imm32 imm
, RegisterID dest
)
192 m_assembler
.orq_ir(imm
.m_value
, dest
);
195 void subPtr(RegisterID src
, RegisterID dest
)
197 m_assembler
.subq_rr(src
, dest
);
200 void subPtr(Imm32 imm
, RegisterID dest
)
202 m_assembler
.subq_ir(imm
.m_value
, dest
);
205 void subPtr(ImmPtr imm
, RegisterID dest
)
207 move(imm
, scratchRegister
);
208 m_assembler
.subq_rr(scratchRegister
, dest
);
211 void xorPtr(RegisterID src
, RegisterID dest
)
213 m_assembler
.xorq_rr(src
, dest
);
216 void xorPtr(Imm32 imm
, RegisterID srcDest
)
218 m_assembler
.xorq_ir(imm
.m_value
, srcDest
);
222 void loadPtr(ImplicitAddress address
, RegisterID dest
)
224 m_assembler
.movq_mr(address
.offset
, address
.base
, dest
);
227 void loadPtr(BaseIndex address
, RegisterID dest
)
229 m_assembler
.movq_mr(address
.offset
, address
.base
, address
.index
, address
.scale
, dest
);
232 void loadPtr(void* address
, RegisterID dest
)
234 if (dest
== X86Registers::eax
)
235 m_assembler
.movq_mEAX(address
);
237 move(X86Registers::eax
, dest
);
238 m_assembler
.movq_mEAX(address
);
239 swap(X86Registers::eax
, dest
);
243 DataLabel32
loadPtrWithAddressOffsetPatch(Address address
, RegisterID dest
)
245 m_assembler
.movq_mr_disp32(address
.offset
, address
.base
, dest
);
246 return DataLabel32(this);
249 void storePtr(RegisterID src
, ImplicitAddress address
)
251 m_assembler
.movq_rm(src
, address
.offset
, address
.base
);
254 void storePtr(RegisterID src
, BaseIndex address
)
256 m_assembler
.movq_rm(src
, address
.offset
, address
.base
, address
.index
, address
.scale
);
259 void storePtr(RegisterID src
, void* address
)
261 if (src
== X86Registers::eax
)
262 m_assembler
.movq_EAXm(address
);
264 swap(X86Registers::eax
, src
);
265 m_assembler
.movq_EAXm(address
);
266 swap(X86Registers::eax
, src
);
270 void storePtr(ImmPtr imm
, ImplicitAddress address
)
272 move(imm
, scratchRegister
);
273 storePtr(scratchRegister
, address
);
276 DataLabel32
storePtrWithAddressOffsetPatch(RegisterID src
, Address address
)
278 m_assembler
.movq_rm_disp32(src
, address
.offset
, address
.base
);
279 return DataLabel32(this);
282 void movePtrToDouble(RegisterID src
, FPRegisterID dest
)
284 m_assembler
.movq_rr(src
, dest
);
287 void moveDoubleToPtr(FPRegisterID src
, RegisterID dest
)
289 m_assembler
.movq_rr(src
, dest
);
292 void setPtr(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
294 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
295 m_assembler
.testq_rr(left
, left
);
297 m_assembler
.cmpq_ir(right
.m_value
, left
);
298 m_assembler
.setCC_r(x86Condition(cond
), dest
);
299 m_assembler
.movzbl_rr(dest
, dest
);
302 Jump
branchPtr(Condition cond
, RegisterID left
, RegisterID right
)
304 m_assembler
.cmpq_rr(right
, left
);
305 return Jump(m_assembler
.jCC(x86Condition(cond
)));
308 Jump
branchPtr(Condition cond
, RegisterID left
, ImmPtr right
)
310 move(right
, scratchRegister
);
311 return branchPtr(cond
, left
, scratchRegister
);
314 Jump
branchPtr(Condition cond
, RegisterID left
, Address right
)
316 m_assembler
.cmpq_mr(right
.offset
, right
.base
, left
);
317 return Jump(m_assembler
.jCC(x86Condition(cond
)));
320 Jump
branchPtr(Condition cond
, AbsoluteAddress left
, RegisterID right
)
322 move(ImmPtr(left
.m_ptr
), scratchRegister
);
323 return branchPtr(cond
, Address(scratchRegister
), right
);
326 Jump
branchPtr(Condition cond
, Address left
, RegisterID right
)
328 m_assembler
.cmpq_rm(right
, left
.offset
, left
.base
);
329 return Jump(m_assembler
.jCC(x86Condition(cond
)));
332 Jump
branchPtr(Condition cond
, Address left
, ImmPtr right
)
334 move(right
, scratchRegister
);
335 return branchPtr(cond
, left
, scratchRegister
);
338 Jump
branchTestPtr(Condition cond
, RegisterID reg
, RegisterID mask
)
340 m_assembler
.testq_rr(reg
, mask
);
341 return Jump(m_assembler
.jCC(x86Condition(cond
)));
344 Jump
branchTestPtr(Condition cond
, RegisterID reg
, Imm32 mask
= Imm32(-1))
346 // if we are only interested in the low seven bits, this can be tested with a testb
347 if (mask
.m_value
== -1)
348 m_assembler
.testq_rr(reg
, reg
);
349 else if ((mask
.m_value
& ~0x7f) == 0)
350 m_assembler
.testb_i8r(mask
.m_value
, reg
);
352 m_assembler
.testq_i32r(mask
.m_value
, reg
);
353 return Jump(m_assembler
.jCC(x86Condition(cond
)));
356 Jump
branchTestPtr(Condition cond
, Address address
, Imm32 mask
= Imm32(-1))
358 if (mask
.m_value
== -1)
359 m_assembler
.cmpq_im(0, address
.offset
, address
.base
);
361 m_assembler
.testq_i32m(mask
.m_value
, address
.offset
, address
.base
);
362 return Jump(m_assembler
.jCC(x86Condition(cond
)));
365 Jump
branchTestPtr(Condition cond
, BaseIndex address
, Imm32 mask
= Imm32(-1))
367 if (mask
.m_value
== -1)
368 m_assembler
.cmpq_im(0, address
.offset
, address
.base
, address
.index
, address
.scale
);
370 m_assembler
.testq_i32m(mask
.m_value
, address
.offset
, address
.base
, address
.index
, address
.scale
);
371 return Jump(m_assembler
.jCC(x86Condition(cond
)));
375 Jump
branchAddPtr(Condition cond
, RegisterID src
, RegisterID dest
)
377 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
379 return Jump(m_assembler
.jCC(x86Condition(cond
)));
382 Jump
branchSubPtr(Condition cond
, Imm32 imm
, RegisterID dest
)
384 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
386 return Jump(m_assembler
.jCC(x86Condition(cond
)));
389 DataLabelPtr
moveWithPatch(ImmPtr initialValue
, RegisterID dest
)
391 m_assembler
.movq_i64r(initialValue
.asIntptr(), dest
);
392 return DataLabelPtr(this);
395 Jump
branchPtrWithPatch(Condition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, ImmPtr initialRightValue
= ImmPtr(0))
397 dataLabel
= moveWithPatch(initialRightValue
, scratchRegister
);
398 return branchPtr(cond
, left
, scratchRegister
);
401 Jump
branchPtrWithPatch(Condition cond
, Address left
, DataLabelPtr
& dataLabel
, ImmPtr initialRightValue
= ImmPtr(0))
403 dataLabel
= moveWithPatch(initialRightValue
, scratchRegister
);
404 return branchPtr(cond
, left
, scratchRegister
);
407 DataLabelPtr
storePtrWithPatch(ImmPtr initialValue
, ImplicitAddress address
)
409 DataLabelPtr label
= moveWithPatch(initialValue
, scratchRegister
);
410 storePtr(scratchRegister
, address
);
414 Label
loadPtrWithPatchToLEA(Address address
, RegisterID dest
)
417 loadPtr(address
, dest
);
421 bool supportsFloatingPoint() const { return true; }
422 // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
423 bool supportsFloatingPointTruncate() const { return true; }
426 friend class LinkBuffer
;
427 friend class RepatchBuffer
;
429 static void linkCall(void* code
, Call call
, FunctionPtr function
)
431 if (!call
.isFlagSet(Call::Near
))
432 X86Assembler::linkPointer(code
, X86Assembler::labelFor(call
.m_jmp
, -REPTACH_OFFSET_CALL_R11
), function
.value());
434 X86Assembler::linkCall(code
, call
.m_jmp
, function
.value());
437 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
439 X86Assembler::repatchPointer(call
.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11
).dataLocation(), destination
.executableAddress());
442 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
444 X86Assembler::repatchPointer(call
.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11
).dataLocation(), destination
.executableAddress());
451 #endif // ENABLE(ASSEMBLER)
453 #endif // MacroAssemblerX86_64_h