/*
 * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #ifndef MacroAssemblerX86_h
27 #define MacroAssemblerX86_h
29 #if ENABLE(ASSEMBLER) && CPU(X86)
31 #include "MacroAssemblerX86Common.h"
35 class MacroAssemblerX86
: public MacroAssemblerX86Common
{
37 static const Scale ScalePtr
= TimesFour
;
39 using MacroAssemblerX86Common::add32
;
40 using MacroAssemblerX86Common::and32
;
41 using MacroAssemblerX86Common::branchAdd32
;
42 using MacroAssemblerX86Common::branchSub32
;
43 using MacroAssemblerX86Common::sub32
;
44 using MacroAssemblerX86Common::or32
;
45 using MacroAssemblerX86Common::load32
;
46 using MacroAssemblerX86Common::load8
;
47 using MacroAssemblerX86Common::store32
;
48 using MacroAssemblerX86Common::store8
;
49 using MacroAssemblerX86Common::branch32
;
50 using MacroAssemblerX86Common::call
;
51 using MacroAssemblerX86Common::jump
;
52 using MacroAssemblerX86Common::addDouble
;
53 using MacroAssemblerX86Common::loadDouble
;
54 using MacroAssemblerX86Common::storeDouble
;
55 using MacroAssemblerX86Common::convertInt32ToDouble
;
56 using MacroAssemblerX86Common::branch8
;
57 using MacroAssemblerX86Common::branchTest8
;
59 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
61 m_assembler
.leal_mr(imm
.m_value
, src
, dest
);
64 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
66 m_assembler
.addl_im(imm
.m_value
, address
.m_ptr
);
69 void add32(AbsoluteAddress address
, RegisterID dest
)
71 m_assembler
.addl_mr(address
.m_ptr
, dest
);
74 void add64(TrustedImm32 imm
, AbsoluteAddress address
)
76 m_assembler
.addl_im(imm
.m_value
, address
.m_ptr
);
77 m_assembler
.adcl_im(imm
.m_value
>> 31, reinterpret_cast<const char*>(address
.m_ptr
) + sizeof(int32_t));
80 void and32(TrustedImm32 imm
, AbsoluteAddress address
)
82 m_assembler
.andl_im(imm
.m_value
, address
.m_ptr
);
85 void or32(TrustedImm32 imm
, AbsoluteAddress address
)
87 m_assembler
.orl_im(imm
.m_value
, address
.m_ptr
);
90 void or32(RegisterID reg
, AbsoluteAddress address
)
92 m_assembler
.orl_rm(reg
, address
.m_ptr
);
95 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
97 m_assembler
.subl_im(imm
.m_value
, address
.m_ptr
);
100 void load32(const void* address
, RegisterID dest
)
102 m_assembler
.movl_mr(address
, dest
);
105 void load8(const void* address
, RegisterID dest
)
107 m_assembler
.movzbl_mr(address
, dest
);
110 void abortWithReason(AbortReason reason
)
112 move(TrustedImm32(reason
), X86Registers::eax
);
116 void abortWithReason(AbortReason reason
, intptr_t misc
)
118 move(TrustedImm32(misc
), X86Registers::edx
);
119 abortWithReason(reason
);
122 ConvertibleLoadLabel
convertibleLoadPtr(Address address
, RegisterID dest
)
124 ConvertibleLoadLabel result
= ConvertibleLoadLabel(this);
125 m_assembler
.movl_mr(address
.offset
, address
.base
, dest
);
129 void addDouble(AbsoluteAddress address
, FPRegisterID dest
)
131 m_assembler
.addsd_mr(address
.m_ptr
, dest
);
134 void storeDouble(FPRegisterID src
, TrustedImmPtr address
)
136 ASSERT(isSSE2Present());
137 ASSERT(address
.m_value
);
138 m_assembler
.movsd_rm(src
, address
.m_value
);
141 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
143 m_assembler
.cvtsi2sd_mr(src
.m_ptr
, dest
);
146 void store32(TrustedImm32 imm
, void* address
)
148 m_assembler
.movl_i32m(imm
.m_value
, address
);
151 void store32(RegisterID src
, void* address
)
153 m_assembler
.movl_rm(src
, address
);
156 void store8(RegisterID src
, void* address
)
158 m_assembler
.movb_rm(src
, address
);
161 void store8(TrustedImm32 imm
, void* address
)
163 ASSERT(-128 <= imm
.m_value
&& imm
.m_value
< 128);
164 m_assembler
.movb_i8m(imm
.m_value
, address
);
167 void moveDoubleToInts(FPRegisterID src
, RegisterID dest1
, RegisterID dest2
)
169 ASSERT(isSSE2Present());
170 m_assembler
.pextrw_irr(3, src
, dest1
);
171 m_assembler
.pextrw_irr(2, src
, dest2
);
172 lshift32(TrustedImm32(16), dest1
);
174 movePackedToInt32(src
, dest1
);
177 void moveIntsToDouble(RegisterID src1
, RegisterID src2
, FPRegisterID dest
, FPRegisterID scratch
)
179 moveInt32ToPacked(src1
, dest
);
180 moveInt32ToPacked(src2
, scratch
);
181 lshiftPacked(TrustedImm32(32), scratch
);
182 orPacked(scratch
, dest
);
185 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress dest
)
187 m_assembler
.addl_im(imm
.m_value
, dest
.m_ptr
);
188 return Jump(m_assembler
.jCC(x86Condition(cond
)));
191 Jump
branchSub32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress dest
)
193 m_assembler
.subl_im(imm
.m_value
, dest
.m_ptr
);
194 return Jump(m_assembler
.jCC(x86Condition(cond
)));
197 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
199 m_assembler
.cmpl_rm(right
, left
.m_ptr
);
200 return Jump(m_assembler
.jCC(x86Condition(cond
)));
203 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
205 m_assembler
.cmpl_im(right
.m_value
, left
.m_ptr
);
206 return Jump(m_assembler
.jCC(x86Condition(cond
)));
211 return Call(m_assembler
.call(), Call::Linkable
);
214 // Address is a memory location containing the address to jump to
215 void jump(AbsoluteAddress address
)
217 m_assembler
.jmp_m(address
.m_ptr
);
220 Call
tailRecursiveCall()
222 return Call::fromTailJump(jump());
225 Call
makeTailRecursiveCall(Jump oldJump
)
227 return Call::fromTailJump(oldJump
);
231 DataLabelPtr
moveWithPatch(TrustedImmPtr initialValue
, RegisterID dest
)
234 m_assembler
.movl_i32r(initialValue
.asIntptr(), dest
);
235 return DataLabelPtr(this);
238 Jump
branch8(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
240 m_assembler
.cmpb_im(right
.m_value
, left
.m_ptr
);
241 return Jump(m_assembler
.jCC(x86Condition(cond
)));
244 Jump
branchTest8(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
246 ASSERT(mask
.m_value
>= -128 && mask
.m_value
<= 255);
247 if (mask
.m_value
== -1)
248 m_assembler
.cmpb_im(0, address
.m_ptr
);
250 m_assembler
.testb_im(mask
.m_value
, address
.m_ptr
);
251 return Jump(m_assembler
.jCC(x86Condition(cond
)));
254 Jump
branchPtrWithPatch(RelationalCondition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
257 m_assembler
.cmpl_ir_force32(initialRightValue
.asIntptr(), left
);
258 dataLabel
= DataLabelPtr(this);
259 return Jump(m_assembler
.jCC(x86Condition(cond
)));
262 Jump
branchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
265 m_assembler
.cmpl_im_force32(initialRightValue
.asIntptr(), left
.offset
, left
.base
);
266 dataLabel
= DataLabelPtr(this);
267 return Jump(m_assembler
.jCC(x86Condition(cond
)));
270 Jump
branch32WithPatch(RelationalCondition cond
, Address left
, DataLabel32
& dataLabel
, TrustedImm32 initialRightValue
= TrustedImm32(0))
273 m_assembler
.cmpl_im_force32(initialRightValue
.m_value
, left
.offset
, left
.base
);
274 dataLabel
= DataLabel32(this);
275 return Jump(m_assembler
.jCC(x86Condition(cond
)));
278 DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
281 m_assembler
.movl_i32m(initialValue
.asIntptr(), address
.offset
, address
.base
);
282 return DataLabelPtr(this);
285 static bool supportsFloatingPoint() { return isSSE2Present(); }
286 static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
287 static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
288 static bool supportsFloatingPointAbs() { return isSSE2Present(); }
290 static FunctionPtr
readCallTarget(CodeLocationCall call
)
292 intptr_t offset
= reinterpret_cast<int32_t*>(call
.dataLocation())[-1];
293 return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call
.dataLocation()) + offset
));
296 static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
297 static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
299 static CodeLocationLabel
startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label
)
301 const int opcodeBytes
= 1;
302 const int modRMBytes
= 1;
303 const int immediateBytes
= 4;
304 const int totalBytes
= opcodeBytes
+ modRMBytes
+ immediateBytes
;
305 ASSERT(totalBytes
>= maxJumpReplacementSize());
306 return label
.labelAtOffset(-totalBytes
);
309 static CodeLocationLabel
startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label
)
311 const int opcodeBytes
= 1;
312 const int modRMBytes
= 1;
313 const int offsetBytes
= 0;
314 const int immediateBytes
= 4;
315 const int totalBytes
= opcodeBytes
+ modRMBytes
+ offsetBytes
+ immediateBytes
;
316 ASSERT(totalBytes
>= maxJumpReplacementSize());
317 return label
.labelAtOffset(-totalBytes
);
320 static CodeLocationLabel
startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label
)
322 const int opcodeBytes
= 1;
323 const int modRMBytes
= 1;
324 const int offsetBytes
= 0;
325 const int immediateBytes
= 4;
326 const int totalBytes
= opcodeBytes
+ modRMBytes
+ offsetBytes
+ immediateBytes
;
327 ASSERT(totalBytes
>= maxJumpReplacementSize());
328 return label
.labelAtOffset(-totalBytes
);
331 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart
, RegisterID reg
, void* initialValue
)
333 X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart
.executableAddress(), reinterpret_cast<intptr_t>(initialValue
), reg
);
336 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart
, Address address
, void* initialValue
)
338 ASSERT(!address
.offset
);
339 X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart
.executableAddress(), reinterpret_cast<intptr_t>(initialValue
), 0, address
.base
);
342 static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart
, Address address
, int32_t initialValue
)
344 ASSERT(!address
.offset
);
345 X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart
.executableAddress(), initialValue
, 0, address
.base
);
349 friend class LinkBuffer
;
350 friend class RepatchBuffer
;
352 static void linkCall(void* code
, Call call
, FunctionPtr function
)
354 X86Assembler::linkCall(code
, call
.m_label
, function
.value());
357 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
359 X86Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
362 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
364 X86Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
370 #endif // ENABLE(ASSEMBLER)
372 #endif // MacroAssemblerX86_h