/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #ifndef MacroAssemblerX86_h
27 #define MacroAssemblerX86_h
29 #if ENABLE(ASSEMBLER) && CPU(X86)
31 #include "MacroAssemblerX86Common.h"
35 class MacroAssemblerX86
: public MacroAssemblerX86Common
{
37 static const Scale ScalePtr
= TimesFour
;
39 using MacroAssemblerX86Common::add32
;
40 using MacroAssemblerX86Common::and32
;
41 using MacroAssemblerX86Common::branchAdd32
;
42 using MacroAssemblerX86Common::branchSub32
;
43 using MacroAssemblerX86Common::sub32
;
44 using MacroAssemblerX86Common::or32
;
45 using MacroAssemblerX86Common::load32
;
46 using MacroAssemblerX86Common::store32
;
47 using MacroAssemblerX86Common::store8
;
48 using MacroAssemblerX86Common::branch32
;
49 using MacroAssemblerX86Common::call
;
50 using MacroAssemblerX86Common::jump
;
51 using MacroAssemblerX86Common::addDouble
;
52 using MacroAssemblerX86Common::loadDouble
;
53 using MacroAssemblerX86Common::storeDouble
;
54 using MacroAssemblerX86Common::convertInt32ToDouble
;
55 using MacroAssemblerX86Common::branchTest8
;
57 void add32(TrustedImm32 imm
, RegisterID src
, RegisterID dest
)
59 m_assembler
.leal_mr(imm
.m_value
, src
, dest
);
62 void add32(TrustedImm32 imm
, AbsoluteAddress address
)
64 m_assembler
.addl_im(imm
.m_value
, address
.m_ptr
);
67 void add32(AbsoluteAddress address
, RegisterID dest
)
69 m_assembler
.addl_mr(address
.m_ptr
, dest
);
72 void add64(TrustedImm32 imm
, AbsoluteAddress address
)
74 m_assembler
.addl_im(imm
.m_value
, address
.m_ptr
);
75 m_assembler
.adcl_im(imm
.m_value
>> 31, reinterpret_cast<const char*>(address
.m_ptr
) + sizeof(int32_t));
78 void and32(TrustedImm32 imm
, AbsoluteAddress address
)
80 m_assembler
.andl_im(imm
.m_value
, address
.m_ptr
);
83 void or32(TrustedImm32 imm
, AbsoluteAddress address
)
85 m_assembler
.orl_im(imm
.m_value
, address
.m_ptr
);
88 void or32(RegisterID reg
, AbsoluteAddress address
)
90 m_assembler
.orl_rm(reg
, address
.m_ptr
);
93 void sub32(TrustedImm32 imm
, AbsoluteAddress address
)
95 m_assembler
.subl_im(imm
.m_value
, address
.m_ptr
);
98 void load32(const void* address
, RegisterID dest
)
100 m_assembler
.movl_mr(address
, dest
);
103 ConvertibleLoadLabel
convertibleLoadPtr(Address address
, RegisterID dest
)
105 ConvertibleLoadLabel result
= ConvertibleLoadLabel(this);
106 m_assembler
.movl_mr(address
.offset
, address
.base
, dest
);
110 void addDouble(AbsoluteAddress address
, FPRegisterID dest
)
112 m_assembler
.addsd_mr(address
.m_ptr
, dest
);
115 void storeDouble(FPRegisterID src
, const void* address
)
117 ASSERT(isSSE2Present());
118 m_assembler
.movsd_rm(src
, address
);
121 void convertInt32ToDouble(AbsoluteAddress src
, FPRegisterID dest
)
123 m_assembler
.cvtsi2sd_mr(src
.m_ptr
, dest
);
126 void store32(TrustedImm32 imm
, void* address
)
128 m_assembler
.movl_i32m(imm
.m_value
, address
);
131 void store32(RegisterID src
, void* address
)
133 m_assembler
.movl_rm(src
, address
);
136 void store8(TrustedImm32 imm
, void* address
)
138 ASSERT(-128 <= imm
.m_value
&& imm
.m_value
< 128);
139 m_assembler
.movb_i8m(imm
.m_value
, address
);
142 // Possibly clobbers src.
143 void moveDoubleToInts(FPRegisterID src
, RegisterID dest1
, RegisterID dest2
)
145 movePackedToInt32(src
, dest1
);
146 rshiftPacked(TrustedImm32(32), src
);
147 movePackedToInt32(src
, dest2
);
150 void moveIntsToDouble(RegisterID src1
, RegisterID src2
, FPRegisterID dest
, FPRegisterID scratch
)
152 moveInt32ToPacked(src1
, dest
);
153 moveInt32ToPacked(src2
, scratch
);
154 lshiftPacked(TrustedImm32(32), scratch
);
155 orPacked(scratch
, dest
);
158 Jump
branchAdd32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress dest
)
160 m_assembler
.addl_im(imm
.m_value
, dest
.m_ptr
);
161 return Jump(m_assembler
.jCC(x86Condition(cond
)));
164 Jump
branchSub32(ResultCondition cond
, TrustedImm32 imm
, AbsoluteAddress dest
)
166 m_assembler
.subl_im(imm
.m_value
, dest
.m_ptr
);
167 return Jump(m_assembler
.jCC(x86Condition(cond
)));
170 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, RegisterID right
)
172 m_assembler
.cmpl_rm(right
, left
.m_ptr
);
173 return Jump(m_assembler
.jCC(x86Condition(cond
)));
176 Jump
branch32(RelationalCondition cond
, AbsoluteAddress left
, TrustedImm32 right
)
178 m_assembler
.cmpl_im(right
.m_value
, left
.m_ptr
);
179 return Jump(m_assembler
.jCC(x86Condition(cond
)));
184 return Call(m_assembler
.call(), Call::Linkable
);
187 // Address is a memory location containing the address to jump to
188 void jump(AbsoluteAddress address
)
190 m_assembler
.jmp_m(address
.m_ptr
);
193 Call
tailRecursiveCall()
195 return Call::fromTailJump(jump());
198 Call
makeTailRecursiveCall(Jump oldJump
)
200 return Call::fromTailJump(oldJump
);
204 DataLabelPtr
moveWithPatch(TrustedImmPtr initialValue
, RegisterID dest
)
207 m_assembler
.movl_i32r(initialValue
.asIntptr(), dest
);
208 return DataLabelPtr(this);
211 Jump
branchTest8(ResultCondition cond
, AbsoluteAddress address
, TrustedImm32 mask
= TrustedImm32(-1))
213 ASSERT(mask
.m_value
>= -128 && mask
.m_value
<= 255);
214 if (mask
.m_value
== -1)
215 m_assembler
.cmpb_im(0, address
.m_ptr
);
217 m_assembler
.testb_im(mask
.m_value
, address
.m_ptr
);
218 return Jump(m_assembler
.jCC(x86Condition(cond
)));
221 Jump
branchPtrWithPatch(RelationalCondition cond
, RegisterID left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
224 m_assembler
.cmpl_ir_force32(initialRightValue
.asIntptr(), left
);
225 dataLabel
= DataLabelPtr(this);
226 return Jump(m_assembler
.jCC(x86Condition(cond
)));
229 Jump
branchPtrWithPatch(RelationalCondition cond
, Address left
, DataLabelPtr
& dataLabel
, TrustedImmPtr initialRightValue
= TrustedImmPtr(0))
232 m_assembler
.cmpl_im_force32(initialRightValue
.asIntptr(), left
.offset
, left
.base
);
233 dataLabel
= DataLabelPtr(this);
234 return Jump(m_assembler
.jCC(x86Condition(cond
)));
237 DataLabelPtr
storePtrWithPatch(TrustedImmPtr initialValue
, ImplicitAddress address
)
240 m_assembler
.movl_i32m(initialValue
.asIntptr(), address
.offset
, address
.base
);
241 return DataLabelPtr(this);
// Double-precision support on x86-32 requires SSE2; these feature tests
// gate the JIT's floating-point code paths at runtime.
static bool supportsFloatingPoint() { return isSSE2Present(); }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
static bool supportsFloatingPointAbs() { return isSSE2Present(); }
250 static FunctionPtr
readCallTarget(CodeLocationCall call
)
252 intptr_t offset
= reinterpret_cast<int32_t*>(call
.dataLocation())[-1];
253 return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call
.dataLocation()) + offset
));
// On x86 the patchable branch-on-pointer sequence is large enough to be
// overwritten in place by a jump (see the startOf*/revert* helpers below).
static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
258 static CodeLocationLabel
startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label
)
260 const int opcodeBytes
= 1;
261 const int modRMBytes
= 1;
262 const int immediateBytes
= 4;
263 const int totalBytes
= opcodeBytes
+ modRMBytes
+ immediateBytes
;
264 ASSERT(totalBytes
>= maxJumpReplacementSize());
265 return label
.labelAtOffset(-totalBytes
);
268 static CodeLocationLabel
startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label
)
270 const int opcodeBytes
= 1;
271 const int modRMBytes
= 1;
272 const int offsetBytes
= 0;
273 const int immediateBytes
= 4;
274 const int totalBytes
= opcodeBytes
+ modRMBytes
+ offsetBytes
+ immediateBytes
;
275 ASSERT(totalBytes
>= maxJumpReplacementSize());
276 return label
.labelAtOffset(-totalBytes
);
279 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart
, RegisterID reg
, void* initialValue
)
281 X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart
.executableAddress(), reinterpret_cast<intptr_t>(initialValue
), reg
);
284 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart
, Address address
, void* initialValue
)
286 ASSERT(!address
.offset
);
287 X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart
.executableAddress(), reinterpret_cast<intptr_t>(initialValue
), 0, address
.base
);
291 friend class LinkBuffer
;
292 friend class RepatchBuffer
;
294 static void linkCall(void* code
, Call call
, FunctionPtr function
)
296 X86Assembler::linkCall(code
, call
.m_label
, function
.value());
299 static void repatchCall(CodeLocationCall call
, CodeLocationLabel destination
)
301 X86Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
304 static void repatchCall(CodeLocationCall call
, FunctionPtr destination
)
306 X86Assembler::relinkCall(call
.dataLocation(), destination
.executableAddress());
312 #endif // ENABLE(ASSEMBLER)
314 #endif // MacroAssemblerX86_h