/*
 * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_h
#define MacroAssemblerX86_h

#if ENABLE(ASSEMBLER) && CPU(X86)

#include "MacroAssemblerX86Common.h"

namespace JSC {

// 32-bit-x86-specific layer of the macro assembler. It extends
// MacroAssemblerX86Common with operations that only exist (or are only
// encodable) on IA-32: absolute-addressed memory operands, 64-bit arithmetic
// done as 32-bit pairs, and pointer-sized patchable immediates (a "pointer"
// here is a 32-bit movl/cmpl immediate).
class MacroAssemblerX86 : public MacroAssemblerX86Common {
public:
    // Pointers are 4 bytes on IA-32, so pointer-scaled indexing uses *4.
    static const Scale ScalePtr = TimesFour;

    // Pull the base-class overload sets into scope so the absolute-address
    // forms declared below overload them instead of hiding them.
    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::branchSub32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::load8;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::branch32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::storeDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;
    using MacroAssemblerX86Common::branch8;
    using MacroAssemblerX86Common::branchTest8;

    // dest = src + imm, emitted as LEA: non-destructive to src and, unlike
    // ADD, LEA does not modify the condition flags.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    // Read-modify-write add of an immediate directly to an absolute address
    // (IA-32 can encode the absolute address; x86-64 would need a scratch).
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
    }

    // dest += *address.
    void add32(AbsoluteAddress address, RegisterID dest)
    {
        m_assembler.addl_mr(address.m_ptr, dest);
    }

    // 64-bit in-memory add of a sign-extended 32-bit immediate: ADD the low
    // word, then ADC the immediate's sign bits (m_value >> 31 is all-zeros or
    // all-ones) plus carry into the high word at address + 4.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
        m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t));
    }

    // *address &= imm.
    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.andl_im(imm.m_value, address.m_ptr);
    }

    // *address |= imm.
    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.orl_im(imm.m_value, address.m_ptr);
    }

    // *address |= reg.
    void or32(RegisterID reg, AbsoluteAddress address)
    {
        m_assembler.orl_rm(reg, address.m_ptr);
    }

    // *address -= imm.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.subl_im(imm.m_value, address.m_ptr);
    }

    // dest = *(int32_t*)address.
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.movl_mr(address, dest);
    }

    // dest = zero-extended *(uint8_t*)address (MOVZX).
    void load8(const void* address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address, dest);
    }

    // Deterministic crash: park the reason code in eax where a crash report /
    // debugger can see it, then hit a breakpoint instruction.
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), X86Registers::eax);
        breakpoint();
    }

    // As above, with an extra payload word left in edx.
    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm32(misc), X86Registers::edx);
        abortWithReason(reason);
    }

    // Emit a pointer-width load and return a label covering it, so the
    // instruction can later be rewritten in place (see LinkBuffer /
    // RepatchBuffer users of ConvertibleLoadLabel).
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movl_mr(address.offset, address.base, dest);
        return result;
    }

    // dest += *address (SSE2 scalar double add from an absolute address).
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        m_assembler.addsd_mr(address.m_ptr, dest);
    }

    // Store a double to an absolute address; requires SSE2 and a non-null
    // destination pointer.
    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        ASSERT(isSSE2Present());
        ASSERT(address.m_value);
        m_assembler.movsd_rm(src, address.m_value);
    }

    // dest = (double)*(int32_t*)src (CVTSI2SD from memory).
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
    }

    // *(int32_t*)address = imm.
    void store32(TrustedImm32 imm, void* address)
    {
        m_assembler.movl_i32m(imm.m_value, address);
    }

    // *(int32_t*)address = src.
    void store32(RegisterID src, void* address)
    {
        m_assembler.movl_rm(src, address);
    }

    // *(int8_t*)address = low byte of src.
    void store8(RegisterID src, void* address)
    {
        m_assembler.movb_rm(src, address);
    }

    // *(int8_t*)address = imm; the immediate must fit an 8-bit encoding.
    void store8(TrustedImm32 imm, void* address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address);
    }

    // Split a double register into two 32-bit GPR halves:
    // dest2 receives the high 32 bits (assembled from the two high 16-bit
    // words via PEXTRW + shift + OR), dest1 receives the low 32 bits.
    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        ASSERT(isSSE2Present());
        m_assembler.pextrw_irr(3, src, dest1);
        m_assembler.pextrw_irr(2, src, dest2);
        lshift32(TrustedImm32(16), dest1);
        or32(dest1, dest2);
        movePackedToInt32(src, dest1);
    }

    // Inverse of moveDoubleToInts: build a double in dest from low half src1
    // and high half src2, using scratch to position the high word.
    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
    {
        moveInt32ToPacked(src1, dest);
        moveInt32ToPacked(src2, scratch);
        lshiftPacked(TrustedImm32(32), scratch);
        orPacked(scratch, dest);
    }

    // Add to a value at an absolute address and branch on the resulting
    // condition flags (e.g. overflow/zero, per cond).
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        m_assembler.addl_im(imm.m_value, dest.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Subtract from a value at an absolute address and branch on the flags.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        m_assembler.subl_im(imm.m_value, dest.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare *left against right and branch. Note operand order: the memory
    // value is the left-hand side of the relational condition.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare *left against an immediate and branch.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Near call whose target is filled in at link time (Call::Linkable).
    Call call()
    {
        return Call(m_assembler.call(), Call::Linkable);
    }

    // Address is a memory location containing the address to jump to
    // (indirect jmp through memory).
    void jump(AbsoluteAddress address)
    {
        m_assembler.jmp_m(address.m_ptr);
    }

    // A tail call is represented as a jump that the linker treats as a call.
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    // Convert an already-emitted jump into a linkable tail call.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

    // Load a pointer-sized constant via movl imm32; the returned label marks
    // the instruction so the 32-bit immediate can be repatched later.
    // padBeforePatch() keeps the instruction layout patch-safe.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    // Compare the byte at an absolute address against an immediate and branch.
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Test bits of the byte at an absolute address. An all-ones mask (-1)
    // degenerates to comparing the byte against zero, which sets the same
    // flags as TEST with 0xFF; otherwise emit TESTB with the mask.
    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.m_ptr);
        else
            m_assembler.testb_im(mask.m_value, address.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare a register against a patchable pointer constant and branch.
    // force32 guarantees the full 4-byte immediate encoding (even for small
    // values) so the patched-in pointer always fits; dataLabel marks it.
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        padBeforePatch();
        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // As above, but the left operand is a base+offset memory location.
    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        padBeforePatch();
        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // 32-bit variant of the patchable memory compare-and-branch.
    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        padBeforePatch();
        m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base);
        dataLabel = DataLabel32(this);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Store a patchable pointer constant to base+offset; the label marks the
    // 32-bit immediate for later repatching.
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        padBeforePatch();
        m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
        return DataLabelPtr(this);
    }

    // On IA-32 the JIT's floating point support is gated entirely on SSE2.
    static bool supportsFloatingPoint() { return isSSE2Present(); }
    static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
    static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
    static bool supportsFloatingPointAbs() { return isSSE2Present(); }

    // Recover the target of a linked near call: the 4 bytes ending at
    // dataLocation() are the rel32 displacement, relative to the address of
    // the instruction that follows the call (== dataLocation()).
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
        return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
    }

    // The patchable compares emitted above are long enough to be overwritten
    // in place by a jump, so jump replacement is supported.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

    // Start of the cmpl reg, imm32 emitted by branchPtrWithPatch(register):
    // the label sits just past the instruction, so back up by its encoded
    // length (opcode + ModRM + imm32). The instruction must be at least as
    // long as the jump that may replace it.
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = opcodeBytes + modRMBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    // Start of the cmpl mem, imm32 emitted by branchPtrWithPatch(memory).
    // offsetBytes is 0 because the revert path asserts a zero displacement.
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        const int offsetBytes = 0;
        const int immediateBytes = 4;
        const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    // Same length computation for the 32-bit branch32WithPatch form.
    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        const int offsetBytes = 0;
        const int immediateBytes = 4;
        const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    // Undo a jump replacement by re-emitting the original patchable compare.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
    {
        X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
    }

    // Memory form; only a zero displacement is supported (matches the
    // offsetBytes == 0 assumption in the startOf* computations above).
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue)
    {
        ASSERT(!address.offset);
        X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
    }

    // 32-bit immediate variant of the memory-form revert.
    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue)
    {
        ASSERT(!address.offset);
        X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base);
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Resolve a Call emitted by call() to a concrete function at link time.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        X86Assembler::linkCall(code, call.m_label, function.value());
    }

    // Retarget an already-linked call in executable memory.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86)

#endif // MacroAssemblerX86_h