/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER) && PLATFORM(X86_64)

#include "MacroAssemblerX86Common.h"

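// Byte length of the indirect "call *%r11" / "jmp *%r11" instruction emitted below.
// The patchable 64-bit pointer loaded into r11 ends exactly this many bytes before
// the end of the call, which is how linkCall() and repatchCall() locate it.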
#define REPATCH_OFFSET_CALL_R11 3

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
protected:
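    // r11 is reserved as a scratch register; many of the operations below clobber
    // it, so callers must not expect it to survive across these helpers.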
    static const X86::RegisterID scratchRegister = X86::r11;

public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

    void add32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void or32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

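    // In 64-bit mode only eax/rax has a single-instruction form of mov that takes a
    // full 64-bit absolute address (the moffs encodings), so loads from arbitrary
    // addresses into other registers are shuffled through eax and then swapped back.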
    void load32(void* address, RegisterID dest)
    {
        if (dest == X86::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(X86::eax, dest);
            m_assembler.movl_mEAX(address);
            swap(X86::eax, dest);
        }
    }

    void loadDouble(void* address, FPRegisterID dest)
    {
        move(ImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
    }

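    // Note: the int32 is read here, at code generation time, and baked into the
    // generated code as an immediate; later changes to *src.m_ptr are not observed.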
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        move(Imm32(*static_cast<int32_t*>(src.m_ptr)), scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

    void store32(Imm32 imm, void* address)
    {
        move(X86::eax, scratchRegister);
        move(imm, X86::eax);
        m_assembler.movl_EAXm(address);
        move(scratchRegister, X86::eax);
    }

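    // A patchable call: load the target into r11 with a 10-byte movabs
    // (REX.W B8+r, imm64), then "call *%r11" (3 bytes). The ASSERT checks the
    // layout that REPATCH_OFFSET_CALL_R11 encodes: the pointer immediate ends
    // exactly 3 bytes before the end of the call.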
    Call call()
    {
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
        ASSERT(differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT(differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT(differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }


    void addPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void addPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void addPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.addq_rr(scratchRegister, dest);
    }

    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void addPtr(Imm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void addPtr(Imm32 imm, AbsoluteAddress address)
    {
        move(ImmPtr(address.m_ptr), scratchRegister);
        addPtr(imm, Address(scratchRegister));
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void andPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void orPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.orq_rr(scratchRegister, dest);
    }

    void orPtr(Imm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86::ecx) {
            swap(shift_amount, X86::ecx);

            // E.g. transform "sarq %rax, %rax" -> "xchgq %rax, %rcx; sarq %rcx, %rcx; xchgq %rax, %rcx"
            if (dest == shift_amount)
                m_assembler.sarq_CLr(X86::ecx);
            // E.g. transform "sarq %rax, %rcx" -> "xchgq %rax, %rcx; sarq %rcx, %rax; xchgq %rax, %rcx"
            else if (dest == X86::ecx)
                m_assembler.sarq_CLr(shift_amount);
            // E.g. transform "sarq %rax, %rbx" -> "xchgq %rax, %rcx; sarq %rcx, %rbx; xchgq %rax, %rcx"
            else
                m_assembler.sarq_CLr(dest);

            swap(shift_amount, X86::ecx);
        } else
            m_assembler.sarq_CLr(dest);
    }

    void rshiftPtr(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        m_assembler.subq_ir(imm.m_value, dest);
    }

    void subPtr(ImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.subq_rr(scratchRegister, dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xorPtr(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }


    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadPtr(void* address, RegisterID dest)
    {
        if (dest == X86::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(X86::eax, dest);
            m_assembler.movq_mEAX(address);
            swap(X86::eax, dest);
        }
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storePtr(RegisterID src, void* address)
    {
        if (src == X86::eax)
            m_assembler.movq_EAXm(address);
        else {
            swap(X86::eax, src);
            m_assembler.movq_EAXm(address);
            swap(X86::eax, src);
        }
    }

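    // x86-64 has no form of mov that stores a 64-bit immediate to memory, so
    // immediates that fit in a sign-extended 32 bits are stored directly and
    // anything larger goes through the scratch register.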
    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
        intptr_t ptr = imm.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(ptr))
            m_assembler.movq_i32m(static_cast<int>(ptr), address.offset, address.base);
        else {
            move(imm, scratchRegister);
            storePtr(scratchRegister, address);
        }
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

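    // Bitwise moves between a 64-bit GPR and an XMM register (the SSE2 movq
    // forms); no conversion is performed.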
    void movePtrToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

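    // setCC only writes the low byte of dest, so movzbl zero-extends the result
    // to the full register.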
    void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

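    // cmpq only accepts a sign-extended 32-bit immediate, so pointers that fit
    // are compared directly; larger values are materialized in the scratch
    // register first.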
    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    {
        intptr_t imm = right.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            if (!imm)
                m_assembler.testq_rr(left, left);
            else
                m_assembler.cmpq_ir(imm, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        } else {
            move(right, scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
    }

    Jump branchPtr(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        move(ImmPtr(left.m_ptr), scratchRegister);
        return branchPtr(cond, Address(scratchRegister), right);
    }

    Jump branchPtr(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(Condition cond, Address left, ImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        addPtr(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        subPtr(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

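    // Emits a 10-byte movabs (mov r64, imm64) whose immediate LinkBuffer and
    // RepatchBuffer can later rewrite in place.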
    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        storePtr(scratchRegister, address);
        return label;
    }

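    // The returned Label marks a plain movq load; on x86-64 the leaq encoding
    // (opcode 8D) is the same length as movq's (8B), so the load can later be
    // repatched into an LEA in place.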
    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        loadPtr(address, dest);
        return label;
    }

    bool supportsFloatingPoint() const { return true; }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    bool supportsFloatingPointTruncate() const { return true; }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

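    // Near calls are direct and are linked by patching their rel32 operand.
    // All other calls go through r11 (see call() above), so they are linked and
    // repatched by rewriting the pointer that sits REPATCH_OFFSET_CALL_R11 bytes
    // before the end of the call.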
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPATCH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86_64_h