/*
 * Copyright (C) 2008, 2012, 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

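// Byte distance from the end of the patchable 10-byte "movq imm64, %r11" (where the
// DataLabelPtr points) to the end of the "call *%r11" / "jmp *%r11" that follows it;
// both indirect instructions encode to 3 bytes (41 FF D3 / 41 FF E3).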
#define REPTACH_OFFSET_CALL_R11 3

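// True if the 64-bit value can be encoded as a sign-extended 32-bit immediate.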
inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load8;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

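    // x86-64 has no general r/m addressing forms that take a 64-bit absolute address,
    // so these AbsoluteAddress helpers first materialize the address into the scratch
    // register (r11) and then operate through it.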
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(Address(scratchRegister), dest);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(reg, Address(scratchRegister));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }

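    // Only rax has a dedicated "load from 64-bit absolute address" encoding (the moffs
    // form of MOV); for any other destination the address is materialized first.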
    void load32(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load32(dest, dest);
        }
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        m_assembler.addsd_mr(0, scratchRegister, dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store32(imm, scratchRegister);
    }

    void store32(RegisterID source, void* address)
    {
        if (source == X86Registers::eax)
            m_assembler.movl_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store32(source, scratchRegister);
        }
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(imm, Address(scratchRegister));
    }

    void store8(RegisterID reg, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(reg, Address(scratchRegister));
    }

#if OS(WINDOWS)
    Call callWithSlowPathReturnType()
    {
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
        // rdx should contain the first argument, r8 the second, and r9 the third.
        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
        // The parameters are assumed to already be shifted to the right when entering this method.
        // Note: this implementation supports up to 3 parameters.

        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // We also need to allocate shadow space on the stack for the 4 parameter registers,
        // 16 bytes for the return value, and 16 bytes for the frame pointer and return address (not populated).
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // The first parameter register should contain a pointer to the stack-allocated space for the return value.
        move(X86Registers::esp, X86Registers::ecx);
        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);

        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);

        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // Copy the return value into rax and rdx.
        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
        load64(Address(X86Registers::eax), X86Registers::eax);

        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }
#endif

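    // Emits a patchable far call:
    //     movq $imm64, %r11    ; 49 BB <imm64>  (10 bytes; imm64 repatched by linkCall)
    //     call *%r11           ; 41 FF D3       (3 bytes = REPTACH_OFFSET_CALL_R11)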
    Call call()
    {
#if OS(WINDOWS)
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // On Windows we need to copy the arguments that don't fit in registers to the stack locations where the callee expects to find them.
        // We don't know the number of arguments at this point, so arguments 5 and 6 are always copied.

        // Copy argument 5.
        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t))));

        // Copy argument 6.
        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t))));

        // We also need to allocate shadow space on the stack for the 4 parameter registers,
        // 16 bytes for the frame pointer and return address (not populated),
        // and 16 bytes for two more parameters, since the call can have up to 6 parameters.
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
#if OS(WINDOWS)
        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }

    // Address is a memory location containing the address to jump to.
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        jump(Address(scratchRegister));
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), scratchRegister);
        add32(src, Address(scratchRegister));
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void add64(Address src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, dest);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), scratchRegister);
        add64(Address(scratchRegister), dest);
    }

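    // When adding exactly 1, the shorter incq is emitted. incq does not update CF,
    // but the ResultCondition codes paired with these adds (overflow, sign, zero)
    // do not depend on CF.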
    void add64(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(srcDest);
        else
            m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(dest);
        else {
            move(imm, scratchRegister);
            add64(scratchRegister, dest);
        }
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        if (imm.m_value == 1)
            m_assembler.incq_m(address.offset, address.base);
        else
            m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add64(imm, Address(scratchRegister));
    }

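    // leaq computes base + displacement without writing EFLAGS, so this add can sit
    // between a flag-setting instruction and the branch that consumes the flags.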
    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void and64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void and64(TrustedImmPtr imm, RegisterID srcDest)
    {
        move(imm, scratchRegister);
        and64(scratchRegister, srcDest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shlq_i8r(imm.m_value, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrq_i8r(imm.m_value, dest);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.imulq_rr(src, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.negq_r(dest);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        or64(scratchRegister, dest);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or64(op2, dest);
        else {
            move(op2, dest);
            or64(op1, dest);
        }
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.rorq_i8r(imm.m_value, srcDst);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else
            m_assembler.subq_ir(imm.m_value, dest);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else {
            move(imm, scratchRegister);
            sub64(scratchRegister, dest);
        }
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xor64(RegisterID src, Address dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base);
    }

    void xor64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void load64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load64(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load64(dest, dest);
        }
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store64(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store64(src, scratchRegister);
        }
    }

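    // There is no "store imm64 to memory" form; movq to memory sign-extends a 32-bit
    // immediate. Values outside that range are staged through the scratch register.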
    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (CAN_SIGN_EXTEND_32_64(imm.m_value))
            m_assembler.movq_i32m(static_cast<int>(imm.m_value), address.offset, address.base);
        else {
            move(imm, scratchRegister);
            store64(scratchRegister, address);
        }
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        move(imm, scratchRegister);
        m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

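    // Raw 64-bit bit copies between a GPR and an XMM register (MOVQ); no numeric
    // conversion is performed.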
    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

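    // For (in)equality against zero, "testq reg, reg" is preferred over "cmpq $0, reg";
    // it sets ZF identically and avoids encoding an immediate.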
    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
            m_assembler.testq_rr(left, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        }
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return branch64(cond, Address(scratchRegister), right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
    {
        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        set32(x86Condition(cond), dest);
    }

    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        m_assembler.testq_rr(reg, mask);
        set32(x86Condition(cond), dest);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, scratchRegister);
        return branchTest64(cond, scratchRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
    {
        m_assembler.testq_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

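    // imulq reliably sets only OF/CF; for any condition other than Overflow the
    // result is re-tested to establish ZF/SF.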
    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul64(src, dest);
        if (cond != Overflow)
            m_assembler.testq_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub64(cond, src2, dest);
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        neg64(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

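    // The abort reason (and optional detail) are parked in r11 and r10 so they are
    // visible in a debugger when the int3 emitted by breakpoint() traps.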
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), X86Registers::r11);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), X86Registers::r10);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movq_mr(address.offset, address.base, dest);
        return result;
    }

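    // movq_i64r always emits the full 10-byte "movq imm64, reg" form, so the 8-byte
    // immediate can later be repatched in place.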
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.m_value, dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister);
        dataLabel = DataLabel32(this);
        return branch32(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        store64(scratchRegister, address);
        return label;
    }

    using MacroAssemblerX86Common::branch8;
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right);
    }

    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_rr(src, dest);
    }

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
    }

    static bool haveScratchRegisterForBlinding() { return true; }
    static RegisterID scratchRegisterForBlinding() { return scratchRegister; }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

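    // The data labels below point just past the patchable move; stepping back over
    // REX + opcode + immediate lands on the start of the instruction, which is where
    // a jump replacement would be installed.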
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
    {
        return startOfBranch32WithPatchOnRegister(label);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
    {
        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, scratchRegister);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

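    // Near calls are emitted as direct rel32 calls and linked in place; all other
    // calls go through the movq/call-r11 sequence, so linking patches the pointer
    // located REPTACH_OFFSET_CALL_R11 bytes before the call's label instead.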
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86_64_h