/*
 * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

#if USE(MASM_PROBE)
#include <wtf/StdLibExtras.h>
#endif

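// REPATCH_OFFSET_CALL_R11 is the byte distance from the end of a patchable call
// sequence (movabsq $target, %r11; call *%r11) back to the end of the movabsq
// immediate: the indirect call through r11 encodes to 3 bytes (41 FF D3).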
#define REPATCH_OFFSET_CALL_R11 3

inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
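// For example, CAN_SIGN_EXTEND_32_64(0x000000007fffffffll) is true, while
// CAN_SIGN_EXTEND_32_64(0x0000000080000000ll) is false: truncation yields
// INT32_MIN, which sign-extends back to 0xffffffff80000000.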

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load8;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

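    // The AbsoluteAddress/void* overloads below share one pattern: materialize
    // the 64-bit address into the scratch register (r11 on X86-64), then defer
    // to the register-relative overload. For example, add32(TrustedImm32(1), addr)
    // emits roughly:
    //
    //     movabsq $addr, %r11
    //     addl    $1, (%r11)
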
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(Address(scratchRegister), dest);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(reg, Address(scratchRegister));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }

    void load32(const void* address, RegisterID dest)
    {
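        // eax/rax has a compact move-from-absolute-address encoding (the moffs
        // form of MOV), so when the destination is eax we can load in a single
        // instruction instead of materializing the address first.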
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load32(dest, dest);
        }
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        m_assembler.addsd_mr(0, scratchRegister, dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store32(imm, scratchRegister);
    }

    void store32(RegisterID source, void* address)
    {
        if (source == X86Registers::eax)
            m_assembler.movl_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store32(source, scratchRegister);
        }
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(imm, Address(scratchRegister));
    }

    void store8(RegisterID reg, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(reg, Address(scratchRegister));
    }

#if OS(WINDOWS)
    Call callWithSlowPathReturnType()
    {
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the
        // return value. On entry, rcx should contain a pointer to this stack space. The other parameters are
        // shifted to the right: rdx contains the first argument, r8 the second, and r9 the third. On return,
        // rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value
        // to be split between the two. It is assumed that the parameters are already shifted to the right when
        // entering this method.
        // Note: this implementation supports up to 3 parameters.

        // The JIT relies on the CallerFrame (frame pointer) being put on the stack. On Win64 we need to copy
        // the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a
        // frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // We need to allocate: 32 bytes of shadow space for the 4 parameter registers, 16 bytes for the
        // return value, and 16 bytes for the frame pointer and return address (not populated).
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
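
        // Stack layout at this point, relative to the new rsp:
        //   [rsp + 0 .. 31]:  shadow space for the 4 register parameters
        //   [rsp + 32 .. 47]: 16-byte slot for the returned value
        //   [rsp + 48]:       copy of the caller's frame pointer
        //   [rsp + 56]:       slot for the return address (not populated)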

        // The first parameter register should contain a pointer to the stack-allocated space for the return value.
        move(X86Registers::esp, X86Registers::ecx);
        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);

        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);

        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // Copy the return value into rax and rdx.
        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
        load64(Address(X86Registers::eax), X86Registers::eax);

        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }
#endif

    Call call()
    {
#if OS(WINDOWS)
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack. On Win64 we need to copy
        // the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a
        // frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // On Windows we need to copy the arguments that don't fit in registers to the stack locations where
        // the callee expects to find them. We don't know the number of arguments at this point, so
        // arguments 5 and 6 are always copied.

        // Copy argument 5.
        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -4 * sizeof(int64_t)));

        // Copy argument 6.
        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -3 * sizeof(int64_t)));

        // We need to allocate: 32 bytes of shadow space for the 4 parameter registers, 16 bytes for the
        // frame pointer and return address (not populated), and 16 bytes for two more stack parameters,
        // since the call can have up to 6 parameters.
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
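
        // After this adjustment, the copies staged above land exactly where the
        // Win64 callee expects its stack arguments, relative to the new rsp:
        //   [rsp + 0 .. 31]: shadow space for the 4 register parameters
        //   [rsp + 32]:      argument 5
        //   [rsp + 40]:      argument 6
        //   [rsp + 48]:      copy of the caller's frame pointer
        //   [rsp + 56]:      slot for the return address (not populated)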
#endif
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
#if OS(WINDOWS)
        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11);
        return result;
    }

    // Address is a memory location containing the address to jump to.
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        jump(Address(scratchRegister));
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), scratchRegister);
        add32(src, Address(scratchRegister));
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void add64(Address src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, dest);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), scratchRegister);
        add64(Address(scratchRegister), dest);
    }

    void add64(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(srcDest);
        else
            m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(dest);
        else {
            move(imm, scratchRegister);
            add64(scratchRegister, dest);
        }
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add64(imm, Address(scratchRegister));
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
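        // leaq computes the sum without modifying the condition flags
        // (unlike addq), hence the "NoFlags" name.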
        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void and64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void and64(TrustedImmPtr imm, RegisterID srcDest)
    {
        move(imm, scratchRegister);
        and64(scratchRegister, srcDest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shlq_i8r(imm.m_value, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.imulq_rr(src, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.negq_r(dest);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        or64(scratchRegister, dest);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or64(op2, dest);
        else {
            move(op2, dest);
            or64(op1, dest);
        }
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.rorq_i8r(imm.m_value, srcDst);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else
            m_assembler.subq_ir(imm.m_value, dest);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else {
            move(imm, scratchRegister);
            sub64(scratchRegister, dest);
        }
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xor64(RegisterID src, Address dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base);
    }

    void xor64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void load64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load64(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load64(dest, dest);
        }
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store64(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store64(src, scratchRegister);
        }
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
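        // movq with a 32-bit immediate sign-extends it to 64 bits, so the
        // single-instruction form is only usable when the value round-trips
        // through int32_t; otherwise go through the scratch register.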
        if (CAN_SIGN_EXTEND_32_64(imm.m_value))
            m_assembler.movq_i32m(static_cast<int>(imm.m_value), address.offset, address.base);
        else {
            move(imm, scratchRegister);
            store64(scratchRegister, address);
        }
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        move(imm, scratchRegister);
        m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
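        // setCC writes only the low byte of dest; the following movzbl
        // zero-extends it so dest ends up holding a clean 0 or 1.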
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
            m_assembler.testq_rr(left, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        }
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return branch64(cond, Address(scratchRegister), right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
    {
        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb;
        // a mask with bit 7 set must use the wider test, since the byte-sized test would set
        // the sign flag differently from the 64-bit result.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        set32(x86Condition(cond), dest);
    }

    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        m_assembler.testq_rr(reg, mask);
        set32(x86Condition(cond), dest);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, scratchRegister);
        return branchTest64(cond, scratchRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
    {
        m_assembler.testq_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul64(src, dest);
        if (cond != Overflow)
            m_assembler.testq_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub64(cond, src2, dest);
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        neg64(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), X86Registers::r11);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), X86Registers::r10);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movq_mr(address.offset, address.base, dest);
        return result;
    }

    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
    {
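        // Note: even for a 32-bit initial value we emit a movq with a full
        // 64-bit immediate, presumably so the patch site always has a fixed,
        // pointer-sized immediate field available for later repatching.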
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.m_value, dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister);
        dataLabel = DataLabel32(this);
        return branch32(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        store64(scratchRegister, address);
        return label;
    }

    using MacroAssemblerX86Common::branch8;
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right);
    }

    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_rr(src, dest);
    }

    static bool supportsFloatingPoint() { return true; }
    // See the comment on MacroAssemblerARMv7::supportsFloatingPointTruncate().
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
    }

    static bool haveScratchRegisterForBlinding() { return true; }
    static RegisterID scratchRegisterForBlinding() { return scratchRegister; }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
    {
        return startOfBranch32WithPatchOnRegister(label);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
    {
        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, scratchRegister);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

#if USE(MASM_PROBE)
    // This function emits code to preserve the CPUState (e.g. registers),
    // call a user-supplied probe function, and restore the CPUState before
    // continuing with other JIT generated code.
    //
    // The user-supplied probe function will be called with a single pointer to
    // a ProbeContext struct (defined above) which contains, among other things,
    // the preserved CPUState. This allows the user probe function to inspect
    // the CPUState at that point in the JIT generated code.
    //
    // If the user probe function alters the register values in the ProbeContext,
    // the altered values will be loaded into the CPU registers when the probe
    // returns.
    //
    // The ProbeContext is stack allocated and is only valid for the duration
    // of the call to the user probe function.

    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
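
    // A minimal usage sketch (illustrative only; `dumpContext` and `jit` are
    // hypothetical names, and ProbeContext comes from MacroAssemblerX86Common.h):
    //
    //     static void dumpContext(ProbeContext* context)
    //     {
    //         // Inspect (or modify) the preserved state via context->cpu.
    //     }
    //     ...
    //     jit.probe(dumpContext);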
#endif // USE(MASM_PROBE)

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

#if USE(MASM_PROBE)
    inline TrustedImm64 trustedImm64FromPtr(void* ptr)
    {
        return TrustedImm64(TrustedImmPtr(ptr));
    }

    inline TrustedImm64 trustedImm64FromPtr(ProbeFunction function)
    {
        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }

    inline TrustedImm64 trustedImm64FromPtr(void (*function)())
    {
        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }
#endif
};

#if USE(MASM_PROBE)

extern "C" void ctiMasmProbeTrampoline();

// What code is emitted for the probe?
// ===================================
// We want to keep the size of the emitted probe invocation code as compact as
// possible to minimize the perturbation to the JIT generated code. However,
// we also need to preserve the CPU registers and set up the ProbeContext to be
// passed to the user probe function.
//
// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
// We'll let ctiMasmProbeTrampoline handle the rest of the probe invocation
// work, i.e. saving the CPUState (and setting up the ProbeContext), calling the
// user probe function, and restoring the CPUState before returning to JIT
// generated code.
//
// What values are in the saved registers?
// =======================================
// Conceptually, the saved registers should contain values as if the probe
// were not present in the JIT generated code. Hence, they should contain
// values that are expected at the start of the instruction immediately
// following the probe.
//
// Specifically, the saved stack pointer register will point to the stack
// position before we push the ProbeContext frame. The saved rip will point to
// the address of the instruction immediately following the probe.

inline void MacroAssemblerX86_64::probe(MacroAssemblerX86_64::ProbeFunction function, void* arg1, void* arg2)
{
    // Preserve the original rsp and rax, then push the probe arguments
    // (arg2, arg1, function) for ctiMasmProbeTrampoline to consume, and
    // finally call the trampoline through rax. The trampoline saves the
    // remaining CPU state, builds the ProbeContext, calls the user probe
    // function, and restores state afterwards.
    push(RegisterID::esp);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(arg2), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(arg1), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(function), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
    call(RegisterID::eax);
}
#endif // USE(MASM_PROBE)

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86_64)

#endif // MacroAssemblerX86_64_h