/*
 * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

#if USE(MASM_PROBE)
#include <wtf/StdLibExtras.h>
#endif

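// REPTACH_OFFSET_CALL_R11 is the number of bytes separating the patchable
// movq imm64 emitted by moveWithPatch() from the indirect call or jump through
// r11 that follows it, i.e. the 3-byte encoding of that call/jump (REX.B
// prefix, FF opcode, ModRM byte). linkCall(), repatchCall() and
// readCallTarget() below step back by this offset from a Call or Jump location
// to find the 64-bit immediate to rewrite.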
#define REPTACH_OFFSET_CALL_R11 3

inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::branchAdd32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load8;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::store8;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::jump;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void add32(AbsoluteAddress address, RegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(Address(scratchRegister), dest);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void or32(RegisterID reg, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(reg, Address(scratchRegister));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), dest);
        load8(dest, dest);
    }

    void load32(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load32(dest, dest);
        }
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        m_assembler.addsd_mr(0, scratchRegister, dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store32(imm, scratchRegister);
    }

    void store32(RegisterID source, void* address)
    {
        if (source == X86Registers::eax)
            m_assembler.movl_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store32(source, scratchRegister);
        }
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(imm, Address(scratchRegister));
    }

    void store8(RegisterID reg, void* address)
    {
        move(TrustedImmPtr(address), scratchRegister);
        store8(reg, Address(scratchRegister));
    }

#if OS(WINDOWS)
    Call callWithSlowPathReturnType()
    {
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
        // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument.
        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
        // It is assumed that the parameters are already shifted to the right when entering this method.
        // Note: this implementation supports up to 3 parameters.

        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // In addition, we need to allocate 16 bytes for the return value.
        // Also, we should allocate 16 bytes for the frame pointer and the return address (not populated).
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
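        // Illustrative summary of the layout established above and below, relative
        // to the adjusted rsp:
        //   [rsp + 0..31]  shadow space for the four register parameters
        //   [rsp + 32..47] 16-byte slot for the return value (rcx is pointed here below)
        //   [rsp + 48]     saved frame pointer (stored above at the original rsp - 16)
        //   [rsp + 56]     return address slot (not populated)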

        // The first parameter register should contain a pointer to the stack allocated space for the return value.
        move(X86Registers::esp, X86Registers::ecx);
        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);

        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);

        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);

        // Copy the return value into rax and rdx.
        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
        load64(Address(X86Registers::eax), X86Registers::eax);

        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }
#endif

    Call call()
    {
#if OS(WINDOWS)
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
        store64(X86Registers::ebp, Address(X86Registers::esp, -16));

        // On Windows we need to copy the arguments that don't fit in registers to the stack locations where the callee expects to find them.
        // We don't know the number of arguments at this point, so arguments 5 and 6 are always copied.

        // Copy argument 5
        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -4 * sizeof(int64_t)));

        // Copy argument 6
        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister);
        store64(scratchRegister, Address(X86Registers::esp, -3 * sizeof(int64_t)));

        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
        // Also, we should allocate 16 bytes for the frame pointer and the return address (not populated).
        // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
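        // Illustrative summary of the layout established above, relative to the
        // adjusted rsp:
        //   [rsp + 0..31]  shadow space for the four register parameters
        //   [rsp + 32]     copy of argument 5, [rsp + 40] copy of argument 6
        //   [rsp + 48]     saved frame pointer (stored above at the original rsp - 16)
        //   [rsp + 56]     return address slot (not populated)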
#endif
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
#if OS(WINDOWS)
        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
#endif
        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }

    // Address is a memory location containing the address to jump to
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        jump(Address(scratchRegister));
    }

    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), scratchRegister);
        add32(src, Address(scratchRegister));
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void add64(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void add64(Address src, RegisterID dest)
    {
        m_assembler.addq_mr(src.offset, src.base, dest);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), scratchRegister);
        add64(Address(scratchRegister), dest);
    }

    void add64(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(srcDest);
        else
            m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.incq_r(dest);
        else {
            move(imm, scratchRegister);
            add64(scratchRegister, dest);
        }
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add64(imm, Address(scratchRegister));
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void and64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void and64(TrustedImmPtr imm, RegisterID srcDest)
    {
        move(imm, scratchRegister);
        and64(scratchRegister, srcDest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shlq_i8r(imm.m_value, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarq_i8r(imm.m_value, dest);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.imulq_rr(src, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.negq_r(dest);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        or64(scratchRegister, dest);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or64(op2, dest);
        else {
            move(op2, dest);
            or64(op1, dest);
        }
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or64(imm, dest);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.rorq_i8r(imm.m_value, srcDst);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else
            m_assembler.subq_ir(imm.m_value, dest);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.decq_r(dest);
        else {
            move(imm, scratchRegister);
            sub64(scratchRegister, dest);
        }
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xor64(RegisterID src, Address dest)
    {
        m_assembler.xorq_rm(src, dest.offset, dest.base);
    }

    void xor64(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }

    void load64(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load64(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(TrustedImmPtr(address), dest);
            load64(dest, dest);
        }
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store64(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            move(TrustedImmPtr(address), scratchRegister);
            store64(src, scratchRegister);
        }
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (CAN_SIGN_EXTEND_32_64(imm.m_value))
            m_assembler.movq_i32m(static_cast<int>(imm.m_value), address.offset, address.base);
        else {
            move(imm, scratchRegister);
            store64(scratchRegister, address);
        }
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        move(imm, scratchRegister);
        m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpq_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
            m_assembler.testq_rr(left, left);
            return Jump(m_assembler.jCC(x86Condition(cond)));
        }
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return branch64(cond, Address(scratchRegister), right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        move(right, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
    {
        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        set32(x86Condition(cond), dest);
    }

    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        m_assembler.testq_rr(reg, mask);
        set32(x86Condition(cond), dest);
    }

    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, scratchRegister);
        return branchTest64(cond, scratchRegister, mask);
    }

    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
    {
        m_assembler.testq_rm(reg, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul64(src, dest);
        if (cond != Overflow)
            m_assembler.testq_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub64(cond, src2, dest);
    }

    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        neg64(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

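    // The abort reason (and, for the two-argument overload, the auxiliary value)
    // is moved into an otherwise unused register before hitting the breakpoint,
    // presumably so it can be read back from a debugger or crash report at the
    // trap site.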
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), X86Registers::r11);
        breakpoint();
    }

    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), X86Registers::r10);
        abortWithReason(reason);
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
        m_assembler.movq_mr(address.offset, address.base, dest);
        return result;
    }

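    // moveWithPatch() always emits the full 10-byte movq imm64 form (1 REX byte,
    // 1 opcode byte, 8 immediate bytes) so that the immediate can later be located
    // and rewritten; startOfBranchPtrWithPatchOnRegister() below assumes this
    // fixed size when walking back from the label.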
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movq_i64r(initialValue.m_value, dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branch64(cond, left, scratchRegister);
    }

    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        padBeforePatch();
        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister);
        dataLabel = DataLabel32(this);
        return branch32(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        store64(scratchRegister, address);
        return label;
    }

    using MacroAssemblerX86Common::branch8;
    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right);
    }

    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_rr(src, dest);
    }

    static bool supportsFloatingPoint() { return true; }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
    }

    static bool haveScratchRegisterForBlinding() { return true; }
    static RegisterID scratchRegisterForBlinding() { return scratchRegister; }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
    {
        return startOfBranch32WithPatchOnRegister(label);
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
    {
        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, scratchRegister);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
    }

#if USE(MASM_PROBE)
    // This function emits code to preserve the CPUState (e.g. registers),
    // call a user supplied probe function, and restore the CPUState before
    // continuing with other JIT generated code.
    //
    // The user supplied probe function will be called with a single pointer to
    // a ProbeContext struct (defined above) which contains, among other things,
    // the preserved CPUState. This allows the user probe function to inspect
    // the CPUState at that point in the JIT generated code.
    //
    // If the user probe function alters the register values in the ProbeContext,
    // the altered values will be loaded into the CPU registers when the probe
    // returns.
    //
    // The ProbeContext is stack allocated and is only valid for the duration
    // of the call to the user probe function.

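    // Illustrative usage sketch (the probe function and the 'jit' assembler
    // instance below are hypothetical, not part of this header):
    //
    //     static void dumpState(ProbeContext* context)
    //     {
    //         // Inspect or modify the preserved CPUState carried by 'context' here.
    //     }
    //     ...
    //     jit.probe(dumpState, someArg1, someArg2);
    //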
    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
#endif // USE(MASM_PROBE)

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

#if USE(MASM_PROBE)
    inline TrustedImm64 trustedImm64FromPtr(void* ptr)
    {
        return TrustedImm64(TrustedImmPtr(ptr));
    }

    inline TrustedImm64 trustedImm64FromPtr(ProbeFunction function)
    {
        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }

    inline TrustedImm64 trustedImm64FromPtr(void (*function)())
    {
        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }
#endif
};

#if USE(MASM_PROBE)

extern "C" void ctiMasmProbeTrampoline();

// What code is emitted for the probe?
// ===================================
// We want to keep the size of the emitted probe invocation code as compact as
// possible to minimize the perturbation to the JIT generated code. However,
// we also need to preserve the CPU registers and set up the ProbeContext to be
// passed to the user probe function.
//
// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
// We'll let ctiMasmProbeTrampoline handle the rest of the probe invocation
// work, i.e. saving the CPUState (and setting up the ProbeContext), calling the
// user probe function, and restoring the CPUState before returning to JIT
// generated code.
//
// What values are in the saved registers?
// =======================================
// Conceptually, the saved registers should contain values as if the probe
// is not present in the JIT generated code. Hence, they should contain values
// that are expected at the start of the instruction immediately following the
// probe.
//
// Specifically, the saved stack pointer register will point to the stack
// position before we push the ProbeContext frame. The saved rip will point to
// the address of the instruction immediately following the probe.

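// At the point of the call into ctiMasmProbeTrampoline emitted below, the stack
// holds, from the return address upward: the probe function pointer, arg1, arg2,
// the original rax, and the original rsp. The trampoline is assumed to consume
// this frame when it builds the ProbeContext described above.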
inline void MacroAssemblerX86_64::probe(MacroAssemblerX86_64::ProbeFunction function, void* arg1, void* arg2)
{
    push(RegisterID::esp);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(arg2), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(arg1), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(function), RegisterID::eax);
    push(RegisterID::eax);
    move(trustedImm64FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
    call(RegisterID::eax);
}
#endif // USE(MASM_PROBE)

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86_64_h