/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
protected:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

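    // A DoubleCondition is an X86Assembler::Condition packed together with two extra
    // flag bits: DoubleConditionBitInvert means the operands of the ucomisd comparison
    // are swapped before testing, and DoubleConditionBitSpecial marks conditions that
    // need extra handling for unordered (NaN) comparisons (see branchDouble()).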
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;

#if ENABLE(JIT_CONSTANT_BLINDING)
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited.
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
#endif

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
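    //
    // e.g. add32(TrustedImm32(1), Address(base, 4)) increments the 32-bit value
    // stored at base + 4.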

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

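    // Three-operand add, computed with a single lea; note that unlike the
    // two-operand forms above this does not set the condition flags.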
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
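        // andnpd computes dst = ~dst & src; with dst holding -0.0 (only the sign
        // bit set) this clears the sign bit of src, yielding its absolute value.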
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
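        // xorpd with -0.0 flips the sign bit, negating the value.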
        m_assembler.xorpd_rr(src, dst);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
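    //
    // e.g. store32(TrustedImm32(0), Address(base, 8)) writes zero to the 32-bit
    // word at base + 8.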

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
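        // (In 32-bit mode byte-register encodings 4-7 select ah/ch/dh/bh, so
        // esp..edi cannot be used as byte sources.)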
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
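        // (divsd computes dest = dest / src, so op1 must be moved into dest first;
        // that move would clobber op2 if op2 aliased dest.)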
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

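        // ucomisd sets ZF, PF and CF; PF is set if either operand is NaN (the
        // comparison is unordered). The 'Special' conditions below test PF
        // explicitly so that NaN operands take the correct branch.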
        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
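        // Truncate via the 64-bit form of cvttsd2si so that unsigned values in
        // [2^31, 2^32) convert correctly; the result fits in the low 32 bits of dest.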
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
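        // xorl reg, reg is a shorter encoding for loading zero and is recognized
        // by the CPU as a zeroing idiom.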
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
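    //
    // e.g. branchTest32(NonZero, reg, TrustedImm32(1)) branches if the low bit
    // of reg is set.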

public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
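    // (x86 condition encodings come in complementary pairs that differ only in
    // the low bit, so XOR-ing with 1 flips the condition.)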
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void nop()
    {
        m_assembler.nop();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
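            // (%ebx is saved and restored by hand because cpuid clobbers it and
            // it may hold the GOT pointer in PIC builds.)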
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds this method is provided to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h