/*
 * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> {
public:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

protected:
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

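    // How these bits are consumed (editor's summary, mirroring branchDouble()
    // below): DoubleConditionBitInvert swaps the ucomisd operand order,
    // DoubleConditionBitSpecial marks the two conditions that need an explicit
    // parity (unordered/NaN) check, and the low bits are the raw X86Assembler
    // condition code used for the jump.
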
    static const RegisterID stackPointerRegister = X86Registers::esp;
    static const RegisterID framePointerRegister = X86Registers::ebp;

    static bool canBlind() { return true; }
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).

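    // Illustrative usage (editor's sketch, not part of the original header;
    // 'masm', 'regT0' and 'regT1' are assumed names):
    //
    //     masm.add32(TrustedImm32(1), regT0);       // regT0 += 1 (emits inc)
    //     masm.add32(Address(regT1, 8), regT0);     // regT0 += *(int32_t*)(regT1 + 8)
    //     masm.sub32(regT0, Address(regT1, 8));     // *(int32_t*)(regT1 + 8) -= regT0
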
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.inc_r(dest);
        else
            m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void countLeadingZeros32(RegisterID src, RegisterID dst)
    {
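        // bsr yields the index of the highest set bit (0..31) and leaves the
        // destination undefined for a zero source. For a non-zero source,
        // clz == 31 - index == index ^ 0x1f; a zero source falls through to
        // the explicit move of 32 below. (Editor's note on the existing code.)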
        m_assembler.bsr_rr(src, dst);
        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
        move(TrustedImm32(32), dst);

        Jump skipNonZeroCase = jump();
        srcIsNonZero.link(this);
        xor32(TrustedImm32(0x1f), dst);
        skipNonZeroCase.link(this);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.dec_r(dest);
        else
            m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32.
    // Address operand objects to loads and stores will be implicitly constructed
    // if a register is passed.

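    // Illustrative usage (editor's sketch, not part of the original header;
    // 'masm', 'regT0', 'regT1' and 'regT2' are assumed names):
    //
    //     masm.load32(Address(regT0, 4), regT1);                   // regT1 = *(int32_t*)(regT0 + 4)
    //     masm.load32(BaseIndex(regT0, regT2, TimesFour), regT1);  // regT1 = *(int32_t*)(regT0 + regT2 * 4)
    //     masm.store32(TrustedImm32(0), Address(regT0, 4));        // *(int32_t*)(regT0 + 4) = 0
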
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16SignedExtendTo32(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
    {
        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
            return X86Registers::eax;

        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
            return X86Registers::ebx;

        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
        return X86Registers::ecx;
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
    {
        if (address.base != X86Registers::eax)
            return X86Registers::eax;

        ASSERT(address.base != X86Registers::edx);
        return X86Registers::edx;
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, Address address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.

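    // Illustrative usage (editor's sketch, not part of the original header;
    // 'masm', 'regT0', 'fpRegT0' and 'fpRegT1' are assumed names):
    //
    //     masm.loadDouble(Address(regT0, 8), fpRegT0);   // fpRegT0 = *(double*)(regT0 + 8)
    //     masm.addDouble(fpRegT1, fpRegT0);              // fpRegT0 += fpRegT1
    //     masm.storeDouble(fpRegT0, Address(regT0, 8));  // write the result back
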
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.m_value, dest);
#else
        move(address, scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

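        // ucomisd sets ZF, PF and CF; PF is set when the comparison is
        // unordered, i.e. either operand is NaN. The two conditions below need
        // an explicit parity check, since their plain condition code alone
        // would give the wrong answer for unordered operands. (Editor's note.)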
        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
#if CPU(X86_64)
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            m_assembler.movmskpd_rr(src, scratchRegister);
            failureCases.append(branchTest32(NonZero, scratchRegister, TrustedImm32(1)));
            valueIsNonZero.link(this);
        }
#else
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
#endif

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

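    // Illustrative usage (editor's sketch, not part of the original header):
    //
    //     masm.push(regT0);                // stack grows by one machine word
    //     masm.push(TrustedImm32(42));
    //     masm.pop(regT1);                 // regT1 = 42
    //     masm.pop(regT0);                 // restores the original regT0
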
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.

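    // Illustrative usage (editor's sketch, not part of the original header).
    // Note the 64-bit caveats in the comments below: move(TrustedImm32, reg)
    // zero-extends, and on x86-64 move(reg, reg) copies the full 64-bit register.
    //
    //     masm.move(TrustedImm32(0), regT0);   // emits xor regT0, regT0
    //     masm.move(regT0, regT1);             // no instruction if regT0 == regT1
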
    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero-extended into the register;
        // it may be useful to have a separate version that sign extends the value.
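        // Zeroing via xor is a shorter encoding than mov $0, and is recognized
        // by the CPU as a dependency-breaking idiom. (Editor's note.)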
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations returns a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

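    // Illustrative usage (editor's sketch, not part of the original header;
    // 'masm' and 'regT0' are assumed names):
    //
    //     Jump isSmall = masm.branch32(LessThan, regT0, TrustedImm32(5)); // taken if regT0 < 5
    //     // ... fall-through path: regT0 >= 5 ...
    //     isSmall.link(&masm);   // bind the forward branch to this point
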
public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
            if (mask.m_value == 0xff)
                m_assembler.testb_rr(reg, reg);
            else
                m_assembler.testb_i8r(mask.m_value, reg);
        } else
            m_assembler.testl_i32r(mask.m_value, reg);
    }

    Jump branch(ResultCondition cond)
    {
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        test32(cond, reg, mask);
        return branch(cond);
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        generateTest32(address, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

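    // Illustrative usage (editor's sketch, not part of the original header):
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, regT1, regT0); // regT0 += regT1
    //     // ... fall-through path: the addition did not overflow ...
    //     // 'overflowed' would typically be linked to a slow path.
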
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        generateTest32(address, mask);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
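    // x86 encodes complementary conditions in adjacent code pairs (e.g.
    // ConditionE is 0x4 and ConditionNE is 0x5), so flipping the low bit
    // inverts the test. (Editor's note.)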
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void nop()
    {
        m_assembler.nop();
    }

    void memoryFence()
    {
        m_assembler.mfence();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

#if ENABLE(MASM_PROBE)
    // Methods required by the MASM_PROBE mechanism as defined in
    // AbstractMacroAssembler.h.
    static void printCPURegisters(CPUState&, int indentation = 0);
    static void printRegister(CPUState&, RegisterID);
    static void printRegister(CPUState&, FPRegisterID);
    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
#endif // ENABLE(MASM_PROBE)

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

    ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
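        // Where the mask fits in a single byte, test just that byte of the
        // operand at an adjusted offset; this emits a shorter instruction
        // than a full 32-bit test against a 32-bit immediate. (Editor's note.)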
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else if (!(mask.m_value & ~0xff))
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        else if (!(mask.m_value & ~0xff00))
            m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
        else if (!(mask.m_value & ~0xff0000))
            m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
        else if (!(mask.m_value & ~0xff000000))
            m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    }

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
            );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds we add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h