/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;

    static const int MaximumCompactPtrAlignedAddressOffset = 127;

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).

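    // As an illustrative sketch (not part of the original header), a client
    // might drive these operations as follows; 'masm' and the register
    // choices are hypothetical:
    //
    //     MacroAssemblerX86Common masm;
    //     masm.add32(TrustedImm32(4), X86Registers::eax);           // eax += 4
    //     masm.and32(X86Registers::edx, X86Registers::eax);         // eax &= edx
    //     masm.sub32(TrustedImm32(1), Address(X86Registers::ebx));  // [ebx] -= 1
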
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

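    // For illustration (an assumed rendering of the emitted sequence, not part
    // of the original header): lshift32(edx, eax) cannot use edx as the shift
    // count directly, so it comes out roughly as:
    //
    //     xchgl %edx, %ecx    // swap the shift amount into ecx
    //     shll  %cl, %eax     // shift dest by cl
    //     xchgl %edx, %ecx    // restore the original register contents
    //
    // If dest itself were ecx, the middle shift would instead target the
    // register that now holds dest's value (the swapped shift_amount register).
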
    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }


    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32.
    // Address operand objects to loads and stores will be implicitly
    // constructed if a register is passed.

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(value >= 0);
        ASSERT(value < MaximumCompactPtrAlignedAddressOffset);
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }
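
    // Illustrative note (not original to this header): the assertion above
    // rejects e.g. divDouble(fpA, fpB, fpB) with fpA != fpB, since moving fpA
    // into the destination would clobber the divisor fpB before it is used.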

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
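
    // A sketch of the decoding above (an assumed rendering, for illustration):
    // DoubleEqual must not take the branch when the operands are unordered, so
    // it is emitted as roughly:
    //
    //     ucomisd %xmm1, %xmm0
    //     jp   fallThrough        // unordered (NaN) - do not take the branch
    //     je   target
    //   fallThrough:
    //
    // Conditions carrying DoubleConditionBitInvert simply swap the ucomisd
    // operand order, so e.g. DoubleLessThan(a, b) becomes an 'above' test with
    // the operands exchanged.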

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }
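
    // Explanatory note (not original to this header): cvttsd2si produces the
    // "integer indefinite" value 0x80000000 whenever the double is NaN or out
    // of 32-bit range, which is why comparing against that sentinel detects
    // failure; INT_MIN itself also matches the sentinel, hence the spurious
    // branch noted above. A hypothetical usage sketch:
    //
    //     JumpList failures;
    //     failures.append(masm.branchTruncateDoubleToInt32(fpReg, gpReg));
    //     // On the fall-through path gpReg holds the truncated value.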

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }
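
    // Usage sketch (hypothetical names, not part of the original header):
    //
    //     JumpList notInt;
    //     masm.branchConvertDoubleToInt32(fpValue, gpResult, notInt, fpScratch);
    //     // Fall-through: gpResult holds an exact integer conversion;
    //     // notInt collects the -0.0 / NaN / inexact cases.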

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }
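
    // A hedged aside (not in the original header): materializing zero with
    // 'xorl reg, reg' is preferred because it is shorter than 'movl $0, reg'
    // (2 bytes rather than 5) and is recognized by the CPU as a zeroing,
    // dependency-breaking idiom. So move(TrustedImm32(0), X86Registers::eax)
    // assembles to 'xorl %eax, %eax'.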

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations returns a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !left.m_value)
            m_assembler.testl_rr(right, right);
        else
            m_assembler.cmpl_ir(left.m_value, right);
        return Jump(m_assembler.jCC(x86Condition(commute(cond))));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
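
    // Explanatory note (an assumption, not original to this header): the testb
    // shortcut is restricted to the low *seven* bits rather than eight so that
    // the flags of the byte-sized test match those a 32-bit testl would have
    // produced; if bit 7 could be set, a Signed condition might observe a
    // different sign flag. E.g. branchTest32(NonZero, reg, TrustedImm32(0x40))
    // can safely use the shorter byte-immediate encoding.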

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.testb_rr(reg, reg);
        else
            m_assembler.testb_i8r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }
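
    // Why 'cond ^ 1' works (illustrative, based on the standard x86 condition
    // code encodings): condition codes come in even/odd pairs whose low bit
    // selects the negated form, e.g. ConditionE (0x4) / ConditionNE (0x5) and
    // ConditionL (0xC) / ConditionGE (0xD); flipping bit 0 therefore yields
    // the logical inverse.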

    // Commute a relational condition; returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition cond)
    {
        // Equality is commutative!
        if (cond == Equal || cond == NotEqual)
            return cond;

        // Based on the values of x86 condition codes, remap > with < and >= with <=
        if (cond >= LessThan) {
            ASSERT(cond == LessThan || cond == LessThanOrEqual || cond == GreaterThan || cond == GreaterThanOrEqual);
            return static_cast<RelationalCondition>(X86Assembler::ConditionL + X86Assembler::ConditionG - cond);
        }

        // As above, for unsigned conditions.
        ASSERT(cond == Below || cond == BelowOrEqual || cond == Above || cond == AboveOrEqual);
        return static_cast<RelationalCondition>(X86Assembler::ConditionB + X86Assembler::ConditionA - cond);
    }
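
    // A worked example of the arithmetic above (using the standard x86
    // encodings ConditionL = 0xC, ConditionLE = 0xE, ConditionG = 0xF,
    // ConditionGE = 0xD): commute(LessThan) = 0xC + 0xF - 0xC = GreaterThan,
    // and commute(LessThanOrEqual) = 0xC + 0xF - 0xE = GreaterThanOrEqual,
    // exactly what is required when the operands swap sides.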

    void nop()
    {
        m_assembler.nop();
    }

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
            );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }
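
    // For reference, a sketch of an equivalent check (not part of the original,
    // and assuming a GCC recent enough to provide the builtin):
    //
    //     static bool isSSE2Present()
    //     {
    //         return __builtin_cpu_supports("sse2");
    //     }
    //
    // The hand-rolled cpuid version above is kept because it also covers MSVC.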

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but debug builds add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h