/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;

    enum Condition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE,
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be an Imm32, and the srcDst
    // operand may often be a memory location (explicitly described using
    // an Address object).
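    //
    // For example (an illustrative sketch, not part of the original API
    // documentation; 'masm' is a hypothetical MacroAssemblerX86Common):
    //
    //     masm.add32(Imm32(5), X86Registers::eax);             // eax += 5
    //     masm.sub32(X86Registers::ecx, X86Registers::eax);    // eax -= ecx
    //     masm.and32(Imm32(0xff), Address(X86Registers::ebx)); // *ebx &= 0xff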

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(Imm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(Imm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shll_CLr(X86Registers::ecx);
            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.shll_CLr(shift_amount);
            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shll_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.shll_CLr(dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(Imm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.sarl_CLr(X86Registers::ecx);
            // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.sarl_CLr(shift_amount);
            // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.sarl_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.sarl_CLr(dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "shrl %eax, %eax" -> "xchgl %eax, %ecx; shrl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shrl_CLr(X86Registers::ecx);
            // E.g. transform "shrl %eax, %ecx" -> "xchgl %eax, %ecx; shrl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.shrl_CLr(shift_amount);
            // E.g. transform "shrl %eax, %ebx" -> "xchgl %eax, %ecx; shrl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shrl_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.shrl_CLr(dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(Imm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }


    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(Imm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the
    // form store(source, address). The source for a store may be an Imm32.
    // Address operand objects to loads and stores will be implicitly
    // constructed if a register is passed.
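    //
    // For example (an illustrative sketch; the registers and the offset 8
    // are hypothetical):
    //
    //     masm.load32(Address(X86Registers::ebx, 8), X86Registers::eax); // eax = *(ebx + 8)
    //     masm.store32(Imm32(42), Address(X86Registers::ebx, 8));        // *(ebx + 8) = 42
    //     masm.load32(BaseIndex(X86Registers::ebx, X86Registers::ecx, TimesFour), X86Registers::eax);
    //                                                                    // eax = *(ebx + ecx * 4)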

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE2, not x87 floating point.
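    //
    // For example (an illustrative sketch; the registers are hypothetical):
    //
    //     masm.loadDouble(Address(X86Registers::ebx), X86Registers::xmm0); // xmm0 = *(double*)ebx
    //     masm.addDouble(X86Registers::xmm0, X86Registers::xmm1);          // xmm1 += xmm0
    //     masm.storeDouble(X86Registers::xmm1, Address(X86Registers::ebx));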

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
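
    // Usage sketch for branchDouble (hypothetical registers): DoubleLessThan
    // has DoubleConditionBitInvert set, so the operands to ucomisd are
    // swapped and the unsigned 'above' condition is tested, which is false
    // for unordered (NaN) comparisons, as required:
    //
    //     Jump lt = masm.branchDouble(DoubleLessThan, X86Registers::xmm0, X86Registers::xmm1);
    //     // ... code for the not-less-than (or unordered) case ...
    //     lt.link(&masm);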

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(Equal, dest, Imm32(0x80000000));
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to a double & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    void zeroDouble(FPRegisterID srcDest)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(srcDest, srcDest);
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine-word-sized units of data. Push and pop
    // operations add and remove a single register-sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
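    //
    // For example (an illustrative sketch):
    //
    //     masm.push(X86Registers::eax); // spill eax:   *--esp = eax
    //     masm.pop(X86Registers::eax);  // restore eax: eax = *esp++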

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.
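    //
    // For example (an illustrative sketch); note that moving a zero
    // immediate is emitted as the shorter xor idiom:
    //
    //     masm.move(Imm32(0), X86Registers::eax);          // xorl %eax, %eax
    //     masm.move(X86Registers::eax, X86Registers::edx); // movl %eax, %edx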

    void move(Imm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the Imm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
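    //
    // For example (an illustrative sketch; 'slowCase' is a hypothetical
    // label name):
    //
    //     Jump slowCase = masm.branch32(GreaterThan, X86Registers::eax, Imm32(100));
    //     // ... fast path, taken when eax <= 100 ...
    //     slowCase.link(&masm);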

public:
    Jump branch8(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
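    //
    // For example, an overflow-checked integer add with a fallback might be
    // emitted as (an illustrative sketch; the recovery code is hypothetical):
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, X86Registers::edx, X86Registers::eax); // eax += edx
    //     // ... continue with the 32-bit result in eax ...
    //     overflowed.link(&masm); // recover, e.g. by redoing the add as doubles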

    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_mr(left.offset, left.base, right);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }
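
    // For example (an illustrative sketch), to materialize the boolean
    // result of "eax == 0" into edx as 0 or 1:
    //
    //     masm.set32(Equal, X86Registers::eax, Imm32(0), X86Registers::edx);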

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

protected:
    X86Assembler::Condition x86Condition(Condition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds we add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h