/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;

    enum Condition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE,
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
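
    // A reader's note on the encoding above (see branchDouble() below for where
    // it is consumed): the low bits of each DoubleCondition hold a plain
    // X86Assembler::Condition. DoubleConditionBitInvert requests that the
    // ucomisd operand order be swapped, so that e.g. "less than" can be tested
    // as "above" with reversed operands, and DoubleConditionBitSpecial marks
    // the two conditions (DoubleEqual, DoubleNotEqualOrUnordered) that need
    // extra handling of the parity flag, which ucomisd sets for NaN operands.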

    static const RegisterID stackPointerRegister = X86Registers::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be an Imm32, and the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).

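    // Illustrative usage (a sketch, not part of this header; 'masm' is a
    // hypothetical MacroAssemblerX86Common instance). The two-operand form
    // reads as "srcDst op= source":
    //
    //     masm.add32(Imm32(4), X86Registers::eax);                        // addl $4, %eax
    //     masm.add32(X86Registers::ebx, Address(X86Registers::esp, 4));   // addl %ebx, 4(%esp)
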
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(Imm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(Imm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shll_CLr(X86Registers::ecx);
            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.shll_CLr(shift_amount);
            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shll_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.shll_CLr(dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(Imm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.sarl_CLr(X86Registers::ecx);
            // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.sarl_CLr(shift_amount);
            // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.sarl_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.sarl_CLr(dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86Registers::ecx) {
            swap(shift_amount, X86Registers::ecx);

            // E.g. transform "shrl %eax, %eax" -> "xchgl %eax, %ecx; shrl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shrl_CLr(X86Registers::ecx);
            // E.g. transform "shrl %eax, %ecx" -> "xchgl %eax, %ecx; shrl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86Registers::ecx)
                m_assembler.shrl_CLr(shift_amount);
            // E.g. transform "shrl %eax, %ebx" -> "xchgl %eax, %ecx; shrl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shrl_CLr(dest);

            swap(shift_amount, X86Registers::ecx);
        } else
            m_assembler.shrl_CLr(dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(Imm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(Imm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be an Imm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.

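    // Illustrative usage (a sketch, not part of this header; 'masm' is a
    // hypothetical MacroAssemblerX86Common instance):
    //
    //     masm.load32(Address(X86Registers::ebp, 8), X86Registers::eax);  // movl 8(%ebp), %eax
    //     masm.store32(Imm32(0), Address(X86Registers::ebp, 8));          // movl $0, 8(%ebp)
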
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    // Floating-point operations:
    //
    // Presently only supports SSE2, not x87 floating point.

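    // Illustrative usage (a sketch; 'masm' is a hypothetical assembler
    // instance): the double-precision operations map directly onto SSE2
    // scalar instructions.
    //
    //     masm.loadDouble(Address(X86Registers::esp, 0), X86Registers::xmm0); // movsd (%esp), %xmm0
    //     masm.addDouble(X86Registers::xmm1, X86Registers::xmm0);             // addsd %xmm1, %xmm0
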
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(Equal, dest, Imm32(0x80000000));
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

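    // Illustrative usage (a sketch; 'masm' is a hypothetical assembler
    // instance): preserving a register across a clobbering sequence.
    //
    //     masm.push(X86Registers::eax);   // pushl %eax
    //     ...                             // code that clobbers eax
    //     masm.pop(X86Registers::eax);    // popl %eax
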
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    // Register move operations:
    //
    // Move values in registers.

    void move(Imm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the Imm32 value is zero extended into the register, it
        // may be useful to have a separate version that sign extends the value?
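        // A zero immediate is special-cased below: "xorl reg, reg" is the
        // conventional x86 zero idiom and has a shorter encoding than
        // "movl $0, reg".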
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

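    // Illustrative usage (a sketch; 'masm' is a hypothetical assembler
    // instance): a forward branch is returned as a Jump and linked once the
    // target location is known.
    //
    //     Jump isZero = masm.branchTest32(Zero, X86Registers::eax);
    //     masm.add32(Imm32(1), X86Registers::eax);  // skipped when eax was zero
    //     isZero.link(&masm);                       // branch target is bound here
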
public:
    Jump branch8(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

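    // Illustrative usage (a sketch; 'masm' is a hypothetical assembler
    // instance): the overflow check falls out of the flags set by the
    // arithmetic instruction itself.
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, Imm32(1), X86Registers::eax);
    //     // 'overflowed' is taken iff the addl set the overflow flag.
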
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, Address dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(Condition cond, Address src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_mr(left.offset, left.base, right);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

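    // Illustrative usage (a sketch; 'masm' is a hypothetical assembler
    // instance): set32 materializes a comparison result as 0 or 1 in a
    // register instead of branching.
    //
    //     masm.set32(Equal, X86Registers::eax, Imm32(0), X86Registers::ecx);
    //     // ecx is now 1 if eax was zero, and 0 otherwise.
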
    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

protected:
    X86Assembler::Condition x86Condition(Condition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds this method is provided to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h