/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
protected:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static const int MaximumCompactPtrAlignedAddressOffset = 127;

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
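
    // How the extra bits are consumed (see branchDouble() below): DoubleConditionBitInvert
    // means the operands to ucomisd are swapped before the condition code is tested, and
    // DoubleConditionBitSpecial marks the two cases (DoubleEqual, DoubleNotEqualOrUnordered)
    // that need an extra parity check to handle unordered (NaN) results correctly.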

    static const RegisterID stackPointerRegister = X86Registers::esp;

#if ENABLE(JIT_CONSTANT_BLINDING)
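    // (Rationale inferred, not stated in the original: only immediates of 24 bits or more
    // are blinded; smaller constants are assumed too small to be useful as attacker-
    // controlled payloads embedded in JIT code.)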
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).
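    //
    // For example (illustrative only), add32(TrustedImm32(4), X86Registers::eax)
    // below emits 'addl $4, %eax' via X86Assembler::addl_ir().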

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
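            // (Worked example, not from the original comment: for lshift32(edx, ecx) we
            // swap edx<->ecx so the shift amount lands in ecx; since dest was ecx, we
            // shift the swapped register edx - which now holds the value to be shifted -
            // and the swap back moves the result into ecx.)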
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

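    // absDouble() and negateDouble() below exploit the fact that -0.0 is a double with
    // only its sign bit set: andnpd with it clears the sign bit (absolute value), while
    // xorpd with it flips the sign bit (negation).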
    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.xorpd_rr(src, dst);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operands to loads and stores will be implicitly constructed if a register
    // is passed.
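    //
    // For example (illustrative only), load32(Address(X86Registers::ebp, 8), X86Registers::eax)
    // below emits 'movl 8(%ebp), %eax' via X86Assembler::movl_mr().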

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(value >= 0);
        ASSERT(value < MaximumCompactPtrAlignedAddressOffset);
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
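    //
    // For example (illustrative only, register names assumed),
    // addDouble(X86Registers::xmm1, X86Registers::xmm0) below emits
    // 'addsd %xmm1, %xmm0' via X86Assembler::addsd_rr().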

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
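    // (Background, not in the original comment: on overflow or NaN, cvttsd2si produces
    // the 'integer indefinite' value 0x80000000, which is why a legitimate result of
    // INT_MIN is indistinguishable from a failed truncation here.)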
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
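    //
    // For example (illustrative only), push(X86Registers::eax) below emits 'push %eax',
    // and pop(X86Registers::eax) emits 'pop %eax'; both implicitly adjust esp/rsp by
    // one machine word.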

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.
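    //
    // (Encoding note, an observation rather than anything stated in this file: moving
    // zero via 'xorl reg, reg' below is preferred because it is a shorter encoding
    // than 'movl $0, reg' and is the standard x86 zeroing idiom.)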

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
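    //
    // For example (illustrative only), a forward branch can be emitted now and
    // bound later:
    //
    //     Jump notZero = branchTest32(NonZero, X86Registers::eax);
    //     // ... emit the code for the zero case ...
    //     notZero.link(this); // bind the branch target here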

public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this could be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
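    //
    // For example (illustrative only), branchAdd32(Overflow, src, dest) below emits
    // an 'addl' followed by 'jo', branching iff the signed addition overflowed.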

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
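    // (Why 'cond ^ 1' works, an observation not in the original comment: x86 encodes
    // each condition code adjacent to its logical inverse, differing only in the low
    // bit - e.g. ConditionE/ConditionNE and ConditionL/ConditionGE.)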
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void nop()
    {
        m_assembler.nop();
    }

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
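            // ebx is saved and restored around cpuid because on 32-bit x86 it can hold
            // the PIC/GOT base register, and cpuid clobbers it.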
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but debug builds define this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h