]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/MacroAssemblerARM.h
JavaScriptCore-1218.34.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssemblerARM.h
1 /*
2 * Copyright (C) 2008 Apple Inc.
3 * Copyright (C) 2009, 2010 University of Szeged
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #ifndef MacroAssemblerARM_h
29 #define MacroAssemblerARM_h
30
31 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
32
33 #include "ARMAssembler.h"
34 #include "AbstractMacroAssembler.h"
35
36 namespace JSC {
37
38 class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
39 static const int DoubleConditionMask = 0x0f;
40 static const int DoubleConditionBitSpecial = 0x10;
41 COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
42 public:
43 typedef ARMRegisters::FPRegisterID FPRegisterID;
44
    // Integer comparison conditions, mapped 1:1 onto ARM condition codes.
    // Above/Below forms are the unsigned comparisons (HI/CS/CC/LS);
    // GreaterThan/LessThan forms are the signed comparisons (GT/GE/LT/LE).
    enum RelationalCondition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE
    };
57
    // Conditions evaluated against the flags set by a preceding flag-setting
    // ALU operation (the *S-form instructions this class emits: adds, subs, ...).
    enum ResultCondition {
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        PositiveOrZero = ARMAssembler::PL,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };
65
    // Floating-point comparison conditions. The unsigned-style condition codes
    // (HI/CS/CC/LS) are used because, after a VFP compare, unordered (NaN)
    // operands set the flags such that those conditions evaluate false/true in
    // the required direction. DoubleConditionBitSpecial marks the two cases
    // (==, !=) that need extra handling beyond a plain condition code.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };
82
    // Well-known registers and the pointer-size addressing scale (4 bytes on ARM32).
    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    static const Scale ScalePtr = TimesFour;
87
    // 32-bit add. All forms emit ADDS so the condition flags reflect the
    // result; branchAdd32() depends on this. S0/S1 are used as scratch.

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds(dest, dest, src);
    }

    void add32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.adds(dest, op1, op2);
    }

    // Read-modify-write of a memory word through scratch S1.
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        // getImm() either encodes the immediate directly or materializes it in S0.
        m_assembler.adds(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(ARMRegisters::S1, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
127
    // 32-bit bitwise AND (flag-setting forms).

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.bitAnds(dest, dest, src);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.bitAnds(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        // getImm(..., true) may return the inverted immediate (flagged with
        // Op2InvertedImmediate); in that case emit BICS instead of ANDS.
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::Op2InvertedImmediate)
            m_assembler.bics(dest, dest, w & ~ARMAssembler::Op2InvertedImmediate);
        else
            m_assembler.bitAnds(dest, dest, w);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::Op2InvertedImmediate)
            m_assembler.bics(dest, src, w & ~ARMAssembler::Op2InvertedImmediate);
        else
            m_assembler.bitAnds(dest, src, w);
    }

    void and32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        and32(ARMRegisters::S1, dest);
    }
161
    // Logical shift left. Register shift amounts are masked to 0..31 (via S0)
    // to match the semantics expected by JS (x86-style shift masking).

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);

        m_assembler.movs(dest, m_assembler.lslRegister(src, ARMRegisters::S0));
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
    }
184
    // 32-bit multiply (MULS). On ARM the destination register of MUL must not
    // equal the first source operand, hence the operand shuffling below.
    void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest) {
            if (op1 == dest) {
                // Both sources alias dest: copy one into scratch S0.
                move(op2, ARMRegisters::S0);
                op2 = ARMRegisters::S0;
            } else {
                // Swap the operands.
                RegisterID tmp = op1;
                op1 = op2;
                op2 = tmp;
            }
        }
        m_assembler.muls(dest, op1, op2);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        mul32(src, dest, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        // Immediates are not valid MUL operands; materialize in S0 first.
        move(imm, ARMRegisters::S0);
        m_assembler.muls(dest, src, ARMRegisters::S0);
    }
211
    // Two's-complement negation: RSBS dest, dest, #0 (0 - dest), setting flags.
    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs(srcDest, srcDest, ARMAssembler::getOp2Byte(0));
    }
216
    // 32-bit bitwise OR (ORRS, flag-setting).

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs(dest, dest, src);
    }

    void or32(RegisterID src, AbsoluteAddress dest)
    {
        // Read-modify-write through S0 (address) and S1 (value); the final
        // store goes through S0 as a base register with offset 0.
        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
        load32(Address(ARMRegisters::S0), ARMRegisters::S1);
        or32(src, ARMRegisters::S1);
        store32(ARMRegisters::S1, ARMRegisters::S0);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orrs(dest, op1, op2);
    }
244
    // Arithmetic (rshift32, ASR) and logical (urshift32, LSR) right shifts.
    // Register shift amounts are masked to 0..31 via scratch S0.

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);

        m_assembler.movs(dest, m_assembler.asrRegister(src, ARMRegisters::S0));
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);

        m_assembler.movs(dest, m_assembler.lsrRegister(src, ARMRegisters::S0));
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
    }
290
    // 32-bit subtract (SUBS, flag-setting; branchSub32() depends on the flags).

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // Read-modify-write of a memory word through scratch S1.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
318
    // 32-bit bitwise XOR (EORS). XOR with -1 is strength-reduced to MVNS
    // (bitwise NOT), which avoids materializing the immediate.

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors(dest, dest, src);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eors(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns(dest, dest);
        else
            m_assembler.eors(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns(dest, src);
        else
            m_assembler.eors(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
344
    // Count leading zero bits of src into dest. The CLZ instruction only
    // exists on ARMv5 and later; on older cores this operation is unsupported
    // and must never be reached at runtime.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.clz(dest, src);
#else
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        RELEASE_ASSERT_NOT_REACHED();
#endif
    }
355
    // Memory loads. dataTransfer*/baseIndexTransfer* pick the addressing form;
    // the *16 variants are used for halfword-format encodings (which on ARM
    // also cover the sign-extended byte load — hence load8Signed going through
    // baseIndexTransfer16).

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.offset);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.offset);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

#if CPU(ARMV5_OR_LOWER)
    // Pre-ARMv6 cores cannot do unaligned word loads; out-of-line implementation.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    // ARMv6+ handles unaligned word access directly.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
409
    // Emit a pointer load whose instruction can later be rewritten in place
    // (see ConvertibleLoadLabel). The offset is restricted to 0..255 so the
    // patched replacement encoding can still represent it — TODO confirm the
    // exact converted form against the ARMAssembler patching code.
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(address.offset >= 0 && address.offset <= 255);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        return result;
    }
417
    // Load with a 32-bit offset that will be patched later: the offset is
    // materialized into S0 by a constant-pool LDR (kept unique so the pool
    // slot is patchable), then used as a register offset for the load.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
        m_assembler.dtrUpRegister(ARMAssembler::LoadUint32, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }
425
426 static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
427 {
428 return value >= -4095 && value <= 4095;
429 }
430
    // Patchable load whose offset fits a single instruction's immediate field
    // (see isCompactPtrAlignedAddressOffset). Positive offsets use the
    // add-offset (U-bit set) form, negative ones the subtract form.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        if (address.offset >= 0)
            m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        else
            m_assembler.dtrDown(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        return dataLabel;
    }
441
    // Store-side counterpart of load32WithAddressOffsetPatch: patchable 32-bit
    // offset materialized into S0 via a unique constant-pool LDR.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
        m_assembler.dtrUpRegister(ARMAssembler::StoreUint32, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }
449
    // Memory stores. Absolute addresses are materialized into scratch S0;
    // immediate values are staged in scratch S1.

    void store8(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store8(TrustedImm32 imm, const void* address)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
        move(imm, ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(ARMAssembler::StoreUint32, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        move(imm, ARMRegisters::S1);
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, ARMRegisters::S1, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(RegisterID src, const void* address)
    {
        // Load the absolute address from the constant pool into S0, then store.
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtrUp(ARMAssembler::StoreUint32, src, ARMRegisters::S0, 0);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
501
    // Machine-stack push/pop of a single word.

    void pop(RegisterID dest)
    {
        m_assembler.pop(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(TrustedImm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }
523
    // Register/immediate moves. Register-to-register moves elide the
    // instruction entirely when src == dest.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.moveImm(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        // On 32-bit ARM a pointer fits in a 32-bit immediate.
        move(TrustedImm32(imm), dest);
    }
539
540 void swap(RegisterID reg1, RegisterID reg2)
541 {
542 xor32(reg1, reg2);
543 xor32(reg2, reg1);
544 xor32(reg1, reg2);
545 }
546
547 void signExtend32ToPtr(RegisterID src, RegisterID dest)
548 {
549 if (src != dest)
550 move(src, dest);
551 }
552
553 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
554 {
555 if (src != dest)
556 move(src, dest);
557 }
558
    // Compare-and-branch. Memory operands are loaded into scratch S1 first;
    // the flag-setting compare is then followed by a conditional jump.

    void branch8(...); // see overloads below
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // The loaded byte is zero-extended, so only 8-bit immediates compare sanely.
        ASSERT(!(right.m_value & 0xFFFFFF00));
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // useConstantPool forces the branch through a constant-pool-loaded target
    // so the jump is patchable (used by branchPtrWithPatch()).
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
    {
        internalCompare32(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
613
    // Test-and-branch: AND the operand with a mask (without keeping the
    // result) and branch on the flags. Only Zero/NonZero make sense here.

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // If getImm() returns an inverted encoding, use BICS (which sets the
        // same Z flag for the masked value) instead of TST.
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::Op2InvertedImmediate)
            m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
        else
            m_assembler.tst(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }
656
    // Unconditional jumps. Indirect jumps through memory load straight into pc.

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    void jump(Address address)
    {
        // Loading into pc transfers control to the loaded address.
        load32(address, ARMRegisters::pc);
    }

    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0);
        load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
    }
677
    // Transfer a 64-bit double between one VFP register and a pair of core
    // registers (low word in dest1/src1, high word in dest2/src2, per VMOV).

    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.vmov(dest1, dest2, src);
    }

    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
    {
        // The trailing FPRegisterID scratch parameter is unused on this port.
        m_assembler.vmov(dest, src1, src2);
    }
687
    // Add and branch on the resulting flags (the add32 forms emit ADDS).

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(op1, op2, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        // add32(imm, AbsoluteAddress) ends with a flag-setting add on S1
        // before the store, so the branch condition is still valid here.
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
727
    // Signed multiply with overflow detection: SMULL produces a 64-bit result
    // (high word in S1). The product fits 32 bits iff the high word equals the
    // sign-extension of the low word (dest >> 31), so the final CMP leaves
    // NE set exactly when the multiply overflowed (used by branchMul32).
    void mull32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest) {
            if (op1 == dest) {
                // Both sources alias dest: copy one into scratch S0 first.
                move(op2, ARMRegisters::S0);
                op2 = ARMRegisters::S0;
            } else {
                // Swap the operands.
                RegisterID tmp = op1;
                op1 = op2;
                op2 = tmp;
            }
        }
        m_assembler.mull(ARMRegisters::S1, dest, op1, op2);
        m_assembler.cmp(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }
744
    // Multiply and branch. Overflow cannot be read from MULS flags, so the
    // Overflow case goes through mull32(), whose trailing CMP sets NE on
    // overflow — hence the rewrite of cond to NonZero.

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src1, src2, dest);
            cond = NonZero;
        }
        else
            mul32(src1, src2, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
774
    // Subtract/negate and branch on the resulting flags (SUBS/RSBS forms).

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.subs(dest, op1, op2);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
809
    // OR and branch on the resulting flags. Overflow is excluded because ORRS
    // does not produce a meaningful V flag for this purpose.
    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
816
    // Compare-and-branch whose target is always loaded from the constant pool
    // (loadBranchTarget into S1, then conditional BX), so the destination can
    // be repatched later regardless of branch distance.
    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        internalCompare32(reg, imm);
        Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), true));
        m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
        return PatchableJump(jump);
    }
824
    // Emit a BKPT (software breakpoint) instruction.
    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    // Linkable near call: the target is loaded from the constant pool into S1
    // and called via BLX so the call site can be linked/patched later.
    Call nearCall()
    {
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
    }

    // Indirect call through a register (not linkable).
    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    // Call through a function pointer held in memory.
    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    // Function return: branch to the link register.
    void ret()
    {
        m_assembler.bx(linkRegister);
    }
850
    // Materialize a comparison result as 0/1 in dest: compare, MOV dest,#0,
    // then a conditional MOV dest,#1 predicated on the condition.

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, ARMRegisters::S1);
        compare32(cond, ARMRegisters::S1, right, dest);
    }

    // Materialize a mask-test result as 0/1 in dest.
    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            // NOTE(review): this emits CMP with rn = 0 (register r0) and reg
            // as the operand — it is not an obvious "test reg against zero".
            // Confirm ARMAssembler::cmp()'s operand order / intended encoding.
            m_assembler.cmp(0, reg);
        else
            m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }
892
    // NOTE(review): unlike the other add32 overloads this emits non-flag-
    // setting ADD (not ADDS) — confirm whether that asymmetry is intentional.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // Read-modify-write of an absolute 32-bit location through scratch S1.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address.m_ptr);
    }

    // 64-bit add of a 32-bit immediate to a memory location: add the low
    // word with ADDS/SUBS, then propagate the carry/borrow into the high
    // word with ADC/SBC of zero. Statement order is critical (flags must
    // survive between the low-word op and the ADC/SBC).
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        ARMWord tmp;

        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, 0);

        // Prefer a directly-encodable immediate; fall back to subtracting the
        // negation, and finally to materializing the value in S1 (which
        // clobbers the address, so it is re-materialized afterwards).
        if ((tmp = ARMAssembler::getOp2(imm.m_value)) != ARMAssembler::InvalidImmediate)
            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, tmp);
        else if ((tmp = ARMAssembler::getOp2(-imm.m_value)) != ARMAssembler::InvalidImmediate)
            m_assembler.subs(ARMRegisters::S0, ARMRegisters::S0, tmp);
        else {
            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, m_assembler.getImm(imm.m_value, ARMRegisters::S1));
            move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        }
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, 0);

        // High word: add the carry (positive imm) or subtract the borrow
        // (negative imm, where the low-word op was a subtract).
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
        if (imm.m_value >= 0)
            m_assembler.adc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        else
            m_assembler.sbc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address.m_ptr);
    }
936
    // Load from an absolute address: the address comes from the constant pool
    // into S0, then a zero-offset word load.
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, ARMRegisters::S0, 0);
    }
942
    // Compare an absolute memory word (loaded into S1) and branch.

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
954
    // Jump into a table that immediately follows: ADD pc, pc, index << scale.
    // Reading pc yields the current instruction + 8 (ARM pipeline), so a NOP
    // is emitted after the ADD to occupy the prefetch slot that offset implies.
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);
        m_assembler.add(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

        // NOP the default prefetching
        m_assembler.mov(ARMRegisters::r0, ARMRegisters::r0);
    }
963
    // Linkable call: target loaded from the constant pool into S1, then BLX.
    // ensureSpace keeps the load/branch pair contiguous so a constant-pool
    // flush cannot be inserted between them.
    Call call()
    {
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
    }

    // Tail calls are represented as jumps wrapped in a Call.
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }
980
    // Load a pointer constant from a unique constant-pool slot so the value
    // can be repatched later; returns a label identifying the slot.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldrUniqueImmediate(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }
987
    // Compare 'left' against a patchable pointer constant (loaded into
    // scratch S1) and branch on 'cond'. ensureSpace keeps the load and
    // compare contiguous so the site has a fixed layout for repatching;
    // 'dataLabel' receives the location of the patchable constant.
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }
995
    // As above, but the left operand is in memory: it is loaded into scratch
    // S1 first, and the patchable constant goes into scratch S0 instead.
    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }
1004
    // Store a patchable pointer constant to 'address'. The constant is
    // materialized in scratch S1; the returned label locates it for
    // later repatching.
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    // As above, with a null placeholder as the initial value.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
1016
    // Floating point operators
    // Capability queries used by the JIT tiers to decide what to emit.
    static bool supportsFloatingPoint()
    {
        // Determined at runtime by probing for VFP hardware.
        return s_isVFPPresent;
    }

    static bool supportsFloatingPointTruncate()
    {
        return false;
    }

    static bool supportsFloatingPointSqrt()
    {
        return s_isVFPPresent;
    }
    // Note: absDouble() is implemented below, but is not advertised here.
    static bool supportsFloatingPointAbs() { return false; }
1033
    // Single-precision load from base + (index << scale) + offset.
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadFloat, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision load from base + offset.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.dataTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.offset);
    }

    // Double-precision load from base + (index << scale) + offset.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision load from an absolute address; the address is
    // materialized in scratch register S0 first.
    void loadDouble(const void* address, FPRegisterID dest)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
        m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
    }
1054
    // Single-precision store to base + (index << scale) + offset.
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision store to base + offset.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.offset);
    }

    // Double-precision store to base + (index << scale) + offset.
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision store to an absolute address; the address is
    // materialized in scratch register S0 first.
    void storeDouble(FPRegisterID src, const void* address)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
    }
1075
1076 void moveDouble(FPRegisterID src, FPRegisterID dest)
1077 {
1078 if (src != dest)
1079 m_assembler.vmov_f64(dest, src);
1080 }
1081
    // dest += src
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_f64(dest, dest, src);
    }

    // dest = op1 + op2
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vadd_f64(dest, op1, op2);
    }

    // dest += [src]; the memory operand goes via the SD0 scratch double.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    // dest += *address.m_ptr, via the SD0 scratch double.
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }
1103
    // dest /= src
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_f64(dest, dest, src);
    }

    // dest = op1 / op2
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vdiv_f64(dest, op1, op2);
    }

    // dest /= [src]. Deliberately disabled: this path has never been
    // exercised on this port, so it traps rather than run untested code.
    void divDouble(Address src, FPRegisterID dest)
    {
        RELEASE_ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }
1120
    // dest -= src
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_f64(dest, dest, src);
    }

    // dest = op1 - op2
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vsub_f64(dest, op1, op2);
    }

    // dest -= [src]; the memory operand goes via the SD0 scratch double.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }
1136
    // dest *= src
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_f64(dest, dest, src);
    }

    // dest *= [src]; the memory operand goes via the SD0 scratch double.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    // dest = op1 * op2
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vmul_f64(dest, op1, op2);
    }
1152
    // dest = sqrt(src)
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt_f64(dest, src);
    }

    // dest = fabs(src). Implemented even though supportsFloatingPointAbs()
    // currently reports false.
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vabs_f64(dest, src);
    }

    // dest = -src
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vneg_f64(dest, src);
    }
1167
    // Convert a signed 32-bit integer to double. The integer bits are first
    // moved into the low single-precision half of 'dest' (VFP single-register
    // index = double-register index << 1), then converted in place.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov_vfp32(dest << 1, src);
        m_assembler.vcvt_f64_s32(dest, dest << 1);
    }

    // As above, with the integer loaded from memory via scratch S1.
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    // As above, with the integer loaded from an absolute address via
    // scratch S1.
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
        load32(Address(ARMRegisters::S1), ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }
1186
    // Widen single-precision 'src' to double-precision 'dst'.
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvt_f64_f32(dst, src);
    }

    // Narrow double-precision 'src' to single-precision 'dst'.
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvt_f32_f64(dst, src);
    }
1196
    // Compare two doubles and branch on 'cond'. vmrs_apsr copies the VFP
    // status flags into the ARM APSR so an ordinary conditional branch can
    // test them.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_f64(left, right);
        m_assembler.vmrs_apsr();
        if (cond & DoubleConditionBitSpecial)
            // Executed only under VS, i.e. when the compare was unordered
            // (NaN): comparing a register with itself then sets the Z flag,
            // folding the unordered case into the branch condition.
            m_assembler.cmp(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }
1205
    // Truncates 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        truncateDoubleToInt32(src, dest);

        // The VFP convert saturates out-of-range inputs to INT_MIN/INT_MAX.
        // (dest + 1) & ~1 maps both 0x7fffffff and 0x80000000 to 0x80000000,
        // so comparing against that detects (probable) overflow — which is
        // why an exact INT_MIN result also takes the failure branch.
        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));

        ARMWord w = ARMAssembler::getOp2(0x80000000);
        ASSERT(w != ARMAssembler::InvalidImmediate);
        m_assembler.cmp(ARMRegisters::S0, w);
        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
    }
1223
    // Unsigned counterpart of branchTruncateDoubleToInt32. The convert
    // saturates to 0 / 0xffffffff; (dest + 1) & ~1 maps both of those to 0,
    // so comparing against 0 detects (probable) overflow. Note that a
    // genuine result of 0 also takes the failure branch.
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        truncateDoubleToUint32(src, dest);

        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));

        m_assembler.cmp(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
    }
1234
    // Result is undefined if the value is outside of the integer range.
    // Converts in the low single-precision half of the SD0 scratch double
    // (VFP single index = double index << 1), then moves the bits to 'dest'.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
    }

    // As above, but converts to an unsigned 32-bit integer.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_u32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
    }
1247
    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    // The fourth (FPRegisterID) parameter is an unused scratch register,
    // kept for interface parity with the other ports.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_f64_s32(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }
1265
    // Branch if 'reg' != 0.0. 'scratch' is clobbered with a 0.0 constant
    // built by converting integer zero (materialized in scratch S0).
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    // Branch if 'reg' is 0.0 or NaN; same scratch protocol as above.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
1279
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // ARM condition codes occupy the top nibble and come in complementary
    // pairs differing only in bit 28, so XOR-ing that bit inverts the
    // condition. The assert checks the value really is a bare condition
    // code (low 28 bits clear) below AL, which has no inverse.
    static RelationalCondition invert(RelationalCondition cond)
    {
        ASSERT((static_cast<uint32_t>(cond & 0x0fffffff)) == 0 && static_cast<uint32_t>(cond) < static_cast<uint32_t>(ARMAssembler::AL));
        return static_cast<RelationalCondition>(cond ^ 0x10000000);
    }
1286
    // Emit a single no-op instruction.
    void nop()
    {
        m_assembler.nop();
    }

    // Decode the destination of a previously-emitted call site.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
    }
1296
    // Overwrite the instruction(s) at 'instructionStart' with a jump to
    // 'destination' (used when invalidating previously-emitted code).
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARMAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }
1301
1302 static ptrdiff_t maxJumpReplacementSize()
1303 {
1304 ARMAssembler::maxJumpReplacementSize();
1305 return 0;
1306 }
1307
    // This port cannot replace a patchable branch-ptr sequence with a jump.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }

    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch());
    // reaching this is a programming error.
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }
1315
    // The patchable branch-ptr sequence begins at the constant load itself,
    // i.e. at the data label with no offset.
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }

    // Undo a replaceWithJump() at a branchPtrWithPatch site, restoring the
    // original compare-against-constant sequence.
    // NOTE(review): only the low 16 bits of initialValue are passed through
    // — presumably ARMAssembler::revertBranchPtrWithPatch only re-encodes
    // that portion of the instruction; confirm against its implementation.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
    {
        ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
    }
1325
    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch());
    // reaching this is a programming error.
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
1330
1331 protected:
    // RelationalCondition values are chosen to match the ARM condition-code
    // encoding, so the conversion is a plain cast.
    ARMAssembler::Condition ARMCondition(RelationalCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    // Likewise for ResultCondition.
    ARMAssembler::Condition ARMCondition(ResultCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }
1341
    // Guarantee 'insnSpace' bytes of instructions and 'constSpace' bytes of
    // constant pool can be emitted without a pool flush landing in between —
    // used to keep patchable sequences contiguous.
    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    // Current size of the pending constant pool, in bytes.
    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }
1351
1352 void call32(RegisterID base, int32_t offset)
1353 {
1354 load32(Address(base, offset), ARMRegisters::S1);
1355 m_assembler.blx(ARMRegisters::S1);
1356 }
1357
1358 private:
1359 friend class LinkBuffer;
1360 friend class RepatchBuffer;
1361
1362 void internalCompare32(RegisterID left, TrustedImm32 right)
1363 {
1364 ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
1365 if (tmp != ARMAssembler::InvalidImmediate)
1366 m_assembler.cmn(left, tmp);
1367 else
1368 m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
1369 }
1370
    // Resolve a Call recorded during assembly to its final target, within
    // the finalized code buffer 'code'.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_label, function.value());
    }

    // Redirect an already-linked call site to a new destination.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // As above, taking a raw function pointer.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1385
1386 static const bool s_isVFPPresent;
1387 };
1388
1389 }
1390
1391 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
1392
1393 #endif // MacroAssemblerARM_h