/*
 * Copyright (C) 2008 Apple Inc.
 * Copyright (C) 2009, 2010 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM_h
#define MacroAssemblerARM_h

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
public:
    typedef ARMRegisters::FPRegisterID FPRegisterID;
    static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;

    enum RelationalCondition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE
    };

    enum ResultCondition {
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE
    };

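    // An informal illustration of the split above: comparing a NaN-holding d0
    // with itself, branchDouble(DoubleEqual, d0, d0) is never taken, while
    // branchDouble(DoubleEqualOrUnordered, d0, d0) always is. branchDouble()
    // below strips DoubleConditionBitSpecial back out and emits an extra
    // compare that forces the Z flag when the VFP comparison was unordered.
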
    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    static const Scale ScalePtr = TimesFour;

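    // Note: ARMRegisters::S0 and ARMRegisters::S1 are the assembler scratch
    // registers used throughout the helpers below; values left in them do not
    // survive across these calls.
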
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }

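    // getImm() may hand back the mask encoded as an inverted operand2 immediate
    // (flagged with OP2_INV_IMM); in that case the AND is emitted as BIC with
    // the complemented mask, which computes the same result.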
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, dest, w);
    }

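    // Register-specified shifts mask the amount to the low five bits first,
    // giving the x86-style 0-31 semantics the JIT expects; a plain ARM register
    // shift would honour the whole low byte of the shift register.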
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }

    void not32(RegisterID dest)
    {
        m_assembler.mvns_r(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.clz_r(dest, src);
#else
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
#endif
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

#if CPU(ARMV5_OR_LOWER)
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        load32WithAddressOffsetPatch(address, dest);
        return dataLabel;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale));
        load16(Address(ARMRegisters::S1, address.offset), dest);
    }

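    // Halfword transfers use ARM's separate 8-bit, split immediate encoding, so
    // the offset goes through getOffsetForHalfwordDataTransfer() rather than
    // the word-transfer path.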
    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0));
        else
            m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0));
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

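    // Pointer immediates are loaded with ldr_un_imm (a unique constant-pool
    // slot) rather than synthesized with moveImm, so the stored value can
    // later be repatched in place.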
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void store32(RegisterID src, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(TrustedImm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

    void move(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

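    // When -right fits in an operand2 immediate, the comparison is emitted as
    // CMN left, #-right, which sets the same flags as CMP left, #right and
    // saves materializing the constant in a scratch register.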
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
    {
        if (right.m_isPointer) {
            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
            m_assembler.cmp_r(left, ARMRegisters::S0);
        } else {
            ARMWord tmp = m_assembler.getOp2(-right.m_value);
            if (tmp != ARMAssembler::INVALID_IMM)
                m_assembler.cmn_r(left, tmp);
            else
                m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        }
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        UNUSED_PARAM(cond);
        UNUSED_PARAM(left);
        UNUSED_PARAM(right);
        ASSERT_NOT_REACHED();
        return jump();
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load16(left, ARMRegisters::S0);
        move(right, ARMRegisters::S1);
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
        return m_assembler.jmp(ARMCondition(cond));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.tst_r(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

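    // Overflow check for signed multiply: SMULL leaves the full 64-bit product
    // in S1 (high word) and dest (low word). The product fits in 32 bits
    // exactly when the high word equals the sign extension of the low word,
    // which the trailing CMP tests by comparing S1 with (dest >> 31).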
    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        }
        else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    Call nearCall()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
#endif
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    void ret()
    {
        m_assembler.bx(linkRegister);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmp_r(reg, ARMAssembler::getOp2(0)); // A mask of -1 tests the register against zero.
        else
            m_assembler.tst_r(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void load32(const void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);
        m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

        // Pad the slot the PC read skips: PC reads as the ADD's address plus 8,
        // so the jump table starts immediately after this NOP.
        m_assembler.mov_r(ARMRegisters::r0, ARMRegisters::r0);
    }

    Call call()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
#endif
    }

    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }

    // Floating point operators
    bool supportsFloatingPoint() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointTruncate() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointSqrt() const
    {
        return s_isVFPPresent;
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_f64_r(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_f64_r(dest, dest, src);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_f64_r(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_f64_r(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt_f64_r(dest, src);
    }

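    // (dest << 1) names the single-precision register that aliases the low
    // half of double register 'dest' (VFP d(n) overlays s(2n) and s(2n + 1));
    // the integer is moved into that slot and converted in place.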
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov_vfp_r(dest << 1, src);
        m_assembler.vcvt_f64_s32_r(dest, dest << 1);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // Using flds here is not worth the effort.
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // Using flds here is not worth the effort.
        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_f64_r(left, right);
        m_assembler.vmrs_apsr();
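        // For the conditions carrying DoubleConditionBitSpecial, an unordered
        // VFP compare sets V; the conditional CMP of S0 with itself then
        // forces Z, flipping the outcome of the EQ/NE test that follows.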
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN and INT_MAX).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvtr_s32_f64_r(ARMRegisters::SD0 << 1, src);
        // If VCVTR.S32.F64 can't fit the result into a 32-bit
        // integer, it saturates at INT_MAX or INT_MIN. Testing this is
        // probably quicker than testing FPSCR for exception.
        m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);
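        // dest == INT_MIN makes the SUB result zero (Z set); otherwise the
        // conditional CMN adds 1 to (dest - 0x80000000), which is zero only
        // when dest == INT_MAX. Either way, Z ends up set exactly when the
        // conversion saturated, and the jump below is taken on failure.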
        m_assembler.sub_r(ARMRegisters::S0, dest, ARMAssembler::getOp2(0x80000000));
        m_assembler.cmn_r(ARMRegisters::S0, ARMAssembler::getOp2(1), ARMCondition(NotEqual));
        return Jump(m_assembler.jmp(ARMCondition(Equal)));
    }

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_f64_s32_r(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals -0.0.
        failureCases.append(branchTest32(Zero, dest));
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void nop()
    {
        m_assembler.nop();
    }

protected:
    ARMAssembler::Condition ARMCondition(RelationalCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    ARMAssembler::Condition ARMCondition(ResultCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

    void prepareCall()
    {
#if WTF_ARM_ARCH_VERSION < 5
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

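        // On architectures without BLX, reading PC yields the address of the
        // current instruction plus 8, so this MOV leaves LR pointing just past
        // the branch the caller emits next - i.e. at the return address.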
        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
    }

    void call32(RegisterID base, int32_t offset)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        int targetReg = ARMRegisters::S1;
#else
        int targetReg = ARMRegisters::pc;
#endif
        int tmpReg = ARMRegisters::S1;

        if (base == ARMRegisters::sp)
            offset += 4;

        if (offset >= 0) {
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_u(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
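                // (10 << 8) is the operand2 rotate field: the 8-bit immediate
                // (offset >> 12) is rotated right by 20, i.e. shifted left by
                // 12, so tmpReg = base + (offset & 0xff000).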
                m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_ur(true, targetReg, base, tmpReg);
            }
        } else {
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                m_assembler.moveImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_dr(true, targetReg, base, tmpReg);
            }
        }
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.blx(targetReg);
#endif
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static const bool s_isVFPPresent;
};

}

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#endif // MacroAssemblerARM_h