]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/MacroAssemblerARM.h
1bbb0cce9e903b187f542c579f1fec8cc016a082
[apple/javascriptcore.git] / assembler / MacroAssemblerARM.h
1 /*
2 * Copyright (C) 2008 Apple Inc.
3 * Copyright (C) 2009, 2010 University of Szeged
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #ifndef MacroAssemblerARM_h
29 #define MacroAssemblerARM_h
30
31 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
32
33 #include "ARMAssembler.h"
34 #include "AbstractMacroAssembler.h"
35
36 namespace JSC {
37
// MacroAssemblerARM: the traditional-ARM (non-Thumb) backend of the JSC
// macro assembler. It lowers the portable MacroAssembler operations onto
// ARMAssembler instruction emitters. Throughout this class the registers
// ARMRegisters::S0 and ARMRegisters::S1 are used as assembler-owned
// scratch registers, and ARMRegisters::SD0 as a VFP scratch register.
class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
    // DoubleCondition values are plain ARM condition codes, optionally with
    // one extra bit (DoubleConditionBitSpecial) set above the condition
    // field to request special NaN handling in branchDouble().
    static const int DoubleConditionMask = 0x0f;
    static const int DoubleConditionBitSpecial = 0x10;
    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
public:
    typedef ARMRegisters::FPRegisterID FPRegisterID;

    // Integer comparison conditions, mapped one-to-one onto ARM condition codes.
    enum Condition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE,
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };

    // Floating-point comparison conditions. These are ARM condition codes to
    // be tested after FCMPD + FMSTAT has copied the VFP flags to CPSR; the
    // unsigned conditions (CC/LS/HI/CS) exploit the flag pattern VFP produces
    // for unordered results (C and V set) to get the ordered/unordered split.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    // Pointers are 4 bytes on this target.
    static const Scale ScalePtr = TimesFour;

    // --- 32-bit integer ALU operations -----------------------------------
    // All of these set the CPSR flags (the 's' instruction forms), which the
    // branchAdd32/branchSub32/... helpers below rely on.

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds_r(dest, dest, src);
    }

    // Read-modify-write of a memory word via scratch register S1.
    void add32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        // getImm materializes the immediate, using S0 if it cannot be
        // encoded as an ARM operand2.
        m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ands_r(dest, dest, src);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        // Ask for the immediate with inversion allowed: if only ~imm is
        // encodable, getImm returns it tagged with OP2_INV_IMM and we emit
        // BICS (and-with-complement) instead of ANDS.
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.ands_r(dest, dest, w);
    }

    // --- Shifts -----------------------------------------------------------
    // Register shift counts are masked to 0..31 first (ARM register shifts
    // use the low byte of the register, so an unmasked count >= 32 would
    // behave differently from the x86-style semantics expected here).

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

    // --- Multiply ---------------------------------------------------------

    void mul32(RegisterID src, RegisterID dest)
    {
        // MUL requires distinct source/destination on ARM; copy through S0
        // when they alias.
        if (src == dest) {
            move(src, ARMRegisters::S0);
            src = ARMRegisters::S0;
        }
        m_assembler.muls_r(dest, dest, src);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls_r(dest, src, ARMRegisters::S0);
    }

    void neg32(RegisterID srcDest)
    {
        // RSBS dest, dest, #0  =>  dest = 0 - dest, setting flags.
        m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
    }

    void not32(RegisterID dest)
    {
        m_assembler.mvns_r(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, src);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Arithmetic (sign-propagating) right shift; count masked to 0..31.
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Logical (zero-filling) right shift; count masked to 0..31.
        ARMWord w = ARMAssembler::getOp2(0x1f);
        ASSERT(w != ARMAssembler::INVALID_IMM);
        m_assembler.and_r(ARMRegisters::S0, shift_amount, w);

        m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0));
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, src);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void sub32(Imm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, src);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // --- Memory loads -----------------------------------------------------

    void load8(ImplicitAddress address, RegisterID dest)
    {
        // Trailing 'true' selects the byte-sized transfer form — presumably
        // LDRB; confirm against ARMAssembler::dataTransfer32's signature.
        m_assembler.dataTransfer32(true, dest, address.base, address.offset, true);
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(true, dest, address.base, address.offset);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

#if CPU(ARMV5_OR_LOWER)
    // Pre-ARMv6 cores cannot perform unaligned word loads; an out-of-line
    // implementation (defined elsewhere) assembles the word from halfwords.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

    // Load with a 32-bit offset that can be repatched later: the offset is
    // placed in the constant pool (via ldr_un_imm into S0) so the patcher
    // can rewrite it, then used as a register offset.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        load32(address, dest);
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        // LDRH has no scaled-index addressing mode, so compute
        // base + (index << scale) into S0 first, then use an immediate
        // offset (sign handled by choosing the up/down form).
        m_assembler.add_r(ARMRegisters::S0, address.base, m_assembler.lsl(address.index, address.scale));
        if (address.offset>=0)
            m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset));
        else
            m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset));
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (address.offset >= 0)
            m_assembler.ldrh_u(dest, address.base, ARMAssembler::getOp2Byte(address.offset));
        else
            m_assembler.ldrh_d(dest, address.base, ARMAssembler::getOp2Byte(-address.offset));
    }

    // --- Memory stores ----------------------------------------------------

    // Store counterpart of load32WithAddressOffsetPatch: patchable 32-bit
    // offset loaded from the constant pool into S0.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldr_un_imm(ARMRegisters::S0, 0);
        m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(false, src, address.base, address.offset);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        // Pointer immediates go through the constant pool (ldr_un_imm) so
        // they remain patchable; plain integers use the cheapest encoding.
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    void store32(RegisterID src, void* address)
    {
        // Materialize the absolute address in S0, then store through it.
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(false, src, ARMRegisters::S0, 0);
    }

    void store32(Imm32 imm, void* address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(ARMRegisters::S1, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    // --- Stack operations -------------------------------------------------

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    void push(Imm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

    // --- Register moves ---------------------------------------------------

    void move(Imm32 imm, RegisterID dest)
    {
        // Pointers go via the constant pool so they stay patchable.
        if (imm.m_isPointer)
            m_assembler.ldr_un_imm(dest, imm.m_value);
        else
            m_assembler.moveImm(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov_r(dest, src);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        // Three-move swap through scratch register S0.
        m_assembler.mov_r(ARMRegisters::S0, reg1);
        m_assembler.mov_r(reg1, reg2);
        m_assembler.mov_r(reg2, ARMRegisters::S0);
    }

    // On a 32-bit target, int-to-pointer extension is just a move (or a
    // no-op when source and destination coincide).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    // --- Compare-and-branch ----------------------------------------------

    Jump branch8(Condition cond, Address left, Imm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp_r(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right, int useConstantPool = 0)
    {
        // Patchable (pointer) immediates must come from the constant pool;
        // other values use whatever encoding getImm picks.
        if (right.m_isPointer) {
            m_assembler.ldr_un_imm(ARMRegisters::S0, right.m_value);
            m_assembler.cmp_r(left, ARMRegisters::S0);
        } else
            m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Not implemented on this backend; asserts if ever reached.
    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        UNUSED_PARAM(cond);
        UNUSED_PARAM(left);
        UNUSED_PARAM(right);
        ASSERT_NOT_REACHED();
        return jump();
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        load16(left, ARMRegisters::S0);
        move(right, ARMRegisters::S1);
        m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S1);
        return m_assembler.jmp(ARMCondition(cond));
    }

    Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst_r(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // As in and32: if only ~mask encodes as an immediate, BICS gives the
        // same Z flag as TST would (result is reg & mask), writing scratch S0.
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::OP2_INV_IMM)
            m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
        else
            m_assembler.tst_r(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    // --- Unconditional jumps ---------------------------------------------

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    void jump(Address address)
    {
        // Loading straight into pc performs the jump.
        load32(address, ARMRegisters::pc);
    }

    // --- Arithmetic-and-branch -------------------------------------------
    // These rely on the flag-setting ('s') forms emitted by the ALU helpers
    // above, then branch on the requested flag.

    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // 32x32->64 multiply used for overflow detection: the high word (in S1)
    // is compared against the sign-extension of the low word; inequality
    // (NE) means the product overflowed 32 bits.
    void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest) {
            move(src1, ARMRegisters::S0);
            src1 = ARMRegisters::S0;
        }
        m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1);
        m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        // ARM's MUL does not set an overflow flag, so Overflow is detected
        // via the long multiply + high-word compare in mull32, branching NE.
        if (cond == Overflow) {
            mull32(src, dest, dest);
            cond = NonZero;
        }
        else
            mul32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchNeg32(Condition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // --- Calls / returns --------------------------------------------------

    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    Call nearCall()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        // Reserve space so the branch target load and the BLX stay together
        // (no constant-pool flush between them), then call via S1.
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
#else
        // Pre-v5 has no BLX: prepareCall sets up lr manually, then we jump.
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear);
#endif
    }

    Call call(RegisterID target)
    {
        m_assembler.blx(target);
        JmpSrc jmpSrc;
        return Call(jmpSrc, Call::None);
    }

    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    void ret()
    {
        m_assembler.bx(linkRegister);
    }

    // --- Flag-to-register materialization (setCC) ------------------------
    // Pattern: unconditionally move 0 into dest, then conditionally move 1.

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp_r(left, right);
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        load32(left, ARMRegisters::S1);
        set32(cond, ARMRegisters::S1, right, dest);
    }

    void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        // ARM doesn't have byte registers
        set32(cond, left, right, dest);
    }

    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        if (mask.m_value == -1)
            // NOTE(review): cmp_r(0, S1) compares register 0 against S1;
            // for the mask == -1 ("test all bits") case one would expect
            // CMP S1, #0 instead. Verify operand order of
            // ARMAssembler::cmp_r — this looks suspicious.
            m_assembler.cmp_r(0, ARMRegisters::S1);
        else
            m_assembler.tst_r(ARMRegisters::S1, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
        m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
    }

    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        // ARM doesn't have byte registers
        setTest32(cond, address, mask, dest);
    }

    // --- Absolute-address arithmetic helpers ------------------------------

    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        // Non-flag-setting ADD, unlike the two-operand add32 overloads.
        m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    void add32(Imm32 imm, AbsoluteAddress address)
    {
        // Load the word at *address.m_ptr into S1, add, and store back;
        // the address literal itself is pulled from the constant pool.
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        sub32(imm, ARMRegisters::S1);
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr));
        m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    void load32(void* address, RegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Call call()
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
#else
        prepareCall();
        return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable);
#endif
    }

    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

    // --- Patchable moves / stores ----------------------------------------

    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        // The value lives in the constant pool, so the patcher can rewrite it.
        DataLabelPtr dataLabel(this);
        m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        // 'true' forces the branch through the constant pool so it stays
        // linkable/repatchable.
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(ImmPtr(0), address);
    }

    // Floating point operators
    // Availability is determined at runtime (s_isVFPPresent, defined in the
    // corresponding .cpp); truncation is unsupported on this backend.
    bool supportsFloatingPoint() const
    {
        return s_isVFPPresent;
    }

    bool supportsFloatingPointTruncate() const
    {
        return false;
    }

    bool supportsFloatingPointSqrt() const
    {
        return s_isVFPPresent;
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.doubleTransfer(true, dest, address.base, address.offset);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
        m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.doubleTransfer(false, src, address.base, address.offset);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.faddd_r(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fdivd_r(dest, dest, src);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsubd_r(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmuld_r(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrtd_r(dest, src);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        // Move the int into the FP register file, then convert in place.
        m_assembler.fmsr_r(dest, src);
        m_assembler.fsitod_r(dest, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds does not worth the effort here
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        ASSERT_NOT_REACHED(); // Untested
        // flds does not worth the effort here
        m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
        m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmpd_r(left, right);
        // Copy the VFP status flags into the CPSR so an ordinary
        // conditional branch can test them.
        m_assembler.fmstat();
        // Special NaN handling: if the compare was unordered (V set), compare
        // a register with itself — executed only under VS — which forces an
        // "equal" flag state, flipping the outcome for DoubleNotEqual /
        // DoubleEqualOrUnordered as required.
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }

    // Truncates 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        // Unsupported here (see supportsFloatingPointTruncate()).
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        ASSERT_NOT_REACHED();
        return jump();
    }

    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    // NOTE(review): fpTemp is accepted for interface compatibility but this
    // implementation uses the fixed scratch SD0 instead.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.ftosid_r(ARMRegisters::SD0, src);
        m_assembler.fmrs_r(dest, ARMRegisters::SD0);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
        failureCases.append(branchTest32(Zero, dest));
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        // Materialize 0.0 in 'scratch' (via integer zero) and compare.
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

protected:
    // Condition is defined as the corresponding ARM condition code, so the
    // conversion is a plain cast.
    ARMAssembler::Condition ARMCondition(Condition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

    // On pre-ARMv5 targets (no BLX), set up the return address manually:
    // reading pc yields the address two instructions ahead, which is the
    // instruction following the jump emitted right after this sequence.
    void prepareCall()
    {
#if WTF_ARM_ARCH_VERSION < 5
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));

        m_assembler.mov_r(linkRegister, ARMRegisters::pc);
#endif
    }

    // Emit a call through a function pointer stored at [base + offset].
    // On >= v5 the target is loaded into S1 and called with BLX; on older
    // cores it is loaded directly into pc after prepareCall() set up lr.
    void call32(RegisterID base, int32_t offset)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        int targetReg = ARMRegisters::S1;
#else
        int targetReg = ARMRegisters::pc;
#endif
        int tmpReg = ARMRegisters::S1;

        // NOTE(review): presumably compensates for an sp-relative slot
        // consumed by the call sequence — confirm why sp-based offsets
        // are biased by 4 here.
        if (base == ARMRegisters::sp)
            offset += 4;

        if (offset >= 0) {
            if (offset <= 0xfff) {
                // Fits in LDR's 12-bit immediate offset.
                prepareCall();
                m_assembler.dtr_u(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                // Split into bits [19:12] (added via an operand2 immediate
                // with rotate field 10, i.e. rotate-right by 20) plus the
                // low 12 bits as the LDR offset.
                m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                // Arbitrary offset: materialize it and use register-offset LDR.
                ARMWord reg = m_assembler.getImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_ur(true, targetReg, base, reg);
            }
        } else {
            // Mirror of the positive cases using the 'down' (subtract) forms.
            offset = -offset;
            if (offset <= 0xfff) {
                prepareCall();
                m_assembler.dtr_d(true, targetReg, base, offset);
            } else if (offset <= 0xfffff) {
                m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8));
                prepareCall();
                m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff);
            } else {
                ARMWord reg = m_assembler.getImm(offset, tmpReg);
                prepareCall();
                m_assembler.dtr_dr(true, targetReg, base, reg);
            }
        }
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.blx(targetReg);
#endif
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Link-time binding of a Call record to its target function.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Runtime-detected VFP availability; defined in MacroAssemblerARM.cpp.
    static const bool s_isVFPPresent;
};
1015
1016 }
1017
1018 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
1019
1020 #endif // MacroAssemblerARM_h