]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/MacroAssemblerARMv7.h
JavaScriptCore-584.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssemblerARMv7.h
1 /*
2 * Copyright (C) 2009 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef MacroAssemblerARMv7_h
27 #define MacroAssemblerARMv7_h
28
29 #include <wtf/Platform.h>
30
31 #if ENABLE(ASSEMBLER)
32
33 #include "ARMv7Assembler.h"
34 #include "AbstractMacroAssembler.h"
35
36 namespace JSC {
37
38 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
39 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
40 // - dTR is likely used more than aTR, and we'll get better instruction
41 // encoding if it's in the low 8 registers.
42 static const ARMRegisters::RegisterID dataTempRegister = ARMRegisters::ip;
43 static const RegisterID addressTempRegister = ARMRegisters::r3;
44 static const FPRegisterID fpTempRegister = ARMRegisters::d7;
45
    // Internal descriptor for an ARM addressing mode: either
    // base-register + immediate offset, or base-register + (index << scale).
    struct ArmAddress {
        enum AddressType {
            HasOffset,
            HasIndex,
        } type;
        RegisterID base;
        union {
            int32_t offset;
            struct {
                RegisterID index;
                Scale scale;
            };
        } u;

        // Base + immediate-offset form (offset defaults to 0).
        explicit ArmAddress(RegisterID base, int32_t offset = 0)
            : type(HasOffset)
            , base(base)
        {
            u.offset = offset;
        }

        // Base + scaled-index form.
        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
            : type(HasIndex)
            , base(base)
        {
            u.index = index;
            u.scale = scale;
        }
    };
75
76 public:
77
78 static const Scale ScalePtr = TimesFour;
79
    // Integer comparison conditions, mapped directly onto ARM condition codes.
    // Note Zero/NonZero alias Equal/NotEqual (both are EQ/NE on the flags).
    enum Condition {
        Equal = ARMv7Assembler::ConditionEQ,
        NotEqual = ARMv7Assembler::ConditionNE,
        Above = ARMv7Assembler::ConditionHI,
        AboveOrEqual = ARMv7Assembler::ConditionHS,
        Below = ARMv7Assembler::ConditionLO,
        BelowOrEqual = ARMv7Assembler::ConditionLS,
        GreaterThan = ARMv7Assembler::ConditionGT,
        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        LessThan = ARMv7Assembler::ConditionLT,
        LessThanOrEqual = ARMv7Assembler::ConditionLE,
        Overflow = ARMv7Assembler::ConditionVS,
        Signed = ARMv7Assembler::ConditionMI,
        Zero = ARMv7Assembler::ConditionEQ,
        NonZero = ARMv7Assembler::ConditionNE
    };
    // Floating-point comparison conditions, applied to the flags after a
    // VCMP result has been transferred to APSR (see branchDouble).
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMv7Assembler::ConditionEQ,
        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        DoubleLessThan = ARMv7Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
    };
112
113 static const RegisterID stackPointerRegister = ARMRegisters::sp;
114 static const RegisterID linkRegister = ARMRegisters::lr;
115
116 // Integer arithmetic operations:
117 //
118 // Operations are typically two operand - operation(source, srcDst)
119 // For many operations the source may be an Imm32, the srcDst operand
120 // may often be a memory location (explictly described using an Address
121 // object).
122
    // dest += src.
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, dest, src);
    }

    // dest += imm; delegates to the three-operand form.
    void add32(Imm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }
132
133 void add32(Imm32 imm, RegisterID src, RegisterID dest)
134 {
135 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
136 if (armImm.isValid())
137 m_assembler.add(dest, src, armImm);
138 else {
139 move(imm, dataTempRegister);
140 m_assembler.add(dest, src, dataTempRegister);
141 }
142 }
143
    // [address] += imm (read-modify-write through dataTempRegister).
    void add32(Imm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest += [src].
    void add32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    // [absolute address] += imm (read-modify-write through dataTempRegister).
    void add32(Imm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }
183
    // dest &= src.
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ARM_and(dest, dest, src);
    }

    // dest &= imm; falls back to dataTempRegister for unencodable constants.
    void and32(Imm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.ARM_and(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.ARM_and(dest, dest, dataTempRegister);
        }
    }
199
    // dest <<= (shift_amount & 0x1f). The mask keeps the shift in 0..31,
    // matching the behavior expected by JIT clients on other platforms.
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.lsl(dest, dest, dataTempRegister);
    }

    // dest <<= (imm & 0x1f).
    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
    }
214
    // dest *= src. Uses SMULL; the high 32 bits of the product are
    // discarded into dataTempRegister.
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, dest, src);
    }

    // dest = src * imm; the immediate is materialized in dataTempRegister,
    // which also receives the discarded high word.
    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
    }
225
    // srcDest = ~srcDest (bitwise not, via MVN).
    void not32(RegisterID srcDest)
    {
        m_assembler.mvn(srcDest, srcDest);
    }

    // dest |= src.
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orr(dest, dest, src);
    }
235
236 void or32(Imm32 imm, RegisterID dest)
237 {
238 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
239 if (armImm.isValid())
240 m_assembler.orr(dest, dest, armImm);
241 else {
242 move(imm, dataTempRegister);
243 m_assembler.orr(dest, dest, dataTempRegister);
244 }
245 }
246
    // dest >>= (shift_amount & 0x1f), arithmetic (sign-preserving) shift.
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.asr(dest, dest, dataTempRegister);
    }

    // dest >>= (imm & 0x1f), arithmetic shift.
    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.asr(dest, dest, imm.m_value & 0x1f);
    }
261
    // dest -= src.
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub(dest, dest, src);
    }

    // dest -= imm; falls back to dataTempRegister for unencodable constants.
    void sub32(Imm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub(dest, dest, dataTempRegister);
        }
    }

    // [address] -= imm (read-modify-write through dataTempRegister).
    void sub32(Imm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest -= [src].
    void sub32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        sub32(dataTempRegister, dest);
    }

    // [absolute address] -= imm (read-modify-write through dataTempRegister).
    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }
317
    // dest ^= src.
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eor(dest, dest, src);
    }

    // dest ^= imm; falls back to dataTempRegister for unencodable constants.
    void xor32(Imm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.eor(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.eor(dest, dest, dataTempRegister);
        }
    }
333
334
335 // Memory access operations:
336 //
337 // Loads are of the form load(address, destination) and stores of the form
338 // store(source, address). The source for a store may be an Imm32. Address
339 // operand objects to loads and store will be implicitly constructed if a
340 // register is passed.
341
342 private:
    // Core word load: picks the register-index form, the positive unsigned
    // immediate form, or the small-negative immediate form based on the
    // address descriptor. Negative offsets are limited to -255 (asserted).
    void load32(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldr(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            // NOTE(review): trailing (true, false) presumably selects
            // pre-indexed, no-writeback — confirm against ARMv7Assembler::ldr.
            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
        }
    }

    // Core halfword load; same instruction-selection scheme as load32 above.
    void load16(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrh(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
        }
    }

    // Core word store; same instruction-selection scheme as load32 above.
    void store32(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.str(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.str(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.str(src, address.base, address.u.offset, true, false);
        }
    }
384
385 public:
    // Public load/store entry points; each converts its address operand to
    // an ArmAddress and dispatches to the private core implementations.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // On ARMv7 an ordinary word load is used even for potentially
    // unaligned halfword-aligned data.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // Load from an absolute address, materialized in addressTempRegister.
    void load32(void* address, RegisterID dest)
    {
        move(ImmPtr(address), addressTempRegister);
        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    // Load with a later-patchable 32-bit offset; the offset is emitted with
    // a fixed-width encoding so the patching code can rewrite it in place.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    // Patchable load used where x86 would patch an LEA; returns a label at
    // the start of the fixed-width offset materialization.
    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
    }

    // Store with a later-patchable 32-bit offset (see load32WithAddressOffsetPatch).
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
        store32(src, ArmAddress(address.base, dataTempRegister));
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(RegisterID src, BaseIndex address)
    {
        store32(src, setupArmAddress(address));
    }

    // Store an immediate: materialize it in dataTempRegister first.
    void store32(Imm32 imm, ImplicitAddress address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    // Store to an absolute address, materialized in addressTempRegister.
    void store32(RegisterID src, void* address)
    {
        move(ImmPtr(address), addressTempRegister);
        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void store32(Imm32 imm, void* address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, address);
    }
461
462
463 // Floating-point operations:
464
    // VFP is assumed present on ARMv7 targets.
    bool supportsFloatingPoint() const { return true; }
    // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
    // If a value is not representable as an integer, and possibly for some values that are,
    // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
    // a branch will be taken. It is not clear whether this interface will be well suited to
    // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
    // failure values (saturates to INT_MIN & INT_MAX, NaN results in a value of 0). This is a
    // temporary solution while we work out what this interface should be. Either we need to
    // decide to make this interface work on all platforms, rework the interface to make it more
    // generic, or decide that the MacroAssembler cannot practically be used to abstracted these
    // operations, and make clients go directly to the m_assembler to plant truncation instructions.
    // In short, FIXME:.
    bool supportsFloatingPointTruncate() const { return false; }
478
    // Load a double via VLDR. VFP addressing only accepts word-aligned
    // offsets within +/-(255*4); anything else is folded into
    // addressTempRegister first.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(Imm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vldr(dest, base, offset);
    }

    // Store a double via VSTR; same offset-range handling as loadDouble.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(Imm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vstr(src, base, offset);
    }
508
    // dest += src (double precision).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_F64(dest, dest, src);
    }

    // dest += [src], staging the memory operand in fpTempRegister.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest -= src (double precision).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_F64(dest, dest, src);
    }

    // dest -= [src], staging the memory operand in fpTempRegister.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // dest *= src (double precision).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_F64(dest, dest, src);
    }

    // dest *= [src], staging the memory operand in fpTempRegister.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    // dest = (double)src: transfer the integer into a VFP register, then
    // convert signed 32-bit to double.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov(fpTempRegister, src);
        m_assembler.vcvt_F64_S32(dest, fpTempRegister);
    }
547
    // Compare two doubles (VCMP, flags moved to APSR) and branch on cond.
    // DoubleNotEqual and DoubleEqualOrUnordered have no single ARM condition
    // code with the required NaN behavior, so they are synthesized from
    // multiple branches below; all other conditions map directly.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_F64(left, right);
        m_assembler.vmrs_APSR_nzcv_FPSCR();

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered, or equal.
            Jump result = makeJump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }
571
    // Unimplemented on ARMv7 (see supportsFloatingPointTruncate above,
    // which returns false); asserts if ever reached.
    Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
    {
        ASSERT_NOT_REACHED();
        return jump();
    }
577
578
579 // Stack manipulation operations:
580 //
581 // The ABI is assumed to provide a stack abstraction to memory,
582 // containing machine word sized units of data. Push and pop
583 // operations add and remove a single register sized unit of data
584 // to or from the stack. Peek and poke operations read or write
585 // values on the stack, without moving the current stack position.
586
    // Pop a word from the stack into dest.
    void pop(RegisterID dest)
    {
        // load postindexed with writeback (the original comment said
        // "store", but this is an ldr)
        m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
    }

    // Push src onto the stack.
    void push(RegisterID src)
    {
        // store preindexed with writeback
        m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
    }

    // Push the word at address, staged through dataTempRegister.
    void push(Address address)
    {
        load32(address, dataTempRegister);
        push(dataTempRegister);
    }

    // Push an immediate, staged through dataTempRegister.
    void push(Imm32 imm)
    {
        move(imm, dataTempRegister);
        push(dataTempRegister);
    }
610
611 // Register move operations:
612 //
613 // Move values in registers.
614
    // Load a 32-bit constant into dest. Pointer immediates always use the
    // fixed-width movw/movt encoding so they can be patched later; other
    // values pick the shortest of: encoded MOV, encoded MVN of the
    // complement, or MOVW (plus MOVT when the high halfword is non-zero).
    void move(Imm32 imm, RegisterID dest)
    {
        uint32_t value = imm.m_value;

        if (imm.m_isPointer)
            moveFixedWidthEncoding(imm, dest);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);

            if (armImm.isValid())
                m_assembler.mov(dest, armImm);
            else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
                m_assembler.mvn(dest, armImm);
            else {
                m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
                if (value & 0xffff0000)
                    m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
            }
        }
    }
635
    // Register-to-register move.
    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov(dest, src);
    }

    // Pointer immediate move; routes through move(Imm32, ...), which uses
    // the patchable fixed-width encoding for pointer values.
    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm), dest);
    }

    // Exchange two registers via dataTempRegister.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, dataTempRegister);
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    // Pointers are 32-bit on this target, so sign/zero extension to
    // pointer width is just a move (elided when src == dest).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }
664
665
666 // Forwards / external control flow operations:
667 //
668 // This set of jump and conditional branch operations return a Jump
669 // object which may linked at a later point, allow forwards jump,
670 // or jumps that will require external linkage (after the code has been
671 // relocated).
672 //
673 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
674 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
675 // used (representing the names 'below' and 'above').
676 //
677 // Operands to the comparision are provided in the expected order, e.g.
678 // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
679 // treated as a signed 32bit value, is less than or equal to 5.
680 //
681 // jz and jnz test whether the first operand is equal to zero, and take
682 // an optional second operand of a mask under which to perform the test.
683 private:
684
685 // Should we be using TEQ for equal/not-equal?
686 void compare32(RegisterID left, Imm32 right)
687 {
688 int32_t imm = right.m_value;
689 if (!imm)
690 m_assembler.tst(left, left);
691 else {
692 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
693 if (armImm.isValid())
694 m_assembler.cmp(left, armImm);
695 if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
696 m_assembler.cmn(left, armImm);
697 else {
698 move(Imm32(imm), dataTempRegister);
699 m_assembler.cmp(left, dataTempRegister);
700 }
701 }
702 }
703
    // Set flags for (reg & mask). A mask of -1 tests the register itself;
    // unencodable masks are materialized in dataTempRegister.
    void test32(RegisterID reg, Imm32 mask)
    {
        int32_t imm = mask.m_value;

        if (imm == -1)
            m_assembler.tst(reg, reg);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid())
                m_assembler.tst(reg, armImm);
            else {
                move(mask, dataTempRegister);
                m_assembler.tst(reg, dataTempRegister);
            }
        }
    }
720
721 public:
    // Compare-and-branch family: each overload performs the comparison and
    // returns an unlinked Jump taken when cond holds. Memory operands are
    // staged through a temp register; overloads that then call an
    // immediate-comparing branch32 stage through addressTempRegister, since
    // compare32 may itself need dataTempRegister.
    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        load32(right, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(Condition cond, BaseIndex left, Imm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32WithUnalignedHalfWords(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left.m_ptr, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }
779
    // 16-bit compare-and-branch. Both operands are shifted left 16 so the
    // halfword values are compared in the registers' high halves, keeping
    // the condition-code semantics of a full 32-bit comparison.
    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        load16(left, dataTempRegister);
        m_assembler.lsl(addressTempRegister, right, 16);
        m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
        return branch32(cond, dataTempRegister, addressTempRegister);
    }

    Jump branch16(Condition cond, BaseIndex left, Imm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load16(left, addressTempRegister);
        m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
        return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
    }
795
    // Test-and-branch family: branch if (reg & mask) is zero / non-zero.
    // Only Zero and NonZero conditions are meaningful (asserted).
    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }
825
    // Unconditional, unlinked forward jump.
    Jump jump()
    {
        return Jump(makeJump());
    }

    // Indirect jump to the address in target.
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }
842
843
844 // Arithmetic control flow operations:
845 //
846 // This set of conditional branch operations branch based
847 // on the result of an arithmetic operation. The operation
848 // is performed as normal, storing the result.
849 //
850 // * jz operations branch if the result is zero.
851 // * jo operations branch if the (signed) arithmetic
852 // operation caused an overflow to occur.
853
    // Flag-setting arithmetic followed by a conditional branch on the
    // result (overflow, sign, zero/non-zero as requested by cond).
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.add_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add_S(dest, dest, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Multiply with overflow check: SMULL produces the full 64-bit product
    // (high word in dataTempRegister); the result overflowed 32 bits iff
    // the high word differs from the sign-extension of the low word.
    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT_UNUSED(cond, cond == Overflow);
        m_assembler.smull(dest, dataTempRegister, dest, src);
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT_UNUSED(cond, cond == Overflow);
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.sub_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub_S(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub_S(dest, dest, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
910
911
912 // Miscellaneous operations:
913
    // Emit a breakpoint instruction.
    void breakpoint()
    {
        m_assembler.bkpt();
    }

    // Linkable near call: the callee address (initially 0) is emitted with
    // the fixed-width encoding so the linker can patch it.
    Call nearCall()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }

    // Linkable call via a patchable constant in dataTempRegister.
    Call call()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }

    // Indirect call through a register.
    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    // Indirect call through a memory location.
    Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }

    // Return via the link register.
    void ret()
    {
        m_assembler.bx(linkRegister);
    }
946
    // Materialize a comparison result as 0/1 in dest. The it(..., false)
    // presumably makes the following two movs an if/else pair (mov 1 when
    // cond holds, mov 0 otherwise) — confirm against ARMv7Assembler::it.
    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // As above, comparing against an immediate via compare32.
    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        compare32(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // FIXME:
    // The mask should be optional... paerhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    //
    // Materialize ([address] & mask) zero/non-zero as 0/1 in dest.
    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
975
976
977 DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
978 {
979 moveFixedWidthEncoding(imm, dst);
980 return DataLabel32(this);
981 }
982
983 DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
984 {
985 moveFixedWidthEncoding(Imm32(imm), dst);
986 return DataLabelPtr(this);
987 }
988
    // Compare 'left' against a patchable pointer constant (initially
    // initialRightValue) and branch on 'cond'. dataLabel receives the
    // location of the patchable constant. Clobbers dataTempRegister.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }
994
    // Compare a pointer loaded from memory against a patchable pointer
    // constant and branch on 'cond'. dataLabel receives the location of the
    // patchable constant. Clobbers both addressTempRegister (the loaded
    // left operand) and dataTempRegister (the patchable right operand).
    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }
1001
    // Store a patchable pointer constant (initially initialValue) to memory;
    // returns the label of the patchable constant. Clobbers dataTempRegister.
    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
    // Convenience overload: store a null placeholder, to be patched later.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
1009
1010
    // Emit a linkable tail call: a jump (bx, no lr update) through a
    // fixed-width zero placeholder that is filled in at link time.
    Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }
1017
    // Convert a previously-emitted jump into a tail call: bind the jump to
    // the current location, then emit the tail-call sequence there.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
1023
1024
protected:
    // Emit an unconditional jump through a patchable fixed-width constant
    // (placeholder 0, rewritten at link time). Clobbers dataTempRegister.
    ARMv7Assembler::JmpSrc makeJump()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister);
    }
1031
    // Emit a conditional jump: the IT block predicates the following
    // instructions (the two-instruction fixed-width constant move plus the
    // bx) on 'cond', so the whole sequence is skipped when cond fails.
    // Clobbers dataTempRegister.
    ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister);
    }
    // Convenience overloads converting the macroassembler condition enums.
    ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
    ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1040
1041 ArmAddress setupArmAddress(BaseIndex address)
1042 {
1043 if (address.offset) {
1044 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1045 if (imm.isValid())
1046 m_assembler.add(addressTempRegister, address.base, imm);
1047 else {
1048 move(Imm32(address.offset), addressTempRegister);
1049 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1050 }
1051
1052 return ArmAddress(addressTempRegister, address.index, address.scale);
1053 } else
1054 return ArmAddress(address.base, address.index, address.scale);
1055 }
1056
1057 ArmAddress setupArmAddress(Address address)
1058 {
1059 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1060 return ArmAddress(address.base, address.offset);
1061
1062 move(Imm32(address.offset), addressTempRegister);
1063 return ArmAddress(address.base, addressTempRegister);
1064 }
1065
1066 ArmAddress setupArmAddress(ImplicitAddress address)
1067 {
1068 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1069 return ArmAddress(address.base, address.offset);
1070
1071 move(Imm32(address.offset), addressTempRegister);
1072 return ArmAddress(address.base, addressTempRegister);
1073 }
1074
1075 RegisterID makeBaseIndexBase(BaseIndex address)
1076 {
1077 if (!address.offset)
1078 return address.base;
1079
1080 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1081 if (imm.isValid())
1082 m_assembler.add(addressTempRegister, address.base, imm);
1083 else {
1084 move(Imm32(address.offset), addressTempRegister);
1085 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1086 }
1087
1088 return addressTempRegister;
1089 }
1090
1091 void moveFixedWidthEncoding(Imm32 imm, RegisterID dst)
1092 {
1093 uint32_t value = imm.m_value;
1094 m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
1095 m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
1096 }
1097
    // The macroassembler Condition enum values are defined to mirror the
    // ARMv7 condition codes, so conversion is a plain cast.
    ARMv7Assembler::Condition armV7Condition(Condition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    // DoubleCondition values likewise map directly onto ARMv7 condition codes.
    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
1107
private:
    // LinkBuffer and RepatchBuffer invoke the static link/repatch helpers below.
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Bind a linkable call site emitted in 'code' to 'function' at link time.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
    }
1116
    // Repoint an already-linked call site at a new code location.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1121
    // Repoint an already-linked call site at a new C function.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1126 };
1127
1128 } // namespace JSC
1129
1130 #endif // ENABLE(ASSEMBLER)
1131
1132 #endif // MacroAssemblerARMv7_h