1 /*
2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
29
30 #if ENABLE(ASSEMBLER)
31
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
34
35 namespace JSC {
36
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
38 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
39 // - dTR is likely used more than aTR, and we'll get better instruction
40 // encoding if it's in the low 8 registers.
41 static const RegisterID dataTempRegister = ARMRegisters::ip;
42 static const RegisterID addressTempRegister = ARMRegisters::r3;
43
44 static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
45 inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
46
47 public:
48 typedef ARMv7Assembler::LinkRecord LinkRecord;
49 typedef ARMv7Assembler::JumpType JumpType;
50 typedef ARMv7Assembler::JumpLinkType JumpLinkType;
51
52 MacroAssemblerARMv7()
53 : m_inUninterruptedSequence(false)
54 {
55 }
56
57 void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
58 void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
59 Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
60 void* unlinkedCode() { return m_assembler.unlinkedCode(); }
61 bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
62 JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
63 JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
64 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
65 int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
66 void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
67
68 struct ArmAddress {
69 enum AddressType {
70 HasOffset,
71 HasIndex,
72 } type;
73 RegisterID base;
74 union {
75 int32_t offset;
76 struct {
77 RegisterID index;
78 Scale scale;
79 };
80 } u;
81
82 explicit ArmAddress(RegisterID base, int32_t offset = 0)
83 : type(HasOffset)
84 , base(base)
85 {
86 u.offset = offset;
87 }
88
89 explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
90 : type(HasIndex)
91 , base(base)
92 {
93 u.index = index;
94 u.scale = scale;
95 }
96 };
97
98 public:
99 typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;
100
101 static const Scale ScalePtr = TimesFour;
102
103 enum Condition {
104 Equal = ARMv7Assembler::ConditionEQ,
105 NotEqual = ARMv7Assembler::ConditionNE,
106 Above = ARMv7Assembler::ConditionHI,
107 AboveOrEqual = ARMv7Assembler::ConditionHS,
108 Below = ARMv7Assembler::ConditionLO,
109 BelowOrEqual = ARMv7Assembler::ConditionLS,
110 GreaterThan = ARMv7Assembler::ConditionGT,
111 GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
112 LessThan = ARMv7Assembler::ConditionLT,
113 LessThanOrEqual = ARMv7Assembler::ConditionLE,
114 Overflow = ARMv7Assembler::ConditionVS,
115 Signed = ARMv7Assembler::ConditionMI,
116 Zero = ARMv7Assembler::ConditionEQ,
117 NonZero = ARMv7Assembler::ConditionNE
118 };
119 enum DoubleCondition {
120 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
121 DoubleEqual = ARMv7Assembler::ConditionEQ,
122 DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
123 DoubleGreaterThan = ARMv7Assembler::ConditionGT,
124 DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
125 DoubleLessThan = ARMv7Assembler::ConditionLO,
126 DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
127 // If either operand is NaN, these conditions always evaluate to true.
128 DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
129 DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
130 DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
131 DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
132 DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
133 DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
134 };
135
136 static const RegisterID stackPointerRegister = ARMRegisters::sp;
137 static const RegisterID linkRegister = ARMRegisters::lr;
138
139 // Integer arithmetic operations:
140 //
141    // Operations are typically two-operand - operation(source, srcDst).
142    // For many operations the source may be an Imm32; the srcDst operand
143    // may often be a memory location (explicitly described using an Address
144    // object).
145
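    // Illustrative usage (added commentary; 'masm', 'regT0' and 'regT1' are
    // hypothetical client-side names, not part of this header):
    //     masm.add32(Imm32(4), regT0);           // regT0 += 4
    //     masm.add32(regT1, regT0);              // regT0 += regT1
    //     masm.sub32(Imm32(1), Address(regT0));  // [regT0] -= 1, staged through the temps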
146 void add32(RegisterID src, RegisterID dest)
147 {
148 m_assembler.add(dest, dest, src);
149 }
150
151 void add32(Imm32 imm, RegisterID dest)
152 {
153 add32(imm, dest, dest);
154 }
155
156 void add32(Imm32 imm, RegisterID src, RegisterID dest)
157 {
158 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
159 if (armImm.isValid())
160 m_assembler.add(dest, src, armImm);
161 else {
162 move(imm, dataTempRegister);
163 m_assembler.add(dest, src, dataTempRegister);
164 }
165 }
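    // Note (added commentary): makeUInt12OrEncodedImm() succeeds for values that
    // fit either the plain 12-bit ADD/SUB immediate or the Thumb-2 "modified
    // immediate" forms (a rotated 8-bit value or a replicated byte pattern);
    // anything else falls back to materializing the constant in dataTempRegister.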
166
167 void add32(Imm32 imm, Address address)
168 {
169 load32(address, dataTempRegister);
170
171 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
172 if (armImm.isValid())
173 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
174 else {
175 // Hrrrm, since dataTempRegister holds the data loaded,
176 // use addressTempRegister to hold the immediate.
177 move(imm, addressTempRegister);
178 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
179 }
180
181 store32(dataTempRegister, address);
182 }
183
184 void add32(Address src, RegisterID dest)
185 {
186 load32(src, dataTempRegister);
187 add32(dataTempRegister, dest);
188 }
189
190 void add32(Imm32 imm, AbsoluteAddress address)
191 {
192 load32(address.m_ptr, dataTempRegister);
193
194 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
195 if (armImm.isValid())
196 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
197 else {
198 // Hrrrm, since dataTempRegister holds the data loaded,
199 // use addressTempRegister to hold the immediate.
200 move(imm, addressTempRegister);
201 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
202 }
203
204 store32(dataTempRegister, address.m_ptr);
205 }
206
207 void and32(RegisterID src, RegisterID dest)
208 {
209 m_assembler.ARM_and(dest, dest, src);
210 }
211
212 void and32(Imm32 imm, RegisterID dest)
213 {
214 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
215 if (armImm.isValid())
216 m_assembler.ARM_and(dest, dest, armImm);
217 else {
218 move(imm, dataTempRegister);
219 m_assembler.ARM_and(dest, dest, dataTempRegister);
220 }
221 }
222
223 void lshift32(RegisterID shift_amount, RegisterID dest)
224 {
225 // Clamp the shift to the range 0..31
226 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
227 ASSERT(armImm.isValid());
228 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
229
230 m_assembler.lsl(dest, dest, dataTempRegister);
231 }
232
233 void lshift32(Imm32 imm, RegisterID dest)
234 {
235 m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
236 }
237
238 void mul32(RegisterID src, RegisterID dest)
239 {
240 m_assembler.smull(dest, dataTempRegister, dest, src);
241 }
242
243 void mul32(Imm32 imm, RegisterID src, RegisterID dest)
244 {
245 move(imm, dataTempRegister);
246 m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
247 }
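    // Note (added commentary): SMULL writes a 64-bit product - the low word to
    // 'dest' and the high word to dataTempRegister, which is simply discarded
    // here. branchMul32() below reuses the high word to detect signed overflow.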
248
249 void neg32(RegisterID srcDest)
250 {
251 m_assembler.neg(srcDest, srcDest);
252 }
253
254 void not32(RegisterID srcDest)
255 {
256 m_assembler.mvn(srcDest, srcDest);
257 }
258
259 void or32(RegisterID src, RegisterID dest)
260 {
261 m_assembler.orr(dest, dest, src);
262 }
263
264 void or32(Imm32 imm, RegisterID dest)
265 {
266 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
267 if (armImm.isValid())
268 m_assembler.orr(dest, dest, armImm);
269 else {
270 move(imm, dataTempRegister);
271 m_assembler.orr(dest, dest, dataTempRegister);
272 }
273 }
274
275 void rshift32(RegisterID shift_amount, RegisterID dest)
276 {
277 // Clamp the shift to the range 0..31
278 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
279 ASSERT(armImm.isValid());
280 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
281
282 m_assembler.asr(dest, dest, dataTempRegister);
283 }
284
285 void rshift32(Imm32 imm, RegisterID dest)
286 {
287 m_assembler.asr(dest, dest, imm.m_value & 0x1f);
288 }
289
290 void urshift32(RegisterID shift_amount, RegisterID dest)
291 {
292 // Clamp the shift to the range 0..31
293 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
294 ASSERT(armImm.isValid());
295 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
296
297 m_assembler.lsr(dest, dest, dataTempRegister);
298 }
299
300 void urshift32(Imm32 imm, RegisterID dest)
301 {
302 m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
303 }
304
305 void sub32(RegisterID src, RegisterID dest)
306 {
307 m_assembler.sub(dest, dest, src);
308 }
309
310 void sub32(Imm32 imm, RegisterID dest)
311 {
312 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
313 if (armImm.isValid())
314 m_assembler.sub(dest, dest, armImm);
315 else {
316 move(imm, dataTempRegister);
317 m_assembler.sub(dest, dest, dataTempRegister);
318 }
319 }
320
321 void sub32(Imm32 imm, Address address)
322 {
323 load32(address, dataTempRegister);
324
325 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
326 if (armImm.isValid())
327 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
328 else {
329 // Hrrrm, since dataTempRegister holds the data loaded,
330 // use addressTempRegister to hold the immediate.
331 move(imm, addressTempRegister);
332 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
333 }
334
335 store32(dataTempRegister, address);
336 }
337
338 void sub32(Address src, RegisterID dest)
339 {
340 load32(src, dataTempRegister);
341 sub32(dataTempRegister, dest);
342 }
343
344 void sub32(Imm32 imm, AbsoluteAddress address)
345 {
346 load32(address.m_ptr, dataTempRegister);
347
348 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
349 if (armImm.isValid())
350 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
351 else {
352 // Hrrrm, since dataTempRegister holds the data loaded,
353 // use addressTempRegister to hold the immediate.
354 move(imm, addressTempRegister);
355 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
356 }
357
358 store32(dataTempRegister, address.m_ptr);
359 }
360
361 void xor32(RegisterID src, RegisterID dest)
362 {
363 m_assembler.eor(dest, dest, src);
364 }
365
366 void xor32(Imm32 imm, RegisterID dest)
367 {
368 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
369 if (armImm.isValid())
370 m_assembler.eor(dest, dest, armImm);
371 else {
372 move(imm, dataTempRegister);
373 m_assembler.eor(dest, dest, dataTempRegister);
374 }
375 }
376
377
378 // Memory access operations:
379 //
380 // Loads are of the form load(address, destination) and stores of the form
381 // store(source, address). The source for a store may be an Imm32. Address
382 // operand objects to loads and store will be implicitly constructed if a
383 // register is passed.
384
385 private:
386 void load32(ArmAddress address, RegisterID dest)
387 {
388 if (address.type == ArmAddress::HasIndex)
389 m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
390 else if (address.u.offset >= 0) {
391 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
392 ASSERT(armImm.isValid());
393 m_assembler.ldr(dest, address.base, armImm);
394 } else {
395 ASSERT(address.u.offset >= -255);
396 m_assembler.ldr(dest, address.base, address.u.offset, true, false);
397 }
398 }
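    // Note (added commentary): the three cases above map to the Thumb-2 load
    // forms - register offset (LDR Rt, [Rn, Rm, LSL #scale]), positive 12-bit
    // immediate offset, and the negative-offset form, which only encodes an
    // 8-bit magnitude (hence the offset >= -255 assertion).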
399
400 void load16(ArmAddress address, RegisterID dest)
401 {
402 if (address.type == ArmAddress::HasIndex)
403 m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
404 else if (address.u.offset >= 0) {
405 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
406 ASSERT(armImm.isValid());
407 m_assembler.ldrh(dest, address.base, armImm);
408 } else {
409 ASSERT(address.u.offset >= -255);
410 m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
411 }
412 }
413
414 void load8(ArmAddress address, RegisterID dest)
415 {
416 if (address.type == ArmAddress::HasIndex)
417 m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
418 else if (address.u.offset >= 0) {
419 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
420 ASSERT(armImm.isValid());
421 m_assembler.ldrb(dest, address.base, armImm);
422 } else {
423 ASSERT(address.u.offset >= -255);
424 m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
425 }
426 }
427
428 void store32(RegisterID src, ArmAddress address)
429 {
430 if (address.type == ArmAddress::HasIndex)
431 m_assembler.str(src, address.base, address.u.index, address.u.scale);
432 else if (address.u.offset >= 0) {
433 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
434 ASSERT(armImm.isValid());
435 m_assembler.str(src, address.base, armImm);
436 } else {
437 ASSERT(address.u.offset >= -255);
438 m_assembler.str(src, address.base, address.u.offset, true, false);
439 }
440 }
441
442 public:
443 void load32(ImplicitAddress address, RegisterID dest)
444 {
445 load32(setupArmAddress(address), dest);
446 }
447
448 void load32(BaseIndex address, RegisterID dest)
449 {
450 load32(setupArmAddress(address), dest);
451 }
452
453 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
454 {
455 load32(setupArmAddress(address), dest);
456 }
457
458 void load32(void* address, RegisterID dest)
459 {
460 move(ImmPtr(address), addressTempRegister);
461 m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
462 }
463
464 void load8(ImplicitAddress address, RegisterID dest)
465 {
466 load8(setupArmAddress(address), dest);
467 }
468
469 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
470 {
471 DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
472 load32(ArmAddress(address.base, dataTempRegister), dest);
473 return label;
474 }
475
476 Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
477 {
478 Label label(this);
479 moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
480 load32(ArmAddress(address.base, dataTempRegister), dest);
481 return label;
482 }
483
484 void load16(BaseIndex address, RegisterID dest)
485 {
486 m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
487 }
488
489 void load16(ImplicitAddress address, RegisterID dest)
490 {
491 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
492 if (armImm.isValid())
493 m_assembler.ldrh(dest, address.base, armImm);
494 else {
495 move(Imm32(address.offset), dataTempRegister);
496 m_assembler.ldrh(dest, address.base, dataTempRegister);
497 }
498 }
499
500 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
501 {
502 DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
503 store32(src, ArmAddress(address.base, dataTempRegister));
504 return label;
505 }
506
507 void store32(RegisterID src, ImplicitAddress address)
508 {
509 store32(src, setupArmAddress(address));
510 }
511
512 void store32(RegisterID src, BaseIndex address)
513 {
514 store32(src, setupArmAddress(address));
515 }
516
517 void store32(Imm32 imm, ImplicitAddress address)
518 {
519 move(imm, dataTempRegister);
520 store32(dataTempRegister, setupArmAddress(address));
521 }
522
523 void store32(RegisterID src, void* address)
524 {
525 move(ImmPtr(address), addressTempRegister);
526 m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
527 }
528
529 void store32(Imm32 imm, void* address)
530 {
531 move(imm, dataTempRegister);
532 store32(dataTempRegister, address);
533 }
534
535
536 // Floating-point operations:
537
538 bool supportsFloatingPoint() const { return true; }
539 // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
540 // If a value is not representable as an integer, and possibly for some values that are,
541 // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
542 // a branch will be taken. It is not clear whether this interface will be well suited to
543 // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
544    // failure values (saturates to INT_MIN & INT_MAX, NaN results in a value of 0). This is a
545 // temporary solution while we work out what this interface should be. Either we need to
546 // decide to make this interface work on all platforms, rework the interface to make it more
547    // generic, or decide that the MacroAssembler cannot practically be used to abstract these
548 // operations, and make clients go directly to the m_assembler to plant truncation instructions.
549 // In short, FIXME:.
550 bool supportsFloatingPointTruncate() const { return false; }
551
552 bool supportsFloatingPointSqrt() const
553 {
554 return false;
555 }
556
557 void loadDouble(ImplicitAddress address, FPRegisterID dest)
558 {
559 RegisterID base = address.base;
560 int32_t offset = address.offset;
561
562        // ARM VFP addresses can only be offset by an 8-bit immediate (plus an add/subtract bit), scaled by 4 - i.e. offsets in the range +/-(255 * 4).
563 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
564 add32(Imm32(offset), base, addressTempRegister);
565 base = addressTempRegister;
566 offset = 0;
567 }
568
569 m_assembler.vldr(dest, base, offset);
570 }
571
572 void loadDouble(const void* address, FPRegisterID dest)
573 {
574 move(ImmPtr(address), addressTempRegister);
575 m_assembler.vldr(dest, addressTempRegister, 0);
576 }
577
578 void storeDouble(FPRegisterID src, ImplicitAddress address)
579 {
580 RegisterID base = address.base;
581 int32_t offset = address.offset;
582
583        // ARM VFP addresses can only be offset by an 8-bit immediate (plus an add/subtract bit), scaled by 4 - i.e. offsets in the range +/-(255 * 4).
584 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
585 add32(Imm32(offset), base, addressTempRegister);
586 base = addressTempRegister;
587 offset = 0;
588 }
589
590 m_assembler.vstr(src, base, offset);
591 }
592
593 void addDouble(FPRegisterID src, FPRegisterID dest)
594 {
595 m_assembler.vadd_F64(dest, dest, src);
596 }
597
598 void addDouble(Address src, FPRegisterID dest)
599 {
600 loadDouble(src, fpTempRegister);
601 addDouble(fpTempRegister, dest);
602 }
603
604 void divDouble(FPRegisterID src, FPRegisterID dest)
605 {
606 m_assembler.vdiv_F64(dest, dest, src);
607 }
608
609 void subDouble(FPRegisterID src, FPRegisterID dest)
610 {
611 m_assembler.vsub_F64(dest, dest, src);
612 }
613
614 void subDouble(Address src, FPRegisterID dest)
615 {
616 loadDouble(src, fpTempRegister);
617 subDouble(fpTempRegister, dest);
618 }
619
620 void mulDouble(FPRegisterID src, FPRegisterID dest)
621 {
622 m_assembler.vmul_F64(dest, dest, src);
623 }
624
625 void mulDouble(Address src, FPRegisterID dest)
626 {
627 loadDouble(src, fpTempRegister);
628 mulDouble(fpTempRegister, dest);
629 }
630
631 void sqrtDouble(FPRegisterID, FPRegisterID)
632 {
633 ASSERT_NOT_REACHED();
634 }
635
636 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
637 {
638 m_assembler.vmov(fpTempRegisterAsSingle(), src);
639 m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
640 }
641
642 void convertInt32ToDouble(Address address, FPRegisterID dest)
643 {
644 // Fixme: load directly into the fpr!
645 load32(address, dataTempRegister);
646 m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
647 m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
648 }
649
650 void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
651 {
652 // Fixme: load directly into the fpr!
653 load32(address.m_ptr, dataTempRegister);
654 m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
655 m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
656 }
657
658 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
659 {
660 m_assembler.vcmp_F64(left, right);
661 m_assembler.vmrs();
662
663 if (cond == DoubleNotEqual) {
664 // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
665 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
666 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
667 unordered.link(this);
668 return result;
669 }
670 if (cond == DoubleEqualOrUnordered) {
671 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
672 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
673 unordered.link(this);
674 // We get here if either unordered or equal.
675 Jump result = makeJump();
676 notEqual.link(this);
677 return result;
678 }
679 return makeBranch(cond);
680 }
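    // Note (added commentary): vcmp_F64() followed by vmrs() copies the VFP
    // comparison flags (FPSCR.NZCV) into the APSR so the integer condition codes
    // above can test them. DoubleNotEqual and DoubleEqualOrUnordered need the
    // extra unordered (VS) branch because no single ARM condition expresses
    // "not equal and ordered" or "equal or unordered".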
681
682 Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
683 {
684 ASSERT_NOT_REACHED();
685 return jump();
686 }
687
688    // Converts 'src' to an integer, and places the result in 'dest'.
689    // If the result is not representable as a 32-bit value, branch.
690 // May also branch for some values that are representable in 32 bits
691 // (specifically, in this case, 0).
692 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
693 {
694 m_assembler.vcvtr_S32_F64(fpTempRegisterAsSingle(), src);
695 m_assembler.vmov(dest, fpTempRegisterAsSingle());
696
697 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
698 m_assembler.vcvt_F64_S32(fpTempRegister, fpTempRegisterAsSingle());
699 failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
700
701 // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
702 failureCases.append(branchTest32(Zero, dest));
703 }
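    // Note (added commentary): VCVTR converts using the current FPSCR rounding
    // mode (unlike VCVT, which always rounds toward zero), so the
    // convert-back-and-compare above rejects any input that did not convert
    // exactly. The extra zero check catches -0.0, which converts to 0 and then
    // compares equal to +0.0 on the way back.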
704
705 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
706 {
707 m_assembler.vcmpz_F64(reg);
708 m_assembler.vmrs();
709 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
710 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
711 unordered.link(this);
712 return result;
713 }
714
715 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
716 {
717 m_assembler.vcmpz_F64(reg);
718 m_assembler.vmrs();
719 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
720 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
721 unordered.link(this);
722 // We get here if either unordered or equal.
723 Jump result = makeJump();
724 notEqual.link(this);
725 return result;
726 }
727
728 // Stack manipulation operations:
729 //
730 // The ABI is assumed to provide a stack abstraction to memory,
731 // containing machine word sized units of data. Push and pop
732 // operations add and remove a single register sized unit of data
733 // to or from the stack. Peek and poke operations read or write
734 // values on the stack, without moving the current stack position.
735
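    // Illustrative usage (added commentary; 'masm', 'regT0' and 'regT1' are hypothetical):
    //     masm.push(regT0);   // STR regT0, [sp, #-4]!
    //     masm.pop(regT1);    // LDR regT1, [sp], #4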
736 void pop(RegisterID dest)
737 {
738        // load postindexed with writeback
739 m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
740 }
741
742 void push(RegisterID src)
743 {
744 // store preindexed with writeback
745 m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
746 }
747
748 void push(Address address)
749 {
750 load32(address, dataTempRegister);
751 push(dataTempRegister);
752 }
753
754 void push(Imm32 imm)
755 {
756 move(imm, dataTempRegister);
757 push(dataTempRegister);
758 }
759
760 // Register move operations:
761 //
762 // Move values in registers.
763
764 void move(Imm32 imm, RegisterID dest)
765 {
766 uint32_t value = imm.m_value;
767
768 if (imm.m_isPointer)
769 moveFixedWidthEncoding(imm, dest);
770 else {
771 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
772
773 if (armImm.isValid())
774 m_assembler.mov(dest, armImm);
775 else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
776 m_assembler.mvn(dest, armImm);
777 else {
778 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
779 if (value & 0xffff0000)
780 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
781 }
782 }
783 }
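    // Note (added commentary): non-pointer immediates are encoded with, in order
    // of preference, an encoded MOV, an MVN of the bitwise complement, or a MOVW
    // (plus MOVT when the upper half-word is non-zero). Pointer immediates always
    // take the fixed-width MOVW/MOVT path, presumably so the two-instruction
    // sequence can later be repatched in place.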
784
785 void move(RegisterID src, RegisterID dest)
786 {
787 m_assembler.mov(dest, src);
788 }
789
790 void move(ImmPtr imm, RegisterID dest)
791 {
792 move(Imm32(imm), dest);
793 }
794
795 void swap(RegisterID reg1, RegisterID reg2)
796 {
797 move(reg1, dataTempRegister);
798 move(reg2, reg1);
799 move(dataTempRegister, reg2);
800 }
801
802 void signExtend32ToPtr(RegisterID src, RegisterID dest)
803 {
804 if (src != dest)
805 move(src, dest);
806 }
807
808 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
809 {
810 if (src != dest)
811 move(src, dest);
812 }
813
814
815 // Forwards / external control flow operations:
816 //
817    // This set of jump and conditional branch operations returns a Jump
818    // object which may be linked at a later point, allowing forward jumps,
819    // or jumps that will require external linkage (after the code has been
820    // relocated).
821    //
822    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
823    // respectively; for unsigned comparisons the names b, a, be, and ae are
824    // used (representing the names 'below' and 'above').
825    //
826    // Operands to the comparison are provided in the expected order, e.g.
827    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
828    // treated as a signed 32-bit value, is less than or equal to 5.
829 //
830 // jz and jnz test whether the first operand is equal to zero, and take
831 // an optional second operand of a mask under which to perform the test.
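    // Illustrative usage (added commentary; 'masm' and 'regT0' are hypothetical):
    //     Jump slowCase = masm.branch32(Above, regT0, Imm32(100));
    //     // ... fast path ...
    //     slowCase.link(&masm);   // or resolve later through a LinkBuffer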
832 private:
833
834 // Should we be using TEQ for equal/not-equal?
835 void compare32(RegisterID left, Imm32 right)
836 {
837 int32_t imm = right.m_value;
838 if (!imm)
839 m_assembler.tst(left, left);
840 else {
841 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
842 if (armImm.isValid())
843 m_assembler.cmp(left, armImm);
844 else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
845 m_assembler.cmn(left, armImm);
846 else {
847 move(Imm32(imm), dataTempRegister);
848 m_assembler.cmp(left, dataTempRegister);
849 }
850 }
851 }
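    // Note (added commentary): when the immediate cannot be encoded but its
    // negation can, CMN (compare negative) is used in place of CMP, avoiding the
    // scratch-register fallback.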
852
853 void test32(RegisterID reg, Imm32 mask)
854 {
855 int32_t imm = mask.m_value;
856
857 if (imm == -1)
858 m_assembler.tst(reg, reg);
859 else {
860 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
861 if (armImm.isValid())
862 m_assembler.tst(reg, armImm);
863 else {
864 move(mask, dataTempRegister);
865 m_assembler.tst(reg, dataTempRegister);
866 }
867 }
868 }
869
870 public:
871 Jump branch32(Condition cond, RegisterID left, RegisterID right)
872 {
873 m_assembler.cmp(left, right);
874 return Jump(makeBranch(cond));
875 }
876
877 Jump branch32(Condition cond, RegisterID left, Imm32 right)
878 {
879 compare32(left, right);
880 return Jump(makeBranch(cond));
881 }
882
883 Jump branch32(Condition cond, RegisterID left, Address right)
884 {
885 load32(right, dataTempRegister);
886 return branch32(cond, left, dataTempRegister);
887 }
888
889 Jump branch32(Condition cond, Address left, RegisterID right)
890 {
891 load32(left, dataTempRegister);
892 return branch32(cond, dataTempRegister, right);
893 }
894
895 Jump branch32(Condition cond, Address left, Imm32 right)
896 {
897        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
898 load32(left, addressTempRegister);
899 return branch32(cond, addressTempRegister, right);
900 }
901
902 Jump branch32(Condition cond, BaseIndex left, Imm32 right)
903 {
904        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
905 load32(left, addressTempRegister);
906 return branch32(cond, addressTempRegister, right);
907 }
908
909 Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
910 {
911        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
912 load32WithUnalignedHalfWords(left, addressTempRegister);
913 return branch32(cond, addressTempRegister, right);
914 }
915
916 Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
917 {
918 load32(left.m_ptr, dataTempRegister);
919 return branch32(cond, dataTempRegister, right);
920 }
921
922 Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
923 {
924        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
925 load32(left.m_ptr, addressTempRegister);
926 return branch32(cond, addressTempRegister, right);
927 }
928
929 Jump branch16(Condition cond, BaseIndex left, RegisterID right)
930 {
931 load16(left, dataTempRegister);
932 m_assembler.lsl(addressTempRegister, right, 16);
933 m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
934 return branch32(cond, dataTempRegister, addressTempRegister);
935 }
936
937 Jump branch16(Condition cond, BaseIndex left, Imm32 right)
938 {
939        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
940 load16(left, addressTempRegister);
941 m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
942 return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
943 }
944
945 Jump branch8(Condition cond, RegisterID left, Imm32 right)
946 {
947 compare32(left, right);
948 return Jump(makeBranch(cond));
949 }
950
951 Jump branch8(Condition cond, Address left, Imm32 right)
952 {
953        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
954 load8(left, addressTempRegister);
955 return branch8(cond, addressTempRegister, right);
956 }
957
958 Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
959 {
960 ASSERT((cond == Zero) || (cond == NonZero));
961 m_assembler.tst(reg, mask);
962 return Jump(makeBranch(cond));
963 }
964
965 Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
966 {
967 ASSERT((cond == Zero) || (cond == NonZero));
968 test32(reg, mask);
969 return Jump(makeBranch(cond));
970 }
971
972 Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
973 {
974 ASSERT((cond == Zero) || (cond == NonZero));
975        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
976 load32(address, addressTempRegister);
977 return branchTest32(cond, addressTempRegister, mask);
978 }
979
980 Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
981 {
982 ASSERT((cond == Zero) || (cond == NonZero));
983        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
984 load32(address, addressTempRegister);
985 return branchTest32(cond, addressTempRegister, mask);
986 }
987
988 Jump branchTest8(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
989 {
990 ASSERT((cond == Zero) || (cond == NonZero));
991 test32(reg, mask);
992 return Jump(makeBranch(cond));
993 }
994
995 Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
996 {
997 ASSERT((cond == Zero) || (cond == NonZero));
998        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
999 load8(address, addressTempRegister);
1000 return branchTest8(cond, addressTempRegister, mask);
1001 }
1002
1003 Jump jump()
1004 {
1005 return Jump(makeJump());
1006 }
1007
1008 void jump(RegisterID target)
1009 {
1010 m_assembler.bx(target, ARMv7Assembler::JumpFixed);
1011 }
1012
1013 // Address is a memory location containing the address to jump to
1014 void jump(Address address)
1015 {
1016 load32(address, dataTempRegister);
1017 m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed);
1018 }
1019
1020
1021 // Arithmetic control flow operations:
1022 //
1023    // This set of conditional branch operations branches based
1024 // on the result of an arithmetic operation. The operation
1025 // is performed as normal, storing the result.
1026 //
1027 // * jz operations branch if the result is zero.
1028 // * jo operations branch if the (signed) arithmetic
1029 // operation caused an overflow to occur.
1030
1031 Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
1032 {
1033 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
1034 m_assembler.add_S(dest, dest, src);
1035 return Jump(makeBranch(cond));
1036 }
1037
1038 Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
1039 {
1040 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
1041 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1042 if (armImm.isValid())
1043 m_assembler.add_S(dest, dest, armImm);
1044 else {
1045 move(imm, dataTempRegister);
1046 m_assembler.add_S(dest, dest, dataTempRegister);
1047 }
1048 return Jump(makeBranch(cond));
1049 }
1050
1051 Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
1052 {
1053 ASSERT_UNUSED(cond, cond == Overflow);
1054 m_assembler.smull(dest, dataTempRegister, dest, src);
1055 m_assembler.asr(addressTempRegister, dest, 31);
1056 return branch32(NotEqual, addressTempRegister, dataTempRegister);
1057 }
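    // Note (added commentary): SMULL leaves the low word of the product in 'dest'
    // and the high word in dataTempRegister; the multiply overflowed a signed
    // 32-bit result exactly when the high word differs from the sign extension
    // of the low word, i.e. from 'dest' shifted right arithmetically by 31.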
1058
1059 Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
1060 {
1061 ASSERT_UNUSED(cond, cond == Overflow);
1062 move(imm, dataTempRegister);
1063 m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
1064 m_assembler.asr(addressTempRegister, dest, 31);
1065 return branch32(NotEqual, addressTempRegister, dataTempRegister);
1066 }
1067
1068 Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
1069 {
1070 ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
1071 m_assembler.orr_S(dest, dest, src);
1072 return Jump(makeBranch(cond));
1073 }
1074
1075 Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
1076 {
1077 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
1078 m_assembler.sub_S(dest, dest, src);
1079 return Jump(makeBranch(cond));
1080 }
1081
1082 Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
1083 {
1084 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
1085 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1086 if (armImm.isValid())
1087 m_assembler.sub_S(dest, dest, armImm);
1088 else {
1089 move(imm, dataTempRegister);
1090 m_assembler.sub_S(dest, dest, dataTempRegister);
1091 }
1092 return Jump(makeBranch(cond));
1093 }
1094
1095
1096 // Miscellaneous operations:
1097
1098 void breakpoint()
1099 {
1100 m_assembler.bkpt(0);
1101 }
1102
1103 Call nearCall()
1104 {
1105 moveFixedWidthEncoding(Imm32(0), dataTempRegister);
1106 return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::LinkableNear);
1107 }
1108
1109 Call call()
1110 {
1111 moveFixedWidthEncoding(Imm32(0), dataTempRegister);
1112 return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
1113 }
1114
1115 Call call(RegisterID target)
1116 {
1117 return Call(m_assembler.blx(target, ARMv7Assembler::JumpFixed), Call::None);
1118 }
1119
1120 Call call(Address address)
1121 {
1122 load32(address, dataTempRegister);
1123 return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::None);
1124 }
1125
1126 void ret()
1127 {
1128 m_assembler.bx(linkRegister, ARMv7Assembler::JumpFixed);
1129 }
1130
1131 void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
1132 {
1133 m_assembler.cmp(left, right);
1134 m_assembler.it(armV7Condition(cond), false);
1135 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1136 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1137 }
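    // Note (added commentary): it(cond, false) opens an if-then-else (ITE) block,
    // so the first MOV (dest = 1) executes when the condition holds and the
    // second (dest = 0) executes otherwise - a branchless boolean materialization.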
1138
1139 void set32(Condition cond, Address left, RegisterID right, RegisterID dest)
1140 {
1141 load32(left, dataTempRegister);
1142 set32(cond, dataTempRegister, right, dest);
1143 }
1144
1145 void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
1146 {
1147 compare32(left, right);
1148 m_assembler.it(armV7Condition(cond), false);
1149 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1150 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1151 }
1152
1153 void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
1154 {
1155 set32(cond, left, right, dest);
1156 }
1157
1158 void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
1159 {
1160 set32(cond, left, right, dest);
1161 }
1162
1163 void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
1164 {
1165 set32(cond, left, right, dest);
1166 }
1167
1168 // FIXME:
1169    // The mask should be optional... perhaps the argument order should be
1170 // dest-src, operations always have a dest? ... possibly not true, considering
1171 // asm ops like test, or pseudo ops like pop().
1172 void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
1173 {
1174 load32(address, dataTempRegister);
1175 test32(dataTempRegister, mask);
1176 m_assembler.it(armV7Condition(cond), false);
1177 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1178 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1179 }
1180
1181 void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
1182 {
1183 load8(address, dataTempRegister);
1184 test32(dataTempRegister, mask);
1185 m_assembler.it(armV7Condition(cond), false);
1186 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1187 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1188 }
1189
1190 DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
1191 {
1192 moveFixedWidthEncoding(imm, dst);
1193 return DataLabel32(this);
1194 }
1195
1196 DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
1197 {
1198 moveFixedWidthEncoding(Imm32(imm), dst);
1199 return DataLabelPtr(this);
1200 }
1201
1202 Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
1203 {
1204 dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1205 return branch32(cond, left, dataTempRegister);
1206 }
1207
1208 Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
1209 {
1210 load32(left, addressTempRegister);
1211 dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1212 return branch32(cond, addressTempRegister, dataTempRegister);
1213 }
1214
1215 DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
1216 {
1217 DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
1218 store32(dataTempRegister, address);
1219 return label;
1220 }
1221 DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
1222
1223
1224 Call tailRecursiveCall()
1225 {
1226 // Like a normal call, but don't link.
1227 moveFixedWidthEncoding(Imm32(0), dataTempRegister);
1228 return Call(m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
1229 }
1230
1231 Call makeTailRecursiveCall(Jump oldJump)
1232 {
1233 oldJump.link(this);
1234 return tailRecursiveCall();
1235 }
1236
1237
1238 int executableOffsetFor(int location)
1239 {
1240 return m_assembler.executableOffsetFor(location);
1241 }
1242
1243 protected:
1244 bool inUninterruptedSequence()
1245 {
1246 return m_inUninterruptedSequence;
1247 }
1248
1249 ARMv7Assembler::JmpSrc makeJump()
1250 {
1251 moveFixedWidthEncoding(Imm32(0), dataTempRegister);
1252 return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
1253 }
1254
1255 ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
1256 {
1257 m_assembler.it(cond, true, true);
1258 moveFixedWidthEncoding(Imm32(0), dataTempRegister);
1259 return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
1260 }
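    // Note (added commentary): conditional branches are emitted as an IT block
    // guarding a fixed-width MOVW/MOVT + BX sequence rather than a short
    // conditional B, giving every branch full range and a predictable size for
    // patching; the computeJumpType()/link() interface exposed above can later
    // compact them where a shorter encoding suffices.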
1261 ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
1262 ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1263
1264 ArmAddress setupArmAddress(BaseIndex address)
1265 {
1266 if (address.offset) {
1267 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1268 if (imm.isValid())
1269 m_assembler.add(addressTempRegister, address.base, imm);
1270 else {
1271 move(Imm32(address.offset), addressTempRegister);
1272 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1273 }
1274
1275 return ArmAddress(addressTempRegister, address.index, address.scale);
1276 } else
1277 return ArmAddress(address.base, address.index, address.scale);
1278 }
1279
1280 ArmAddress setupArmAddress(Address address)
1281 {
1282 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1283 return ArmAddress(address.base, address.offset);
1284
1285 move(Imm32(address.offset), addressTempRegister);
1286 return ArmAddress(address.base, addressTempRegister);
1287 }
1288
1289 ArmAddress setupArmAddress(ImplicitAddress address)
1290 {
1291 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1292 return ArmAddress(address.base, address.offset);
1293
1294 move(Imm32(address.offset), addressTempRegister);
1295 return ArmAddress(address.base, addressTempRegister);
1296 }
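    // Note (added commentary): offsets in [-0xff, 0xfff] can be encoded directly
    // in a load/store (positive offsets as a 12-bit immediate, negative ones as
    // an 8-bit magnitude); anything outside that range is materialized into
    // addressTempRegister and used as a register index.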
1297
1298 RegisterID makeBaseIndexBase(BaseIndex address)
1299 {
1300 if (!address.offset)
1301 return address.base;
1302
1303 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1304 if (imm.isValid())
1305 m_assembler.add(addressTempRegister, address.base, imm);
1306 else {
1307 move(Imm32(address.offset), addressTempRegister);
1308 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1309 }
1310
1311 return addressTempRegister;
1312 }
1313
1314 void moveFixedWidthEncoding(Imm32 imm, RegisterID dst)
1315 {
1316 uint32_t value = imm.m_value;
1317 m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
1318 m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
1319 }
1320
1321 ARMv7Assembler::Condition armV7Condition(Condition cond)
1322 {
1323 return static_cast<ARMv7Assembler::Condition>(cond);
1324 }
1325
1326 ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
1327 {
1328 return static_cast<ARMv7Assembler::Condition>(cond);
1329 }
1330
1331 private:
1332 friend class LinkBuffer;
1333 friend class RepatchBuffer;
1334
1335 static void linkCall(void* code, Call call, FunctionPtr function)
1336 {
1337 ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
1338 }
1339
1340 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
1341 {
1342 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1343 }
1344
1345 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
1346 {
1347 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1348 }
1349
1350 bool m_inUninterruptedSequence;
1351 };
1352
1353 } // namespace JSC
1354
1355 #endif // ENABLE(ASSEMBLER)
1356
1357 #endif // MacroAssemblerARMv7_h