]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/MacroAssemblerARMv7.h
70b2552363f79a321392f83dd0e28d5f6c3b42ed
[apple/javascriptcore.git] / assembler / MacroAssemblerARMv7.h
1 /*
2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
29
30 #if ENABLE(ASSEMBLER)
31
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
34
35 namespace JSC {
36
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
38 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
39 // - dTR is likely used more than aTR, and we'll get better instruction
40 // encoding if it's in the low 8 registers.
    // Scratch registers reserved by the macro assembler; client code must not
    // allocate these. dataTempRegister typically holds loaded/spilled data,
    // addressTempRegister typically holds computed addresses or immediates.
    static const RegisterID dataTempRegister = ARMRegisters::ip;
    static const RegisterID addressTempRegister = ARMRegisters::r3;

    // Scratch double register for FP moves/conversions; its low single-precision
    // alias is used when transferring 32-bit values through VFP.
    static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
    inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }

public:
    typedef ARMv7Assembler::LinkRecord LinkRecord;
    typedef ARMv7Assembler::JumpType JumpType;
    typedef ARMv7Assembler::JumpLinkType JumpLinkType;
    // Magic number is the biggest useful offset we can get on ARMv7 with
    // a LDR_imm_T2 encoding
    static const int MaximumCompactPtrAlignedAddressOffset = 124;
54
    // Starts outside of any uninterrupted (non-compactable) instruction sequence.
    MacroAssemblerARMv7()
        : m_inUninterruptedSequence(false)
    {
    }
59
    // Branch-compaction interface: thin pass-throughs to the underlying
    // ARMv7Assembler, used by the link/relocation machinery. Sequences between
    // begin/endUninterruptedSequence must not be altered by compaction.
    void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
    void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
    Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
70
    // Resolved addressing mode for ARM load/store instructions: either
    // base + signed 32-bit offset, or base + (index << scale).
    struct ArmAddress {
        enum AddressType {
            HasOffset,
            HasIndex,
        } type;
        RegisterID base;
        union {
            int32_t offset;
            struct {
                RegisterID index;
                Scale scale;
            };
        } u;

        // Base-plus-offset form (offset defaults to 0).
        explicit ArmAddress(RegisterID base, int32_t offset = 0)
            : type(HasOffset)
            , base(base)
        {
            u.offset = offset;
        }

        // Base-plus-scaled-index form.
        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
            : type(HasIndex)
            , base(base)
        {
            u.index = index;
            u.scale = scale;
        }
    };
100
101 public:
    typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;

    // Pointers are 4 bytes on ARMv7, so pointer-sized indexing scales by four.
    static const Scale ScalePtr = TimesFour;

    // Integer comparison conditions, mapped directly to ARM condition codes.
    enum RelationalCondition {
        Equal = ARMv7Assembler::ConditionEQ,
        NotEqual = ARMv7Assembler::ConditionNE,
        Above = ARMv7Assembler::ConditionHI,
        AboveOrEqual = ARMv7Assembler::ConditionHS,
        Below = ARMv7Assembler::ConditionLO,
        BelowOrEqual = ARMv7Assembler::ConditionLS,
        GreaterThan = ARMv7Assembler::ConditionGT,
        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        LessThan = ARMv7Assembler::ConditionLT,
        LessThanOrEqual = ARMv7Assembler::ConditionLE
    };

    // Conditions testing the flags set by an arithmetic/logical operation.
    enum ResultCondition {
        Overflow = ARMv7Assembler::ConditionVS,
        Signed = ARMv7Assembler::ConditionMI,
        Zero = ARMv7Assembler::ConditionEQ,
        NonZero = ARMv7Assembler::ConditionNE
    };

    // Conditions for double comparisons after vcmp/vmrs; NaN operands set the
    // V (unordered) flag, which several encodings below cannot express directly.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMv7Assembler::ConditionEQ,
        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        DoubleLessThan = ARMv7Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;
145
146 // Integer arithmetic operations:
147 //
148 // Operations are typically two operand - operation(source, srcDst)
149 // For many operations the source may be an TrustedImm32, the srcDst operand
150 // may often be a memory location (explictly described using an Address
151 // object).
152
    // dest += src.
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, dest, src);
    }

    // dest += imm (delegates to the three-operand form).
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }
162
163 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
164 {
165 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
166 if (armImm.isValid())
167 m_assembler.add(dest, src, armImm);
168 else {
169 move(imm, dataTempRegister);
170 m_assembler.add(dest, src, dataTempRegister);
171 }
172 }
173
    // *address += imm (read-modify-write through dataTempRegister).
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest += *src.
    void add32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    // *address += imm, for an absolute (pointer) address.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }
213
    // dest &= src. (ARM_and avoids clashing with the 'and' alternative token.)
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ARM_and(dest, dest, src);
    }
218
219 void and32(TrustedImm32 imm, RegisterID dest)
220 {
221 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
222 if (armImm.isValid())
223 m_assembler.ARM_and(dest, dest, armImm);
224 else {
225 move(imm, dataTempRegister);
226 m_assembler.ARM_and(dest, dest, dataTempRegister);
227 }
228 }
229
    // dest = number of leading zero bits in src.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz(dest, src);
    }

    // dest <<= (shift_amount & 31).
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.lsl(dest, dest, dataTempRegister);
    }

    // dest <<= (imm & 31).
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
    }
249
    // dest *= src. Uses smull so the high 32 bits land in dataTempRegister
    // (discarded here; callers interested in overflow use the flags path).
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, dest, src);
    }

    // dest = src * imm; the immediate is materialized in dataTempRegister,
    // which also receives the (discarded) high half of the product.
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
    }

    // srcDest = -srcDest (two's complement negate).
    void neg32(RegisterID srcDest)
    {
        m_assembler.neg(srcDest, srcDest);
    }

    // srcDest = ~srcDest (bitwise complement).
    void not32(RegisterID srcDest)
    {
        m_assembler.mvn(srcDest, srcDest);
    }
270
    // dest |= src.
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orr(dest, dest, src);
    }
275
276 void or32(TrustedImm32 imm, RegisterID dest)
277 {
278 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
279 if (armImm.isValid())
280 m_assembler.orr(dest, dest, armImm);
281 else {
282 move(imm, dataTempRegister);
283 m_assembler.orr(dest, dest, dataTempRegister);
284 }
285 }
286
    // Arithmetic (sign-propagating) right shift: dest >>= (shift_amount & 31).
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.asr(dest, dest, dataTempRegister);
    }

    // Arithmetic right shift by a constant (masked to 0..31).
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr(dest, dest, imm.m_value & 0x1f);
    }

    // Logical (zero-filling) right shift: dest >>>= (shift_amount & 31).
    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.lsr(dest, dest, dataTempRegister);
    }

    // Logical right shift by a constant (masked to 0..31).
    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
    }
316
    // dest -= src.
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub(dest, dest, src);
    }
321
322 void sub32(TrustedImm32 imm, RegisterID dest)
323 {
324 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
325 if (armImm.isValid())
326 m_assembler.sub(dest, dest, armImm);
327 else {
328 move(imm, dataTempRegister);
329 m_assembler.sub(dest, dest, dataTempRegister);
330 }
331 }
332
    // *address -= imm (read-modify-write through dataTempRegister).
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest -= *src.
    void sub32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        sub32(dataTempRegister, dest);
    }

    // *address -= imm, for an absolute (pointer) address.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }
372
    // dest ^= src.
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eor(dest, dest, src);
    }
377
378 void xor32(TrustedImm32 imm, RegisterID dest)
379 {
380 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
381 if (armImm.isValid())
382 m_assembler.eor(dest, dest, armImm);
383 else {
384 move(imm, dataTempRegister);
385 m_assembler.eor(dest, dest, dataTempRegister);
386 }
387 }
388
389
390 // Memory access operations:
391 //
392 // Loads are of the form load(address, destination) and stores of the form
393 // store(source, address). The source for a store may be an TrustedImm32. Address
394 // operand objects to loads and store will be implicitly constructed if a
395 // register is passed.
396
397 private:
    // Core 32-bit load from a resolved ArmAddress. Three encodings:
    // register index, positive UInt12 immediate, or (for small negative
    // offsets down to -255) the imm8 form with the subtract bit set.
    void load32(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldr(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
        }
    }

    // Core 16-bit load; same encoding selection as load32.
    void load16(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrh(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
        }
    }

    // Core 8-bit load; same encoding selection as load32.
    void load8(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrb(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
        }
    }

    // Core 32-bit store; same encoding selection as load32.
    void store32(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.str(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.str(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.str(src, address.base, address.u.offset, true, false);
        }
    }
453
454 public:
    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // ARMv7 word loads tolerate unaligned addresses, so this is a plain load32.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // Load from an absolute address, materialized in addressTempRegister.
    void load32(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    // Load with a patchable 32-bit offset: the offset is planted with a
    // fixed-width move so the linker can rewrite it later.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    // Compact (16-bit LDR) patchable load; the offset must be a small,
    // word-aligned positive value (see MaximumCompactPtrAlignedAddressOffset).
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact label(this);
        ASSERT(address.offset >= 0);
        ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
        ASSERT(ARMThumbImmediate::makeUInt12(address.offset).isUInt7());
        m_assembler.ldrCompact(dest, address.base, ARMThumbImmediate::makeUInt12(address.offset));
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
    }

    // 16-bit load with an immediate offset, spilling the offset to
    // dataTempRegister when it does not fit a UInt12 (e.g. negative offsets).
    void load16(ImplicitAddress address, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
        if (armImm.isValid())
            m_assembler.ldrh(dest, address.base, armImm);
        else {
            move(TrustedImm32(address.offset), dataTempRegister);
            m_assembler.ldrh(dest, address.base, dataTempRegister);
        }
    }
513
    // Store with a patchable 32-bit offset (fixed-width move, see the load
    // counterpart above).
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        store32(src, ArmAddress(address.base, dataTempRegister));
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(RegisterID src, BaseIndex address)
    {
        store32(src, setupArmAddress(address));
    }

    // Store a constant: the value goes through dataTempRegister, so
    // setupArmAddress may only use addressTempRegister.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    // Store to an absolute address, materialized in addressTempRegister.
    void store32(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, address);
    }
548
549
550 // Floating-point operations:
551
    // Capability queries used by the JIT to pick code paths.
    bool supportsFloatingPoint() const { return true; }
    // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
    // If a value is not representable as an integer, and possibly for some values that are,
    // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
    // a branch will be taken. It is not clear whether this interface will be well suited to
    // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
    // failure values (saturates to INT_MIN & INT_MAX, NaN results in a value of 0). This is a
    // temporary solution while we work out what this interface should be. Either we need to
    // decide to make this interface work on all platforms, rework the interface to make it more
    // generic, or decide that the MacroAssembler cannot practically be used to abstracted these
    // operations, and make clients go directly to the m_assembler to plant truncation instructions.
    // In short, FIXME:.
    bool supportsFloatingPointTruncate() const { return false; }

    bool supportsFloatingPointSqrt() const
    {
        return false;
    }
570
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // VFP vldr/vstr encode the offset as an 8-bit immediate scaled by 4
        // (plus an add/subtract bit), i.e. multiples of 4 in [-1020, 1020].
        // Anything else is folded into addressTempRegister first.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vldr(dest, base, offset);
    }

    // Load a double from an absolute address via addressTempRegister.
    void loadDouble(const void* address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.vldr(dest, addressTempRegister, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Same vstr offset-encoding constraint as loadDouble above:
        // multiples of 4 in [-1020, 1020].
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vstr(src, base, offset);
    }
606
    // dest += src (double precision).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_F64(dest, dest, src);
    }

    // dest += *src, staging the memory operand in fpTempRegister.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest /= src.
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_F64(dest, dest, src);
    }

    // dest -= src.
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_F64(dest, dest, src);
    }

    // dest -= *src, staging the memory operand in fpTempRegister.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // dest *= src.
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_F64(dest, dest, src);
    }

    // dest *= *src, staging the memory operand in fpTempRegister.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    // Not implemented on this port (see supportsFloatingPointSqrt()).
    void sqrtDouble(FPRegisterID, FPRegisterID)
    {
        ASSERT_NOT_REACHED();
    }
649
    // int32 -> double: transfer through the single-precision alias of the FP
    // scratch register, then convert in place.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov(fpTempRegisterAsSingle(), src);
        m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address, dataTempRegister);
        m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
        m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address.m_ptr, dataTempRegister);
        m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
        m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
    }
671
    // Compare two doubles and branch on 'cond'. Transfers the VFP flags to the
    // core flags (vmrs), then patches up the two conditions whose ARM encoding
    // does not match the desired NaN behaviour (see DoubleCondition comments).
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_F64(left, right);
        m_assembler.vmrs();

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }
695
    // Not implemented on this port (see supportsFloatingPointTruncate()).
    Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
    {
        ASSERT_NOT_REACHED();
        return jump();
    }

    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
    {
        m_assembler.vcvtr_S32_F64(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_F64_S32(fpTempRegister, fpTempRegisterAsSingle());
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));
    }
718
    // Branch if reg != 0.0 and is not NaN (compare-with-zero; unordered falls
    // through, NE takes the branch).
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz_F64(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch if reg == 0.0 or reg is NaN.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz_F64(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
741
742 // Stack manipulation operations:
743 //
744 // The ABI is assumed to provide a stack abstraction to memory,
745 // containing machine word sized units of data. Push and pop
746 // operations add and remove a single register sized unit of data
747 // to or from the stack. Peek and poke operations read or write
748 // values on the stack, without moving the current stack position.
749
    void pop(RegisterID dest)
    {
        // load post-indexed with writeback: dest = *sp; sp += sizeof(void*)
        m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
    }
755
756 void push(RegisterID src)
757 {
758 // store preindexed with writeback
759 m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
760 }
761
    // Push a value loaded from memory (staged in dataTempRegister).
    void push(Address address)
    {
        load32(address, dataTempRegister);
        push(dataTempRegister);
    }

    // Push a constant (staged in dataTempRegister).
    void push(TrustedImm32 imm)
    {
        move(imm, dataTempRegister);
        push(dataTempRegister);
    }
773
774 // Register move operations:
775 //
776 // Move values in registers.
777
    // Load a 32-bit constant into dest. Pointer immediates always use the
    // fixed-width movw/movt pair so the instruction size is value-independent
    // (and hence patchable); plain constants pick the shortest encoding:
    // encoded immediate, MVN of the complement, or movw (+ movt if needed).
    void move(TrustedImm32 imm, RegisterID dest)
    {
        uint32_t value = imm.m_value;

        if (imm.m_isPointer)
            moveFixedWidthEncoding(imm, dest);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);

            if (armImm.isValid())
                m_assembler.mov(dest, armImm);
            else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
                m_assembler.mvn(dest, armImm);
            else {
                m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
                if (value & 0xffff0000)
                    m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
            }
        }
    }
798
    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    // Exchange two registers, using dataTempRegister as the spare.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, dataTempRegister);
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    // Registers and pointers are both 32-bit on ARMv7, so the *Extend32ToPtr
    // operations reduce to a plain move (elided when src == dest).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void nop()
    {
        m_assembler.nop();
    }
832
833 // Forwards / external control flow operations:
834 //
835 // This set of jump and conditional branch operations return a Jump
836 // object which may linked at a later point, allow forwards jump,
837 // or jumps that will require external linkage (after the code has been
838 // relocated).
839 //
840 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
841 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
842 // used (representing the names 'below' and 'above').
843 //
844 // Operands to the comparision are provided in the expected order, e.g.
845 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
846 // treated as a signed 32bit value, is less than or equal to 5.
847 //
848 // jz and jnz test whether the first operand is equal to zero, and take
849 // an optional second operand of a mask under which to perform the test.
850 private:
851
852 // Should we be using TEQ for equal/not-equal?
    // Should we be using TEQ for equal/not-equal?
    // Set the flags for left <=> right: TST against self for zero (cheaper),
    // CMP for encodable immediates, CMN for encodable negations, and a
    // register compare via dataTempRegister otherwise.
    void compare32(RegisterID left, TrustedImm32 right)
    {
        int32_t imm = right.m_value;
        if (!imm)
            m_assembler.tst(left, left);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid())
                m_assembler.cmp(left, armImm);
            // NOTE(review): -imm is signed-overflow UB for INT_MIN; presumably
            // 0x80000000 always encodes in the branch above so this path is
            // never reached with INT_MIN - confirm against makeEncodedImm.
            else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
                m_assembler.cmn(left, armImm);
            else {
                move(TrustedImm32(imm), dataTempRegister);
                m_assembler.cmp(left, dataTempRegister);
            }
        }
    }

    // Set the flags for (reg & mask): TST against self for the all-ones mask,
    // immediate TST when encodable, register TST via dataTempRegister otherwise.
    void test32(RegisterID reg, TrustedImm32 mask)
    {
        int32_t imm = mask.m_value;

        if (imm == -1)
            m_assembler.tst(reg, reg);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid())
                m_assembler.tst(reg, armImm);
            else {
                move(mask, dataTempRegister);
                m_assembler.tst(reg, dataTempRegister);
            }
        }
    }
887
888 public:
    // Compare-and-branch family. Memory operands are loaded into a scratch
    // register first; overloads taking an immediate load into
    // addressTempRegister because the immediate compare path may itself
    // clobber dataTempRegister.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32WithUnalignedHalfWords(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
        load32(left.m_ptr, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    // 16-bit compare: both operands are shifted into the top halfword so the
    // 32-bit compare sees the correct flags for 16-bit values.
    Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load16(left, dataTempRegister);
        m_assembler.lsl(addressTempRegister, right, 16);
        m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
        return branch32(cond, dataTempRegister, addressTempRegister);
    }
954
955 Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
956 {
957 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
958 load16(left, addressTempRegister);
959 m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
960 return branch32(cond, addressTempRegister, TrustedImm32(right.m_value << 16));
961 }
962
963 Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
964 {
965 compare32(left, right);
966 return Jump(makeBranch(cond));
967 }
968
969 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
970 {
971 // use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/
972 load8(left, addressTempRegister);
973 return branch8(cond, addressTempRegister, right);
974 }
975
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        // TST sets the flags from (reg & mask) without writing a destination.
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // The default mask of -1 tests the whole register.
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // NOTE(review): tests all 32 bits of 'reg' under the (32-bit) mask;
        // callers presumably supply a register holding a zero-extended byte.
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        load8(address, addressTempRegister);
        return branchTest8(cond, addressTempRegister, mask);
    }
1014
    void jump(RegisterID target)
    {
        // Indirect branch through the target register.
        m_assembler.bx(target);
    }

    // Address is a memory location containing the address to jump to.
    void jump(Address address)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }
1026
1027
    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        // add_S is the flag-setting form of ADD; the branch keys off the flags.
        m_assembler.add_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        // Use the immediate form when the value has a valid Thumb-2 encoding;
        // otherwise materialize the immediate in dataTempRegister first.
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add_S(dest, dest, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // Signed 32x32->64 multiply: low word to dest, high word to dataTempRegister.
        m_assembler.smull(dest, dataTempRegister, src1, src2);

        if (cond == Overflow) {
            // The product overflowed 32 bits iff the high word is not the
            // sign extension of the low word.
            m_assembler.asr(addressTempRegister, dest, 31);
            return branch32(NotEqual, addressTempRegister, dataTempRegister);
        }

        // For other conditions, test the (truncated) 32-bit result.
        return branchTest32(cond, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        // No multiply-by-immediate form: materialize the immediate first.
        move(imm, dataTempRegister);
        return branchMul32(cond, dataTempRegister, src, dest);
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        // orr_S is the flag-setting form of ORR.
        m_assembler.orr_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        // sub_S is the flag-setting form of SUB.
        m_assembler.sub_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        // Same immediate-encoding fallback strategy as branchAdd32 above.
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub_S(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub_S(dest, dest, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
1102
    // Jump into a table of fixed-size entries located immediately after this
    // sequence, selecting the entry given by (index << scale).
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);

        // dataTempRegister will point after the jump if index register contains zero
        // NOTE(review): the constant 9 compensates for the Thumb PC read-ahead
        // and the size of the remaining instructions in this sequence -- confirm
        // against the ARMv7 rules for reading PC before changing any of the
        // instructions below.
        move(ARMRegisters::pc, dataTempRegister);
        m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));

        ShiftTypeAndAmount shift(SRType_LSL, scale);
        m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
        jump(dataTempRegister);
    }
1115
1116 // Miscellaneous operations:
1117
    void breakpoint()
    {
        // Emit a BKPT instruction (immediate 0) to trap into the debugger.
        m_assembler.bkpt(0);
    }

    ALWAYS_INLINE Call nearCall()
    {
        // Emit a fixed-width load of a placeholder (0) target address, then
        // call through it; the real target is filled in when the call is linked
        // (see linkCall below).
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }

    ALWAYS_INLINE Call call()
    {
        // As nearCall, but marked Linkable rather than LinkableNear.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }

    ALWAYS_INLINE Call call(RegisterID target)
    {
        // Direct register-indirect call; nothing to link.
        return Call(m_assembler.blx(target), Call::None);
    }

    ALWAYS_INLINE Call call(Address address)
    {
        // Call through a function pointer loaded from memory.
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }

    ALWAYS_INLINE void ret()
    {
        // Return by branching to the link register.
        m_assembler.bx(linkRegister);
    }
1150
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        // Compare, then use an IT then/else pair to select 1 (condition holds)
        // or 0 (condition fails) into dest.
        m_assembler.cmp(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        // Load the memory operand and defer to the register/register form.
        load32(left, dataTempRegister);
        compare32(cond, dataTempRegister, right, dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // compare32(left, right) sets the flags; then select 1/0 as above.
        compare32(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        // Set dest to 1 if (mem & mask) satisfies cond, 0 otherwise.
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        // As test32, but only a single byte is loaded from memory.
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1194
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
    {
        // Emit a fixed-width (always two-instruction) immediate load so the
        // value can be patched in place later; the label records its location.
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }

    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
    {
        // Pointer-flavored variant of the above.
        moveFixedWidthEncoding(TrustedImm32(imm), dst);
        return DataLabelPtr(this);
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        // Compare against a patchable constant (initially initialRightValue).
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        // Load into addressTempRegister so dataTempRegister stays free to hold
        // the patchable constant.
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }

    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        // Store a patchable pointer constant to memory; the returned label
        // locates the constant's encoding.
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
1227
1228
    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link: bx rather than blx, so no return
        // address is written to the link register.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }

    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        // Point a previously-emitted jump at a freshly emitted tail call.
        oldJump.link(this);
        return tailRecursiveCall();
    }
1241
1242
    // Thin forwarder to ARMv7Assembler::executableOffsetFor.
    int executableOffsetFor(int location)
    {
        return m_assembler.executableOffsetFor(location);
    }
1247
protected:
    bool inUninterruptedSequence()
    {
        return m_inUninterruptedSequence;
    }

    ALWAYS_INLINE Jump jump()
    {
        // Fixed-width placeholder load followed by an indirect branch; inside
        // an uninterrupted sequence the jump is tagged with the FixedSize kind.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
    }

    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
    {
        // Predicate the placeholder-load + bx sequence on cond with an IT block,
        // producing a conditional jump with a patchable target.
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), inUninterruptedSequence() ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
    }
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1269
    // Convert a BaseIndex operand into an ArmAddress the assembler can encode.
    // A nonzero offset is folded into addressTempRegister (base + offset) so
    // the result is a plain base + index << scale form.
    ArmAddress setupArmAddress(BaseIndex address)
    {
        if (address.offset) {
            ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
            if (imm.isValid())
                m_assembler.add(addressTempRegister, address.base, imm);
            else {
                // Offset has no Thumb-2 immediate encoding; materialize it first.
                move(TrustedImm32(address.offset), addressTempRegister);
                m_assembler.add(addressTempRegister, addressTempRegister, address.base);
            }

            return ArmAddress(addressTempRegister, address.index, address.scale);
        } else
            return ArmAddress(address.base, address.index, address.scale);
    }
1285
1286 ArmAddress setupArmAddress(Address address)
1287 {
1288 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1289 return ArmAddress(address.base, address.offset);
1290
1291 move(TrustedImm32(address.offset), addressTempRegister);
1292 return ArmAddress(address.base, addressTempRegister);
1293 }
1294
1295 ArmAddress setupArmAddress(ImplicitAddress address)
1296 {
1297 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1298 return ArmAddress(address.base, address.offset);
1299
1300 move(TrustedImm32(address.offset), addressTempRegister);
1301 return ArmAddress(address.base, addressTempRegister);
1302 }
1303
    // Return a register holding base + offset for a BaseIndex operand; when
    // the offset is zero the base register is returned unchanged.
    RegisterID makeBaseIndexBase(BaseIndex address)
    {
        if (!address.offset)
            return address.base;

        ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
        if (imm.isValid())
            m_assembler.add(addressTempRegister, address.base, imm);
        else {
            // Offset has no Thumb-2 immediate encoding; materialize it first.
            move(TrustedImm32(address.offset), addressTempRegister);
            m_assembler.add(addressTempRegister, addressTempRegister, address.base);
        }

        return addressTempRegister;
    }
1319
    // Load a 32-bit immediate using the two-instruction movT3 (low halfword) +
    // movt (high halfword) pair, regardless of the value. The encoding always
    // has the same size, which makes it patchable in place.
    void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
    {
        uint32_t value = imm.m_value;
        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
    }

    // The condition enums mirror the assembler's condition-code encoding, so
    // conversion is a plain cast.
    ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
1341
private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Link an unlinked call in finalized code to its target function.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
    }

    // Redirect an already-linked call site to a new destination.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // NOTE(review): no initialization or writer for this flag is visible in
    // this chunk; presumably it is set by begin/end-uninterrupted-sequence
    // helpers elsewhere -- confirm it is initialized before first use.
    bool m_inUninterruptedSequence;
1362 };
1363
1364 } // namespace JSC
1365
1366 #endif // ENABLE(ASSEMBLER)
1367
1368 #endif // MacroAssemblerARMv7_h