1 /*
2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
29
30 #if ENABLE(ASSEMBLER)
31
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
34
35 namespace JSC {
36
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
38 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
39 // - dTR is likely used more than aTR, and we'll get better instruction
40 // encoding if it's in the low 8 registers.
41 static const RegisterID dataTempRegister = ARMRegisters::ip;
42 static const RegisterID addressTempRegister = ARMRegisters::r3;
43
44 static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
45 inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
46
47 public:
48 MacroAssemblerARMv7()
49 : m_makeJumpPatchable(false)
50 {
51 }
52
53 typedef ARMv7Assembler::LinkRecord LinkRecord;
54 typedef ARMv7Assembler::JumpType JumpType;
55 typedef ARMv7Assembler::JumpLinkType JumpLinkType;
56 typedef ARMv7Assembler::Condition Condition;
57
58 static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
59 static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
60
61 static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
62 {
63 return value >= -255 && value <= 255;
64 }
65
66 Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
67 void* unlinkedCode() { return m_assembler.unlinkedCode(); }
68 bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
69 JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
70 JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
71 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
72 int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
73 void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
74
75 struct ArmAddress {
76 enum AddressType {
77 HasOffset,
78 HasIndex,
79 } type;
80 RegisterID base;
81 union {
82 int32_t offset;
83 struct {
84 RegisterID index;
85 Scale scale;
86 };
87 } u;
88
89 explicit ArmAddress(RegisterID base, int32_t offset = 0)
90 : type(HasOffset)
91 , base(base)
92 {
93 u.offset = offset;
94 }
95
96 explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
97 : type(HasIndex)
98 , base(base)
99 {
100 u.index = index;
101 u.scale = scale;
102 }
103 };
104
105 public:
106 typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;
107
108 static const Scale ScalePtr = TimesFour;
109
110 enum RelationalCondition {
111 Equal = ARMv7Assembler::ConditionEQ,
112 NotEqual = ARMv7Assembler::ConditionNE,
113 Above = ARMv7Assembler::ConditionHI,
114 AboveOrEqual = ARMv7Assembler::ConditionHS,
115 Below = ARMv7Assembler::ConditionLO,
116 BelowOrEqual = ARMv7Assembler::ConditionLS,
117 GreaterThan = ARMv7Assembler::ConditionGT,
118 GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
119 LessThan = ARMv7Assembler::ConditionLT,
120 LessThanOrEqual = ARMv7Assembler::ConditionLE
121 };
122
123 enum ResultCondition {
124 Overflow = ARMv7Assembler::ConditionVS,
125 Signed = ARMv7Assembler::ConditionMI,
126 PositiveOrZero = ARMv7Assembler::ConditionPL,
127 Zero = ARMv7Assembler::ConditionEQ,
128 NonZero = ARMv7Assembler::ConditionNE
129 };
130
131 enum DoubleCondition {
132 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
133 DoubleEqual = ARMv7Assembler::ConditionEQ,
134 DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
135 DoubleGreaterThan = ARMv7Assembler::ConditionGT,
136 DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
137 DoubleLessThan = ARMv7Assembler::ConditionLO,
138 DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
139 // If either operand is NaN, these conditions always evaluate to true.
140 DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
141 DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
142 DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
143 DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
144 DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
145 DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
146 };
147
148 static const RegisterID stackPointerRegister = ARMRegisters::sp;
149 static const RegisterID linkRegister = ARMRegisters::lr;
150
151 // Integer arithmetic operations:
152 //
153 // Operations are typically two operand - operation(source, srcDst)
154 // For many operations the source may be a TrustedImm32, and the srcDst operand
155 // may often be a memory location (explicitly described using an Address
156 // object).
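//
// Illustrative usage (an added sketch, not part of the original file; 'masm'
// and the register choices are assumptions):
//
//     MacroAssemblerARMv7 masm;
//     masm.add32(ARMRegisters::r1, ARMRegisters::r0);          // r0 += r1
//     masm.add32(TrustedImm32(4), ARMRegisters::r0);           // r0 += 4
//     masm.sub32(TrustedImm32(1), Address(ARMRegisters::r2));  // [r2] -= 1
//
// Immediates that cannot be encoded as a Thumb-2 immediate are first
// materialized in dataTempRegister (see the 'else' paths below).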
157
158 void add32(RegisterID src, RegisterID dest)
159 {
160 m_assembler.add(dest, dest, src);
161 }
162
163 void add32(TrustedImm32 imm, RegisterID dest)
164 {
165 add32(imm, dest, dest);
166 }
167
168 void add32(AbsoluteAddress src, RegisterID dest)
169 {
170 load32(src.m_ptr, dataTempRegister);
171 add32(dataTempRegister, dest);
172 }
173
174 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
175 {
176 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
177 if (armImm.isValid())
178 m_assembler.add(dest, src, armImm);
179 else {
180 move(imm, dataTempRegister);
181 m_assembler.add(dest, src, dataTempRegister);
182 }
183 }
184
185 void add32(TrustedImm32 imm, Address address)
186 {
187 load32(address, dataTempRegister);
188
189 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
190 if (armImm.isValid())
191 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
192 else {
193 // Hrrrm, since dataTempRegister holds the data loaded,
194 // use addressTempRegister to hold the immediate.
195 move(imm, addressTempRegister);
196 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
197 }
198
199 store32(dataTempRegister, address);
200 }
201
202 void add32(Address src, RegisterID dest)
203 {
204 load32(src, dataTempRegister);
205 add32(dataTempRegister, dest);
206 }
207
208 void add32(TrustedImm32 imm, AbsoluteAddress address)
209 {
210 load32(address.m_ptr, dataTempRegister);
211
212 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
213 if (armImm.isValid())
214 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
215 else {
216 // Hrrrm, since dataTempRegister holds the data loaded,
217 // use addressTempRegister to hold the immediate.
218 move(imm, addressTempRegister);
219 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
220 }
221
222 store32(dataTempRegister, address.m_ptr);
223 }
224
225 void add64(TrustedImm32 imm, AbsoluteAddress address)
226 {
227 move(TrustedImmPtr(address.m_ptr), addressTempRegister);
228
229 m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
230 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
231 if (armImm.isValid())
232 m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
233 else {
234 move(imm, addressTempRegister);
235 m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
236 move(TrustedImmPtr(address.m_ptr), addressTempRegister);
237 }
238 m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
239
240 m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
241 m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
242 m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
243 }
244
245 void and32(RegisterID op1, RegisterID op2, RegisterID dest)
246 {
247 m_assembler.ARM_and(dest, op1, op2);
248 }
249
250 void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
251 {
252 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
253 if (armImm.isValid())
254 m_assembler.ARM_and(dest, src, armImm);
255 else {
256 move(imm, dataTempRegister);
257 m_assembler.ARM_and(dest, src, dataTempRegister);
258 }
259 }
260
261 void and32(RegisterID src, RegisterID dest)
262 {
263 and32(dest, src, dest);
264 }
265
266 void and32(TrustedImm32 imm, RegisterID dest)
267 {
268 and32(imm, dest, dest);
269 }
270
271 void and32(Address src, RegisterID dest)
272 {
273 load32(src, dataTempRegister);
274 and32(dataTempRegister, dest);
275 }
276
277 void countLeadingZeros32(RegisterID src, RegisterID dest)
278 {
279 m_assembler.clz(dest, src);
280 }
281
282 void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
283 {
284 // Clamp the shift to the range 0..31
285 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
286 ASSERT(armImm.isValid());
287 m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
288
289 m_assembler.lsl(dest, src, dataTempRegister);
290 }
291
292 void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
293 {
294 m_assembler.lsl(dest, src, imm.m_value & 0x1f);
295 }
296
297 void lshift32(RegisterID shiftAmount, RegisterID dest)
298 {
299 lshift32(dest, shiftAmount, dest);
300 }
301
302 void lshift32(TrustedImm32 imm, RegisterID dest)
303 {
304 lshift32(dest, imm, dest);
305 }
306
307 void mul32(RegisterID src, RegisterID dest)
308 {
309 m_assembler.smull(dest, dataTempRegister, dest, src);
310 }
311
312 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
313 {
314 move(imm, dataTempRegister);
315 m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
316 }
317
318 void neg32(RegisterID srcDest)
319 {
320 m_assembler.neg(srcDest, srcDest);
321 }
322
323 void or32(RegisterID src, RegisterID dest)
324 {
325 m_assembler.orr(dest, dest, src);
326 }
327
328 void or32(RegisterID src, AbsoluteAddress dest)
329 {
330 move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
331 load32(addressTempRegister, dataTempRegister);
332 or32(src, dataTempRegister);
333 store32(dataTempRegister, addressTempRegister);
334 }
335
336 void or32(TrustedImm32 imm, RegisterID dest)
337 {
338 or32(imm, dest, dest);
339 }
340
341 void or32(RegisterID op1, RegisterID op2, RegisterID dest)
342 {
343 m_assembler.orr(dest, op1, op2);
344 }
345
346 void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
347 {
348 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
349 if (armImm.isValid())
350 m_assembler.orr(dest, src, armImm);
351 else {
352 move(imm, dataTempRegister);
353 m_assembler.orr(dest, src, dataTempRegister);
354 }
355 }
356
357 void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
358 {
359 // Clamp the shift to the range 0..31
360 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
361 ASSERT(armImm.isValid());
362 m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
363
364 m_assembler.asr(dest, src, dataTempRegister);
365 }
366
367 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
368 {
369 m_assembler.asr(dest, src, imm.m_value & 0x1f);
370 }
371
372 void rshift32(RegisterID shiftAmount, RegisterID dest)
373 {
374 rshift32(dest, shiftAmount, dest);
375 }
376
377 void rshift32(TrustedImm32 imm, RegisterID dest)
378 {
379 rshift32(dest, imm, dest);
380 }
381
382 void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
383 {
384 // Clamp the shift to the range 0..31
385 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
386 ASSERT(armImm.isValid());
387 m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
388
389 m_assembler.lsr(dest, src, dataTempRegister);
390 }
391
392 void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
393 {
394 m_assembler.lsr(dest, src, imm.m_value & 0x1f);
395 }
396
397 void urshift32(RegisterID shiftAmount, RegisterID dest)
398 {
399 urshift32(dest, shiftAmount, dest);
400 }
401
402 void urshift32(TrustedImm32 imm, RegisterID dest)
403 {
404 urshift32(dest, imm, dest);
405 }
406
407 void sub32(RegisterID src, RegisterID dest)
408 {
409 m_assembler.sub(dest, dest, src);
410 }
411
412 void sub32(TrustedImm32 imm, RegisterID dest)
413 {
414 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
415 if (armImm.isValid())
416 m_assembler.sub(dest, dest, armImm);
417 else {
418 move(imm, dataTempRegister);
419 m_assembler.sub(dest, dest, dataTempRegister);
420 }
421 }
422
423 void sub32(TrustedImm32 imm, Address address)
424 {
425 load32(address, dataTempRegister);
426
427 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
428 if (armImm.isValid())
429 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
430 else {
431 // Hrrrm, since dataTempRegister holds the data loaded,
432 // use addressTempRegister to hold the immediate.
433 move(imm, addressTempRegister);
434 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
435 }
436
437 store32(dataTempRegister, address);
438 }
439
440 void sub32(Address src, RegisterID dest)
441 {
442 load32(src, dataTempRegister);
443 sub32(dataTempRegister, dest);
444 }
445
446 void sub32(TrustedImm32 imm, AbsoluteAddress address)
447 {
448 load32(address.m_ptr, dataTempRegister);
449
450 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
451 if (armImm.isValid())
452 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
453 else {
454 // Hrrrm, since dataTempRegister holds the data loaded,
455 // use addressTempRegister to hold the immediate.
456 move(imm, addressTempRegister);
457 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
458 }
459
460 store32(dataTempRegister, address.m_ptr);
461 }
462
463 void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
464 {
465 m_assembler.eor(dest, op1, op2);
466 }
467
468 void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
469 {
470 if (imm.m_value == -1) {
471 m_assembler.mvn(dest, src);
472 return;
473 }
474
475 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
476 if (armImm.isValid())
477 m_assembler.eor(dest, src, armImm);
478 else {
479 move(imm, dataTempRegister);
480 m_assembler.eor(dest, src, dataTempRegister);
481 }
482 }
483
484 void xor32(RegisterID src, RegisterID dest)
485 {
486 xor32(dest, src, dest);
487 }
488
489 void xor32(TrustedImm32 imm, RegisterID dest)
490 {
491 if (imm.m_value == -1)
492 m_assembler.mvn(dest, dest);
493 else
494 xor32(imm, dest, dest);
495 }
496
497
498 // Memory access operations:
499 //
500 // Loads are of the form load(address, destination) and stores of the form
501 // store(source, address). The source for a store may be a TrustedImm32. Address
502 // operand objects to loads and stores will be implicitly constructed if a
503 // register is passed.
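//
// Illustrative usage (an added sketch; 'masm' and the registers are assumed):
//
//     masm.load32(Address(ARMRegisters::r1, 8), ARMRegisters::r0);   // r0 = [r1 + 8]
//     masm.store32(TrustedImm32(0),
//         BaseIndex(ARMRegisters::r1, ARMRegisters::r2, TimesFour)); // [r1 + (r2 << 2)] = 0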
504
505 private:
506 void load32(ArmAddress address, RegisterID dest)
507 {
508 if (address.type == ArmAddress::HasIndex)
509 m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
510 else if (address.u.offset >= 0) {
511 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
512 ASSERT(armImm.isValid());
513 m_assembler.ldr(dest, address.base, armImm);
514 } else {
515 ASSERT(address.u.offset >= -255);
516 m_assembler.ldr(dest, address.base, address.u.offset, true, false);
517 }
518 }
519
520 void load16(ArmAddress address, RegisterID dest)
521 {
522 if (address.type == ArmAddress::HasIndex)
523 m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
524 else if (address.u.offset >= 0) {
525 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
526 ASSERT(armImm.isValid());
527 m_assembler.ldrh(dest, address.base, armImm);
528 } else {
529 ASSERT(address.u.offset >= -255);
530 m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
531 }
532 }
533
534 void load16Signed(ArmAddress address, RegisterID dest)
535 {
536 ASSERT(address.type == ArmAddress::HasIndex);
537 m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
538 }
539
540 void load8(ArmAddress address, RegisterID dest)
541 {
542 if (address.type == ArmAddress::HasIndex)
543 m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
544 else if (address.u.offset >= 0) {
545 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
546 ASSERT(armImm.isValid());
547 m_assembler.ldrb(dest, address.base, armImm);
548 } else {
549 ASSERT(address.u.offset >= -255);
550 m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
551 }
552 }
553
554 void load8Signed(ArmAddress address, RegisterID dest)
555 {
556 ASSERT(address.type == ArmAddress::HasIndex);
557 m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
558 }
559
560 protected:
561 void store32(RegisterID src, ArmAddress address)
562 {
563 if (address.type == ArmAddress::HasIndex)
564 m_assembler.str(src, address.base, address.u.index, address.u.scale);
565 else if (address.u.offset >= 0) {
566 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
567 ASSERT(armImm.isValid());
568 m_assembler.str(src, address.base, armImm);
569 } else {
570 ASSERT(address.u.offset >= -255);
571 m_assembler.str(src, address.base, address.u.offset, true, false);
572 }
573 }
574
575 private:
576 void store8(RegisterID src, ArmAddress address)
577 {
578 if (address.type == ArmAddress::HasIndex)
579 m_assembler.strb(src, address.base, address.u.index, address.u.scale);
580 else if (address.u.offset >= 0) {
581 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
582 ASSERT(armImm.isValid());
583 m_assembler.strb(src, address.base, armImm);
584 } else {
585 ASSERT(address.u.offset >= -255);
586 m_assembler.strb(src, address.base, address.u.offset, true, false);
587 }
588 }
589
590 void store16(RegisterID src, ArmAddress address)
591 {
592 if (address.type == ArmAddress::HasIndex)
593 m_assembler.strh(src, address.base, address.u.index, address.u.scale);
594 else if (address.u.offset >= 0) {
595 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
596 ASSERT(armImm.isValid());
597 m_assembler.strh(src, address.base, armImm);
598 } else {
599 ASSERT(address.u.offset >= -255);
600 m_assembler.strh(src, address.base, address.u.offset, true, false);
601 }
602 }
603
604 public:
605 void load32(ImplicitAddress address, RegisterID dest)
606 {
607 load32(setupArmAddress(address), dest);
608 }
609
610 void load32(BaseIndex address, RegisterID dest)
611 {
612 load32(setupArmAddress(address), dest);
613 }
614
615 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
616 {
617 load32(setupArmAddress(address), dest);
618 }
619
620 void load16Unaligned(BaseIndex address, RegisterID dest)
621 {
622 load16(setupArmAddress(address), dest);
623 }
624
625 void load32(const void* address, RegisterID dest)
626 {
627 move(TrustedImmPtr(address), addressTempRegister);
628 m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
629 }
630
631 ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
632 {
633 ConvertibleLoadLabel result(this);
634 ASSERT(address.offset >= 0 && address.offset <= 255);
635 m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
636 return result;
637 }
638
639 void load8(ImplicitAddress address, RegisterID dest)
640 {
641 load8(setupArmAddress(address), dest);
642 }
643
644 void load8Signed(ImplicitAddress, RegisterID)
645 {
646 UNREACHABLE_FOR_PLATFORM();
647 }
648
649 void load8(BaseIndex address, RegisterID dest)
650 {
651 load8(setupArmAddress(address), dest);
652 }
653
654 void load8Signed(BaseIndex address, RegisterID dest)
655 {
656 load8Signed(setupArmAddress(address), dest);
657 }
658
659 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
660 {
661 DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
662 load32(ArmAddress(address.base, dataTempRegister), dest);
663 return label;
664 }
665
666 DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
667 {
668 padBeforePatch();
669
670 RegisterID base = address.base;
671
672 DataLabelCompact label(this);
673 ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
674
675 m_assembler.ldr(dest, base, address.offset, true, false);
676 return label;
677 }
678
679 void load16(BaseIndex address, RegisterID dest)
680 {
681 m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
682 }
683
684 void load16Signed(BaseIndex address, RegisterID dest)
685 {
686 load16Signed(setupArmAddress(address), dest);
687 }
688
689 void load16(ImplicitAddress address, RegisterID dest)
690 {
691 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
692 if (armImm.isValid())
693 m_assembler.ldrh(dest, address.base, armImm);
694 else {
695 move(TrustedImm32(address.offset), dataTempRegister);
696 m_assembler.ldrh(dest, address.base, dataTempRegister);
697 }
698 }
699
700 void load16Signed(ImplicitAddress, RegisterID)
701 {
702 UNREACHABLE_FOR_PLATFORM();
703 }
704
705 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
706 {
707 DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
708 store32(src, ArmAddress(address.base, dataTempRegister));
709 return label;
710 }
711
712 void store32(RegisterID src, ImplicitAddress address)
713 {
714 store32(src, setupArmAddress(address));
715 }
716
717 void store32(RegisterID src, BaseIndex address)
718 {
719 store32(src, setupArmAddress(address));
720 }
721
722 void store32(TrustedImm32 imm, ImplicitAddress address)
723 {
724 move(imm, dataTempRegister);
725 store32(dataTempRegister, setupArmAddress(address));
726 }
727
728 void store32(TrustedImm32 imm, BaseIndex address)
729 {
730 move(imm, dataTempRegister);
731 store32(dataTempRegister, setupArmAddress(address));
732 }
733
734 void store32(RegisterID src, const void* address)
735 {
736 move(TrustedImmPtr(address), addressTempRegister);
737 m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
738 }
739
740 void store32(TrustedImm32 imm, const void* address)
741 {
742 move(imm, dataTempRegister);
743 store32(dataTempRegister, address);
744 }
745
746 void store8(RegisterID src, BaseIndex address)
747 {
748 store8(src, setupArmAddress(address));
749 }
750
751 void store8(RegisterID src, void* address)
752 {
753 move(TrustedImmPtr(address), addressTempRegister);
754 store8(src, ArmAddress(addressTempRegister, 0));
755 }
756
757 void store8(TrustedImm32 imm, void* address)
758 {
759 move(imm, dataTempRegister);
760 store8(dataTempRegister, address);
761 }
762
763 void store16(RegisterID src, BaseIndex address)
764 {
765 store16(src, setupArmAddress(address));
766 }
767
768 // Possibly clobbers src, but not on this architecture.
769 void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
770 {
771 m_assembler.vmov(dest1, dest2, src);
772 }
773
774 void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
775 {
776 UNUSED_PARAM(scratch);
777 m_assembler.vmov(dest, src1, src2);
778 }
779
780 #if ENABLE(JIT_CONSTANT_BLINDING)
781 static bool shouldBlindForSpecificArch(uint32_t value)
782 {
783 ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
784
785 // Couldn't be encoded as an immediate, so assume it's untrusted.
786 if (!immediate.isValid())
787 return true;
788
789 // If we can encode the immediate, we have less than 16 attacker
790 // controlled bits.
791 if (immediate.isEncodedImm())
792 return false;
793
794 // Don't let any more than 12 bits of an instruction word
795 // be controlled by an attacker.
796 return !immediate.isUInt12();
797 }
798 #endif
799
800 // Floating-point operations:
801
802 static bool supportsFloatingPoint() { return true; }
803 static bool supportsFloatingPointTruncate() { return true; }
804 static bool supportsFloatingPointSqrt() { return true; }
805 static bool supportsFloatingPointAbs() { return true; }
806
807 void loadDouble(ImplicitAddress address, FPRegisterID dest)
808 {
809 RegisterID base = address.base;
810 int32_t offset = address.offset;
811
812 // ARM VFP addresses can be offset by a 9-bit ones'-complement immediate, left-shifted by 2.
813 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
814 add32(TrustedImm32(offset), base, addressTempRegister);
815 base = addressTempRegister;
816 offset = 0;
817 }
818
819 m_assembler.vldr(dest, base, offset);
820 }
821
822 void loadFloat(ImplicitAddress address, FPRegisterID dest)
823 {
824 RegisterID base = address.base;
825 int32_t offset = address.offset;
826
827 // ARM VFP addresses can be offset by a 9-bit ones'-complement immediate, left-shifted by 2.
828 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
829 add32(TrustedImm32(offset), base, addressTempRegister);
830 base = addressTempRegister;
831 offset = 0;
832 }
833
834 m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
835 }
836
837 void loadDouble(BaseIndex address, FPRegisterID dest)
838 {
839 move(address.index, addressTempRegister);
840 lshift32(TrustedImm32(address.scale), addressTempRegister);
841 add32(address.base, addressTempRegister);
842 loadDouble(Address(addressTempRegister, address.offset), dest);
843 }
844
845 void loadFloat(BaseIndex address, FPRegisterID dest)
846 {
847 move(address.index, addressTempRegister);
848 lshift32(TrustedImm32(address.scale), addressTempRegister);
849 add32(address.base, addressTempRegister);
850 loadFloat(Address(addressTempRegister, address.offset), dest);
851 }
852
853 void moveDouble(FPRegisterID src, FPRegisterID dest)
854 {
855 if (src != dest)
856 m_assembler.vmov(dest, src);
857 }
858
859 void loadDouble(const void* address, FPRegisterID dest)
860 {
861 move(TrustedImmPtr(address), addressTempRegister);
862 m_assembler.vldr(dest, addressTempRegister, 0);
863 }
864
865 void storeDouble(FPRegisterID src, ImplicitAddress address)
866 {
867 RegisterID base = address.base;
868 int32_t offset = address.offset;
869
870 // ARM VFP addresses can be offset by a 9-bit ones'-complement immediate, left-shifted by 2.
871 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
872 add32(TrustedImm32(offset), base, addressTempRegister);
873 base = addressTempRegister;
874 offset = 0;
875 }
876
877 m_assembler.vstr(src, base, offset);
878 }
879
880 void storeFloat(FPRegisterID src, ImplicitAddress address)
881 {
882 RegisterID base = address.base;
883 int32_t offset = address.offset;
884
885 // ARM VFP addresses can be offset by a 9-bit ones'-complement immediate, left-shifted by 2.
886 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
887 add32(TrustedImm32(offset), base, addressTempRegister);
888 base = addressTempRegister;
889 offset = 0;
890 }
891
892 m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
893 }
894
895 void storeDouble(FPRegisterID src, const void* address)
896 {
897 move(TrustedImmPtr(address), addressTempRegister);
898 storeDouble(src, addressTempRegister);
899 }
900
901 void storeDouble(FPRegisterID src, BaseIndex address)
902 {
903 move(address.index, addressTempRegister);
904 lshift32(TrustedImm32(address.scale), addressTempRegister);
905 add32(address.base, addressTempRegister);
906 storeDouble(src, Address(addressTempRegister, address.offset));
907 }
908
909 void storeFloat(FPRegisterID src, BaseIndex address)
910 {
911 move(address.index, addressTempRegister);
912 lshift32(TrustedImm32(address.scale), addressTempRegister);
913 add32(address.base, addressTempRegister);
914 storeFloat(src, Address(addressTempRegister, address.offset));
915 }
916
917 void addDouble(FPRegisterID src, FPRegisterID dest)
918 {
919 m_assembler.vadd(dest, dest, src);
920 }
921
922 void addDouble(Address src, FPRegisterID dest)
923 {
924 loadDouble(src, fpTempRegister);
925 addDouble(fpTempRegister, dest);
926 }
927
928 void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
929 {
930 m_assembler.vadd(dest, op1, op2);
931 }
932
933 void addDouble(AbsoluteAddress address, FPRegisterID dest)
934 {
935 loadDouble(address.m_ptr, fpTempRegister);
936 m_assembler.vadd(dest, dest, fpTempRegister);
937 }
938
939 void divDouble(FPRegisterID src, FPRegisterID dest)
940 {
941 m_assembler.vdiv(dest, dest, src);
942 }
943
944 void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
945 {
946 m_assembler.vdiv(dest, op1, op2);
947 }
948
949 void subDouble(FPRegisterID src, FPRegisterID dest)
950 {
951 m_assembler.vsub(dest, dest, src);
952 }
953
954 void subDouble(Address src, FPRegisterID dest)
955 {
956 loadDouble(src, fpTempRegister);
957 subDouble(fpTempRegister, dest);
958 }
959
960 void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
961 {
962 m_assembler.vsub(dest, op1, op2);
963 }
964
965 void mulDouble(FPRegisterID src, FPRegisterID dest)
966 {
967 m_assembler.vmul(dest, dest, src);
968 }
969
970 void mulDouble(Address src, FPRegisterID dest)
971 {
972 loadDouble(src, fpTempRegister);
973 mulDouble(fpTempRegister, dest);
974 }
975
976 void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
977 {
978 m_assembler.vmul(dest, op1, op2);
979 }
980
981 void sqrtDouble(FPRegisterID src, FPRegisterID dest)
982 {
983 m_assembler.vsqrt(dest, src);
984 }
985
986 void absDouble(FPRegisterID src, FPRegisterID dest)
987 {
988 m_assembler.vabs(dest, src);
989 }
990
991 void negateDouble(FPRegisterID src, FPRegisterID dest)
992 {
993 m_assembler.vneg(dest, src);
994 }
995
996 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
997 {
998 m_assembler.vmov(fpTempRegister, src, src);
999 m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1000 }
1001
1002 void convertInt32ToDouble(Address address, FPRegisterID dest)
1003 {
1004 // FIXME: load directly into the FPR!
1005 load32(address, dataTempRegister);
1006 m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1007 m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1008 }
1009
1010 void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
1011 {
1012 // FIXME: load directly into the FPR!
1013 load32(address.m_ptr, dataTempRegister);
1014 m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
1015 m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
1016 }
1017
1018 void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
1019 {
1020 m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
1021 }
1022
1023 void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
1024 {
1025 m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
1026 }
1027
1028 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
1029 {
1030 m_assembler.vcmp(left, right);
1031 m_assembler.vmrs();
1032
1033 if (cond == DoubleNotEqual) {
1034 // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
1035 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1036 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1037 unordered.link(this);
1038 return result;
1039 }
1040 if (cond == DoubleEqualOrUnordered) {
1041 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1042 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1043 unordered.link(this);
1044 // We get here if either unordered or equal.
1045 Jump result = jump();
1046 notEqual.link(this);
1047 return result;
1048 }
1049 return makeBranch(cond);
1050 }
1051
1052 enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1053 Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1054 {
1055 // Convert into dest.
1056 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1057 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1058
1059 // Calculate 2x dest. If the value potentially underflowed, it will have
1060 // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
1061 // overflow the result will be equal to -2.
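// (Worked example, added for clarity: 0x80000000 + 0x80000000 == 0 mod 2^32,
// so the Zero branch below catches the clamped underflow case, while
// 0x7fffffff + 0x7fffffff == 0xfffffffe == -2, which the compare against -2 catches.)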
1062 Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
1063 Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
1064
1065 // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
1066 underflow.link(this);
1067 if (branchType == BranchIfTruncateSuccessful)
1068 return noOverflow;
1069
1070 // We'll reach the current point in the code on failure, so plant a
1071 // jump here & link the success case.
1072 Jump failure = jump();
1073 noOverflow.link(this);
1074 return failure;
1075 }
1076
1077 Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1078 {
1079 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1080 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1081
1082 Jump overflow = branch32(Equal, dest, TrustedImm32(0x7fffffff));
1083 Jump success = branch32(GreaterThanOrEqual, dest, TrustedImm32(0));
1084 overflow.link(this);
1085
1086 if (branchType == BranchIfTruncateSuccessful)
1087 return success;
1088
1089 Jump failure = jump();
1090 success.link(this);
1091 return failure;
1092 }
1093
1094 // Result is undefined if the value is outside of the integer range.
1095 void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
1096 {
1097 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1098 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1099 }
1100
1101 void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
1102 {
1103 m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
1104 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1105 }
1106
1107 // Convert 'src' to an integer, placing the result in 'dest'.
1108 // If the result is not representable as a 32 bit value, branch.
1109 // May also branch for some values that are representable in 32 bits
1110 // (specifically, in this case, 0).
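//
// Illustrative usage (an added sketch; 'masm' and the register choices are
// assumptions):
//
//     JumpList notAnInt32;
//     masm.branchConvertDoubleToInt32(ARMRegisters::d0, ARMRegisters::r0, notAnInt32, ARMRegisters::d1);
//     // ... fast path using the integer in r0 ...
//     notAnInt32.link(&masm);  // fall back to the double path here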
1111 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
1112 {
1113 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1114 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1115
1116 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
1117 m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
1118 failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
1119
1120 // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
1121 if (negZeroCheck)
1122 failureCases.append(branchTest32(Zero, dest));
1123 }
1124
1125 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
1126 {
1127 m_assembler.vcmpz(reg);
1128 m_assembler.vmrs();
1129 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1130 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1131 unordered.link(this);
1132 return result;
1133 }
1134
1135 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
1136 {
1137 m_assembler.vcmpz(reg);
1138 m_assembler.vmrs();
1139 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1140 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1141 unordered.link(this);
1142 // We get here if either unordered or equal.
1143 Jump result = jump();
1144 notEqual.link(this);
1145 return result;
1146 }
1147
1148 // Stack manipulation operations:
1149 //
1150 // The ABI is assumed to provide a stack abstraction to memory,
1151 // containing machine word sized units of data. Push and pop
1152 // operations add and remove a single register sized unit of data
1153 // to or from the stack. Peek and poke operations read or write
1154 // values on the stack, without moving the current stack position.
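//
// Illustrative usage (an added sketch; the register choice is an assumption):
//
//     masm.push(ARMRegisters::r4);   // sp -= 4; [sp] = r4
//     masm.pop(ARMRegisters::r4);    // r4 = [sp]; sp += 4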
1155
1156 void pop(RegisterID dest)
1157 {
1158 // load postindexed with writeback
1159 m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
1160 }
1161
1162 void push(RegisterID src)
1163 {
1164 // store preindexed with writeback
1165 m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
1166 }
1167
1168 void push(Address address)
1169 {
1170 load32(address, dataTempRegister);
1171 push(dataTempRegister);
1172 }
1173
1174 void push(TrustedImm32 imm)
1175 {
1176 move(imm, dataTempRegister);
1177 push(dataTempRegister);
1178 }
1179
1180 // Register move operations:
1181 //
1182 // Move values in registers.
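//
// Illustrative usage (an added sketch; 'masm' is an assumed instance).
// Immediates that fit a Thumb-2 modified immediate (or whose bitwise
// inverse does) are emitted as a single MOV/MVN; anything else falls back
// to a MOVW/MOVT pair, as in move() below:
//
//     masm.move(TrustedImm32(0xff00ff00), ARMRegisters::r0); // single MOV (encoded immediate)
//     masm.move(TrustedImm32(0x12345678), ARMRegisters::r0); // MOVW + MOVT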
1183
1184 void move(TrustedImm32 imm, RegisterID dest)
1185 {
1186 uint32_t value = imm.m_value;
1187
1188 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
1189
1190 if (armImm.isValid())
1191 m_assembler.mov(dest, armImm);
1192 else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
1193 m_assembler.mvn(dest, armImm);
1194 else {
1195 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
1196 if (value & 0xffff0000)
1197 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
1198 }
1199 }
1200
1201 void move(RegisterID src, RegisterID dest)
1202 {
1203 if (src != dest)
1204 m_assembler.mov(dest, src);
1205 }
1206
1207 void move(TrustedImmPtr imm, RegisterID dest)
1208 {
1209 move(TrustedImm32(imm), dest);
1210 }
1211
1212 void swap(RegisterID reg1, RegisterID reg2)
1213 {
1214 move(reg1, dataTempRegister);
1215 move(reg2, reg1);
1216 move(dataTempRegister, reg2);
1217 }
1218
1219 void signExtend32ToPtr(RegisterID src, RegisterID dest)
1220 {
1221 move(src, dest);
1222 }
1223
1224 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
1225 {
1226 move(src, dest);
1227 }
1228
1229 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
1230 static RelationalCondition invert(RelationalCondition cond)
1231 {
1232 return static_cast<RelationalCondition>(cond ^ 1);
1233 }
1234
1235 void nop()
1236 {
1237 m_assembler.nop();
1238 }
1239
1240 static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
1241 {
1242 ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
1243 }
1244
1245 static ptrdiff_t maxJumpReplacementSize()
1246 {
1247 return ARMv7Assembler::maxJumpReplacementSize();
1248 }
1249
1250 // Forwards / external control flow operations:
1251 //
1252 // This set of jump and conditional branch operations returns a Jump
1253 // object which may be linked at a later point, allowing forward jumps,
1254 // or jumps that will require external linkage (after the code has been
1255 // relocated).
1256 //
1257 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1258 // respectively; for unsigned comparisons the names b, a, be, and ae are
1259 // used (representing the names 'below' and 'above').
1260 //
1261 // Operands to the comparison are provided in the expected order, e.g.
1262 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1263 // treated as a signed 32-bit value, is less than or equal to 5.
1264 //
1265 // jz and jnz test whether the first operand is equal to zero, and take
1266 // an optional second operand of a mask under which to perform the test.
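//
// Illustrative usage (an added sketch; 'masm', the register and the label
// name are assumptions):
//
//     Jump slowCase = masm.branch32(Above, ARMRegisters::r0, TrustedImm32(10));
//     // ... fast path for r0 <= 10 (unsigned) ...
//     slowCase.link(&masm);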
1267 private:
1268
1269 // Should we be using TEQ for equal/not-equal?
1270 void compare32(RegisterID left, TrustedImm32 right)
1271 {
1272 int32_t imm = right.m_value;
1273 if (!imm)
1274 m_assembler.tst(left, left);
1275 else {
1276 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1277 if (armImm.isValid())
1278 m_assembler.cmp(left, armImm);
1279 else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
1280 m_assembler.cmn(left, armImm);
1281 else {
1282 move(TrustedImm32(imm), dataTempRegister);
1283 m_assembler.cmp(left, dataTempRegister);
1284 }
1285 }
1286 }
1287
1288 void test32(RegisterID reg, TrustedImm32 mask)
1289 {
1290 int32_t imm = mask.m_value;
1291
1292 if (imm == -1)
1293 m_assembler.tst(reg, reg);
1294 else {
1295 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1296 if (armImm.isValid())
1297 m_assembler.tst(reg, armImm);
1298 else {
1299 move(mask, dataTempRegister);
1300 m_assembler.tst(reg, dataTempRegister);
1301 }
1302 }
1303 }
1304
1305 public:
1306 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1307 {
1308 m_assembler.cmp(left, right);
1309 return Jump(makeBranch(cond));
1310 }
1311
1312 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1313 {
1314 compare32(left, right);
1315 return Jump(makeBranch(cond));
1316 }
1317
1318 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1319 {
1320 load32(right, dataTempRegister);
1321 return branch32(cond, left, dataTempRegister);
1322 }
1323
1324 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1325 {
1326 load32(left, dataTempRegister);
1327 return branch32(cond, dataTempRegister, right);
1328 }
1329
1330 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1331 {
1332 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1333 load32(left, addressTempRegister);
1334 return branch32(cond, addressTempRegister, right);
1335 }
1336
1337 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1338 {
1339 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1340 load32(left, addressTempRegister);
1341 return branch32(cond, addressTempRegister, right);
1342 }
1343
1344 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1345 {
1346 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1347 load32WithUnalignedHalfWords(left, addressTempRegister);
1348 return branch32(cond, addressTempRegister, right);
1349 }
1350
1351 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1352 {
1353 load32(left.m_ptr, dataTempRegister);
1354 return branch32(cond, dataTempRegister, right);
1355 }
1356
1357 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1358 {
1359 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1360 load32(left.m_ptr, addressTempRegister);
1361 return branch32(cond, addressTempRegister, right);
1362 }
1363
1364 Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1365 {
1366 compare32(left, right);
1367 return Jump(makeBranch(cond));
1368 }
1369
1370 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1371 {
1372 ASSERT(!(0xffffff00 & right.m_value));
1373 // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
1374 load8(left, addressTempRegister);
1375 return branch8(cond, addressTempRegister, right);
1376 }
1377
1378 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1379 {
1380 ASSERT(!(0xffffff00 & right.m_value));
1381 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1382 load8(left, addressTempRegister);
1383 return branch32(cond, addressTempRegister, right);
1384 }
1385
1386 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1387 {
1388 m_assembler.tst(reg, mask);
1389 return Jump(makeBranch(cond));
1390 }
1391
1392 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1393 {
1394 test32(reg, mask);
1395 return Jump(makeBranch(cond));
1396 }
1397
1398 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1399 {
1400 // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1401 load32(address, addressTempRegister);
1402 return branchTest32(cond, addressTempRegister, mask);
1403 }
1404
1405 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1406 {
1407 // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1408 load32(address, addressTempRegister);
1409 return branchTest32(cond, addressTempRegister, mask);
1410 }
1411
1412 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1413 {
1414 // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1415 load8(address, addressTempRegister);
1416 return branchTest32(cond, addressTempRegister, mask);
1417 }
1418
1419 Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
1420 {
1421 // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
1422 move(TrustedImmPtr(address.m_ptr), addressTempRegister);
1423 load8(Address(addressTempRegister), addressTempRegister);
1424 return branchTest32(cond, addressTempRegister, mask);
1425 }
1426
1427 void jump(RegisterID target)
1428 {
1429 m_assembler.bx(target);
1430 }
1431
1432 // Address is a memory location containing the address to jump to
1433 void jump(Address address)
1434 {
1435 load32(address, dataTempRegister);
1436 m_assembler.bx(dataTempRegister);
1437 }
1438
1439 void jump(AbsoluteAddress address)
1440 {
1441 move(TrustedImmPtr(address.m_ptr), dataTempRegister);
1442 load32(Address(dataTempRegister), dataTempRegister);
1443 m_assembler.bx(dataTempRegister);
1444 }
1445
1446
1447 // Arithmetic control flow operations:
1448 //
1449 // This set of conditional branch operations branches based
1450 // on the result of an arithmetic operation. The operation
1451 // is performed as normal, storing the result.
1452 //
1453 // * jz operations branch if the result is zero.
1454 // * jo operations branch if the (signed) arithmetic
1455 // operation caused an overflow to occur.
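//
// Illustrative usage (an added sketch; 'masm' and the registers are assumed):
//
//     Jump overflowed = masm.branchAdd32(Overflow, ARMRegisters::r1, ARMRegisters::r0);
//     // r0 now holds r0 + r1; 'overflowed' is taken on signed overflow.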
1456
1457 Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1458 {
1459 m_assembler.add_S(dest, op1, op2);
1460 return Jump(makeBranch(cond));
1461 }
1462
1463 Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1464 {
1465 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1466 if (armImm.isValid())
1467 m_assembler.add_S(dest, op1, armImm);
1468 else {
1469 move(imm, dataTempRegister);
1470 m_assembler.add_S(dest, op1, dataTempRegister);
1471 }
1472 return Jump(makeBranch(cond));
1473 }
1474
1475 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1476 {
1477 return branchAdd32(cond, dest, src, dest);
1478 }
1479
1480 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1481 {
1482 return branchAdd32(cond, dest, imm, dest);
1483 }
1484
1485 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
1486 {
1487 // Move the address into addressTempRegister,
1488 // and load the value into dataTempRegister.
1489 move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1490 m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1491
1492 // Do the add.
1493 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1494 if (armImm.isValid())
1495 m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
1496 else {
1497 // If the operand does not fit into an immediate then load it temporarily
1498 // into addressTempRegister; since we're overwriting addressTempRegister
1499 // we'll need to reload it with the address afterwards.
1500 move(imm, addressTempRegister);
1501 m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
1502 move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1503 }
1504
1505 // Store the result.
1506 m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1507
1508 return Jump(makeBranch(cond));
1509 }
1510
1511 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1512 {
1513 m_assembler.smull(dest, dataTempRegister, src1, src2);
1514
1515 if (cond == Overflow) {
1516 m_assembler.asr(addressTempRegister, dest, 31);
1517 return branch32(NotEqual, addressTempRegister, dataTempRegister);
1518 }
1519
1520 return branchTest32(cond, dest);
1521 }
1522
1523 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1524 {
1525 return branchMul32(cond, src, dest, dest);
1526 }
1527
1528 Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
1529 {
1530 move(imm, dataTempRegister);
1531 return branchMul32(cond, dataTempRegister, src, dest);
1532 }
1533
1534 Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
1535 {
1536 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1537 m_assembler.sub_S(srcDest, zero, srcDest);
1538 return Jump(makeBranch(cond));
1539 }
1540
1541 Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
1542 {
1543 m_assembler.orr_S(dest, dest, src);
1544 return Jump(makeBranch(cond));
1545 }
1546
1547 Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1548 {
1549 m_assembler.sub_S(dest, op1, op2);
1550 return Jump(makeBranch(cond));
1551 }
1552
1553 Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1554 {
1555 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1556 if (armImm.isValid())
1557 m_assembler.sub_S(dest, op1, armImm);
1558 else {
1559 move(imm, dataTempRegister);
1560 m_assembler.sub_S(dest, op1, dataTempRegister);
1561 }
1562 return Jump(makeBranch(cond));
1563 }
1564
1565 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1566 {
1567 return branchSub32(cond, dest, src, dest);
1568 }
1569
1570 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1571 {
1572 return branchSub32(cond, dest, imm, dest);
1573 }
1574
1575 void relativeTableJump(RegisterID index, int scale)
1576 {
1577 ASSERT(scale >= 0 && scale <= 31);
1578
1579 // dataTempRegister will point after the jump if index register contains zero
1580 move(ARMRegisters::pc, dataTempRegister);
1581 m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
1582
1583 ShiftTypeAndAmount shift(SRType_LSL, scale);
1584 m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
1585 jump(dataTempRegister);
1586 }
1587
1588 // Miscellaneous operations:
1589
1590 void breakpoint(uint8_t imm = 0)
1591 {
1592 m_assembler.bkpt(imm);
1593 }
1594
1595 ALWAYS_INLINE Call nearCall()
1596 {
1597 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1598 return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
1599 }
1600
1601 ALWAYS_INLINE Call call()
1602 {
1603 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1604 return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
1605 }
1606
1607 ALWAYS_INLINE Call call(RegisterID target)
1608 {
1609 return Call(m_assembler.blx(target), Call::None);
1610 }
1611
1612 ALWAYS_INLINE Call call(Address address)
1613 {
1614 load32(address, dataTempRegister);
1615 return Call(m_assembler.blx(dataTempRegister), Call::None);
1616 }
1617
1618 ALWAYS_INLINE void ret()
1619 {
1620 m_assembler.bx(linkRegister);
1621 }
1622
1623 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1624 {
1625 m_assembler.cmp(left, right);
1626 m_assembler.it(armV7Condition(cond), false);
1627 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1628 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1629 }
1630
1631 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
1632 {
1633 load32(left, dataTempRegister);
1634 compare32(cond, dataTempRegister, right, dest);
1635 }
1636
1637 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
1638 {
1639 load8(left, addressTempRegister);
1640 compare32(cond, addressTempRegister, right, dest);
1641 }
1642
1643 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1644 {
1645 compare32(left, right);
1646 m_assembler.it(armV7Condition(cond), false);
1647 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1648 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1649 }
1650
1651 // FIXME:
1652 // The mask should be optional... perhaps the argument order should be
1653 // dest-src, operations always have a dest? ... possibly not true, considering
1654 // asm ops like test, or pseudo ops like pop().
1655 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1656 {
1657 load32(address, dataTempRegister);
1658 test32(dataTempRegister, mask);
1659 m_assembler.it(armV7Condition(cond), false);
1660 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1661 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1662 }
1663
1664 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1665 {
1666 load8(address, dataTempRegister);
1667 test32(dataTempRegister, mask);
1668 m_assembler.it(armV7Condition(cond), false);
1669 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1670 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1671 }
1672
1673 ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
1674 {
1675 padBeforePatch();
1676 moveFixedWidthEncoding(imm, dst);
1677 return DataLabel32(this);
1678 }
1679
1680 ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
1681 {
1682 padBeforePatch();
1683 moveFixedWidthEncoding(TrustedImm32(imm), dst);
1684 return DataLabelPtr(this);
1685 }
1686
1687 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1688 {
1689 dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1690 return branch32(cond, left, dataTempRegister);
1691 }
1692
1693 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1694 {
1695 load32(left, addressTempRegister);
1696 dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1697 return branch32(cond, addressTempRegister, dataTempRegister);
1698 }
1699
1700 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
1701 {
1702 m_makeJumpPatchable = true;
1703 Jump result = branch32(cond, left, TrustedImm32(right));
1704 m_makeJumpPatchable = false;
1705 return PatchableJump(result);
1706 }
1707
1708 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1709 {
1710 m_makeJumpPatchable = true;
1711 Jump result = branchTest32(cond, reg, mask);
1712 m_makeJumpPatchable = false;
1713 return PatchableJump(result);
1714 }
1715
1716 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
1717 {
1718 m_makeJumpPatchable = true;
1719 Jump result = branch32(cond, reg, imm);
1720 m_makeJumpPatchable = false;
1721 return PatchableJump(result);
1722 }
1723
1724 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1725 {
1726 m_makeJumpPatchable = true;
1727 Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
1728 m_makeJumpPatchable = false;
1729 return PatchableJump(result);
1730 }
1731
1732 PatchableJump patchableJump()
1733 {
1734 padBeforePatch();
1735 m_makeJumpPatchable = true;
1736 Jump result = jump();
1737 m_makeJumpPatchable = false;
1738 return PatchableJump(result);
1739 }
1740
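    // Materializes the pointer with a patchable movw/movt into dataTempRegister
    // and then stores it; the returned label refers to the patchable constant,
    // not to the store itself.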
1741 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
1742 {
1743 DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
1744 store32(dataTempRegister, address);
1745 return label;
1746 }
1747 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
1748
1749
1750 ALWAYS_INLINE Call tailRecursiveCall()
1751 {
1752 // Like a normal call, but don't link.
1753 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1754 return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
1755 }
1756
1757 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1758 {
1759 oldJump.link(this);
1760 return tailRecursiveCall();
1761 }
1762
1763
1764 int executableOffsetFor(int location)
1765 {
1766 return m_assembler.executableOffsetFor(location);
1767 }
1768
1769 static FunctionPtr readCallTarget(CodeLocationCall call)
1770 {
1771 return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
1772 }
1773
1774 static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
1775
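    // The patchable compare begins with the movw/movt pair emitted by
    // moveWithPatch, i.e. two 4-byte Thumb-2 instructions (8 bytes) before the
    // data label.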
1776 static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
1777 {
1778 const unsigned twoWordOpSize = 4;
1779 return label.labelAtOffset(-twoWordOpSize * 2);
1780 }
1781
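    // The Linux/QNX build emits the longer movT3/movt/cmp form for the
    // patchable compare, so reverting a jump replacement must restore all three
    // instructions; other builds restore only the leading movT3 carrying the
    // low halfword of the initial value.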
1782 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
1783 {
1784 #if OS(LINUX) || OS(QNX)
1785 ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
1786 #else
1787 UNUSED_PARAM(rd);
1788 ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
1789 #endif
1790 }
1791
1792 static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
1793 {
1794 UNREACHABLE_FOR_PLATFORM();
1795 return CodeLocationLabel();
1796 }
1797
1798 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
1799 {
1800 UNREACHABLE_FOR_PLATFORM();
1801 }
1802
1803 protected:
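    // Jumps are emitted in their long form: a fixed-width load of the target
    // into dataTempRegister followed by bx. Non-patchable jumps may later be
    // compacted at link time; patchable ones are tagged fixed-size so they keep
    // this layout and remain repatchable.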
1804 ALWAYS_INLINE Jump jump()
1805 {
1806 m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
1807 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1808 return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
1809 }
1810
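    // Conditional branches predicate the whole movw/movt/bx triple with an
    // ITTT block (it(cond, true, true)), so the sequence executes only when
    // cond holds.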
1811 ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
1812 {
1813 m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
1814 m_assembler.it(cond, true, true);
1815 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1816 return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
1817 }
1818 ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
1819 ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
1820 ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1821
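    // Folds any constant offset into addressTempRegister up front, so the
    // resulting ArmAddress is a plain base + (index << scale) form.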
1822 ArmAddress setupArmAddress(BaseIndex address)
1823 {
1824 if (address.offset) {
1825 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1826 if (imm.isValid())
1827 m_assembler.add(addressTempRegister, address.base, imm);
1828 else {
1829 move(TrustedImm32(address.offset), addressTempRegister);
1830 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1831 }
1832
1833 return ArmAddress(addressTempRegister, address.index, address.scale);
1834 } else
1835 return ArmAddress(address.base, address.index, address.scale);
1836 }
1837
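    // Thumb-2 immediate load/store offsets cover -255..4095; offsets outside
    // that range are moved into addressTempRegister and used as a register
    // offset instead. The same logic applies to Address and ImplicitAddress.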
1838 ArmAddress setupArmAddress(Address address)
1839 {
1840 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1841 return ArmAddress(address.base, address.offset);
1842
1843 move(TrustedImm32(address.offset), addressTempRegister);
1844 return ArmAddress(address.base, addressTempRegister);
1845 }
1846
1847 ArmAddress setupArmAddress(ImplicitAddress address)
1848 {
1849 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1850 return ArmAddress(address.base, address.offset);
1851
1852 move(TrustedImm32(address.offset), addressTempRegister);
1853 return ArmAddress(address.base, addressTempRegister);
1854 }
1855
1856 RegisterID makeBaseIndexBase(BaseIndex address)
1857 {
1858 if (!address.offset)
1859 return address.base;
1860
1861 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1862 if (imm.isValid())
1863 m_assembler.add(addressTempRegister, address.base, imm);
1864 else {
1865 move(TrustedImm32(address.offset), addressTempRegister);
1866 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1867 }
1868
1869 return addressTempRegister;
1870 }
1871
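    // Emits a fixed two-instruction (8-byte) sequence regardless of the value:
    //     movw dst, #(value & 0xffff)    ; MOV immediate, T3 encoding
    //     movt dst, #(value >> 16)
    // Patching code relies on this fixed width to rewrite the constant in place.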
1872 void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
1873 {
1874 uint32_t value = imm.m_value;
1875 m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
1876 m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
1877 }
1878
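    // RelationalCondition, ResultCondition and DoubleCondition are declared
    // with the same underlying values as ARMv7Assembler::Condition, so the
    // conversion is just a cast.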
1879 ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
1880 {
1881 return static_cast<ARMv7Assembler::Condition>(cond);
1882 }
1883
1884 ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
1885 {
1886 return static_cast<ARMv7Assembler::Condition>(cond);
1887 }
1888
1889 ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
1890 {
1891 return static_cast<ARMv7Assembler::Condition>(cond);
1892 }
1893
1894 private:
1895 friend class LinkBuffer;
1896 friend class RepatchBuffer;
1897
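    // Calls load their target into dataTempRegister via the fixed-width
    // movw/movt encoding before branching, so linking and repatching both come
    // down to rewriting that constant in the assembler.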
1898 static void linkCall(void* code, Call call, FunctionPtr function)
1899 {
1900 ARMv7Assembler::linkCall(code, call.m_label, function.value());
1901 }
1902
1903 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
1904 {
1905 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1906 }
1907
1908 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
1909 {
1910 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1911 }
1912
1913 bool m_makeJumpPatchable;
1914 };
1915
1916 } // namespace JSC
1917
1918 #endif // ENABLE(ASSEMBLER)
1919
1920 #endif // MacroAssemblerARMv7_h