1 /*
2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef MacroAssemblerARMv7_h
28 #define MacroAssemblerARMv7_h
29
30 #if ENABLE(ASSEMBLER)
31
32 #include "ARMv7Assembler.h"
33 #include "AbstractMacroAssembler.h"
34
35 namespace JSC {
36
37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
38 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
39 // - dTR is likely used more than aTR, and we'll get better instruction
40 // encoding if it's in the low 8 registers.
41 static const RegisterID dataTempRegister = ARMRegisters::ip;
42 static const RegisterID addressTempRegister = ARMRegisters::r3;
43
44 static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
45 inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
46
47 public:
48 MacroAssemblerARMv7()
49 : m_makeJumpPatchable(false)
50 {
51 }
52
53 typedef ARMv7Assembler::LinkRecord LinkRecord;
54 typedef ARMv7Assembler::JumpType JumpType;
55 typedef ARMv7Assembler::JumpLinkType JumpLinkType;
56 // Magic number is the biggest useful offset we can get on ARMv7 with
57 // a LDR_imm_T2 encoding
58 static const int MaximumCompactPtrAlignedAddressOffset = 124;
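// (Assumed derivation: the compact encoding's offset field holds 7 bits - see the
// isUInt7() assertion in load32WithCompactAddressOffsetPatch() below - so the
// largest pointer-aligned offset is 127 rounded down to a multiple of 4, i.e. 124.)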
59
60 Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
61 void* unlinkedCode() { return m_assembler.unlinkedCode(); }
62 bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
63 JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
64 JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
65 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
66 int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
67 void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
68
69 struct ArmAddress {
70 enum AddressType {
71 HasOffset,
72 HasIndex,
73 } type;
74 RegisterID base;
75 union {
76 int32_t offset;
77 struct {
78 RegisterID index;
79 Scale scale;
80 };
81 } u;
82
83 explicit ArmAddress(RegisterID base, int32_t offset = 0)
84 : type(HasOffset)
85 , base(base)
86 {
87 u.offset = offset;
88 }
89
90 explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
91 : type(HasIndex)
92 , base(base)
93 {
94 u.index = index;
95 u.scale = scale;
96 }
97 };
98
99 public:
100 typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;
101
102 static const Scale ScalePtr = TimesFour;
103
104 enum RelationalCondition {
105 Equal = ARMv7Assembler::ConditionEQ,
106 NotEqual = ARMv7Assembler::ConditionNE,
107 Above = ARMv7Assembler::ConditionHI,
108 AboveOrEqual = ARMv7Assembler::ConditionHS,
109 Below = ARMv7Assembler::ConditionLO,
110 BelowOrEqual = ARMv7Assembler::ConditionLS,
111 GreaterThan = ARMv7Assembler::ConditionGT,
112 GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
113 LessThan = ARMv7Assembler::ConditionLT,
114 LessThanOrEqual = ARMv7Assembler::ConditionLE
115 };
116
117 enum ResultCondition {
118 Overflow = ARMv7Assembler::ConditionVS,
119 Signed = ARMv7Assembler::ConditionMI,
120 Zero = ARMv7Assembler::ConditionEQ,
121 NonZero = ARMv7Assembler::ConditionNE
122 };
123
124 enum DoubleCondition {
125 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
126 DoubleEqual = ARMv7Assembler::ConditionEQ,
127 DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
128 DoubleGreaterThan = ARMv7Assembler::ConditionGT,
129 DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
130 DoubleLessThan = ARMv7Assembler::ConditionLO,
131 DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
132 // If either operand is NaN, these conditions always evaluate to true.
133 DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
134 DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
135 DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
136 DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
137 DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
138 DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
139 };
140
141 static const RegisterID stackPointerRegister = ARMRegisters::sp;
142 static const RegisterID linkRegister = ARMRegisters::lr;
143
144 // Integer arithmetic operations:
145 //
146 // Operations are typically two operand - operation(source, srcDst)
147 // For many operations the source may be a TrustedImm32, the srcDst operand
148 // may often be a memory location (explicitly described using an Address
149 // object).
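//
// A minimal usage sketch (the 'masm', 'regT0' and 'regT1' names are illustrative,
// not part of this class):
//
//     masm.add32(TrustedImm32(4), regT0);            // regT0 = regT0 + 4
//     masm.add32(Address(regT1, 8), regT0);          // regT0 = regT0 + *(int32_t*)(regT1 + 8)
//     masm.sub32(TrustedImm32(1), Address(regT1));   // *(int32_t*)regT1 -= 1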
150
151 void add32(RegisterID src, RegisterID dest)
152 {
153 m_assembler.add(dest, dest, src);
154 }
155
156 void add32(TrustedImm32 imm, RegisterID dest)
157 {
158 add32(imm, dest, dest);
159 }
160
161 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
162 {
163 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
164 if (armImm.isValid())
165 m_assembler.add(dest, src, armImm);
166 else {
167 move(imm, dataTempRegister);
168 m_assembler.add(dest, src, dataTempRegister);
169 }
170 }
171
172 void add32(TrustedImm32 imm, Address address)
173 {
174 load32(address, dataTempRegister);
175
176 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
177 if (armImm.isValid())
178 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
179 else {
180 // Hrrrm, since dataTempRegister holds the data loaded,
181 // use addressTempRegister to hold the immediate.
182 move(imm, addressTempRegister);
183 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
184 }
185
186 store32(dataTempRegister, address);
187 }
188
189 void add32(Address src, RegisterID dest)
190 {
191 load32(src, dataTempRegister);
192 add32(dataTempRegister, dest);
193 }
194
195 void add32(TrustedImm32 imm, AbsoluteAddress address)
196 {
197 load32(address.m_ptr, dataTempRegister);
198
199 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
200 if (armImm.isValid())
201 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
202 else {
203 // Hrrrm, since dataTempRegister holds the data loaded,
204 // use addressTempRegister to hold the immediate.
205 move(imm, addressTempRegister);
206 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
207 }
208
209 store32(dataTempRegister, address.m_ptr);
210 }
211
212 void add64(TrustedImm32 imm, AbsoluteAddress address)
213 {
214 move(TrustedImmPtr(address.m_ptr), addressTempRegister);
215
216 m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
217 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
218 if (armImm.isValid())
219 m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
220 else {
221 move(imm, addressTempRegister);
222 m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
223 move(TrustedImmPtr(address.m_ptr), addressTempRegister);
224 }
225 m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
226
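// Propagate the carry into the high word: since imm is a 32-bit value, its
// sign-extended high word is (imm.m_value >> 31), i.e. 0 or -1, both of which
// encode as Thumb-2 modified immediates.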
227 m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
228 m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
229 m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
230 }
231
232 void and32(RegisterID op1, RegisterID op2, RegisterID dest)
233 {
234 m_assembler.ARM_and(dest, op1, op2);
235 }
236
237 void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
238 {
239 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
240 if (armImm.isValid())
241 m_assembler.ARM_and(dest, src, armImm);
242 else {
243 move(imm, dataTempRegister);
244 m_assembler.ARM_and(dest, src, dataTempRegister);
245 }
246 }
247
248 void and32(RegisterID src, RegisterID dest)
249 {
250 and32(dest, src, dest);
251 }
252
253 void and32(TrustedImm32 imm, RegisterID dest)
254 {
255 and32(imm, dest, dest);
256 }
257
258 void countLeadingZeros32(RegisterID src, RegisterID dest)
259 {
260 m_assembler.clz(dest, src);
261 }
262
263 void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
264 {
265 // Clamp the shift to the range 0..31
266 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
267 ASSERT(armImm.isValid());
268 m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
269
270 m_assembler.lsl(dest, src, dataTempRegister);
271 }
272
273 void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
274 {
275 m_assembler.lsl(dest, src, imm.m_value & 0x1f);
276 }
277
278 void lshift32(RegisterID shiftAmount, RegisterID dest)
279 {
280 lshift32(dest, shiftAmount, dest);
281 }
282
283 void lshift32(TrustedImm32 imm, RegisterID dest)
284 {
285 lshift32(dest, imm, dest);
286 }
287
288 void mul32(RegisterID src, RegisterID dest)
289 {
290 m_assembler.smull(dest, dataTempRegister, dest, src);
291 }
292
293 void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
294 {
295 move(imm, dataTempRegister);
296 m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
297 }
298
299 void neg32(RegisterID srcDest)
300 {
301 m_assembler.neg(srcDest, srcDest);
302 }
303
304 void or32(RegisterID src, RegisterID dest)
305 {
306 m_assembler.orr(dest, dest, src);
307 }
308
309 void or32(TrustedImm32 imm, RegisterID dest)
310 {
311 or32(imm, dest, dest);
312 }
313
314 void or32(RegisterID op1, RegisterID op2, RegisterID dest)
315 {
316 m_assembler.orr(dest, op1, op2);
317 }
318
319 void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
320 {
321 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
322 if (armImm.isValid())
323 m_assembler.orr(dest, src, armImm);
324 else {
325 move(imm, dataTempRegister);
326 m_assembler.orr(dest, src, dataTempRegister);
327 }
328 }
329
330 void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
331 {
332 // Clamp the shift to the range 0..31
333 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
334 ASSERT(armImm.isValid());
335 m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
336
337 m_assembler.asr(dest, src, dataTempRegister);
338 }
339
340 void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
341 {
342 m_assembler.asr(dest, src, imm.m_value & 0x1f);
343 }
344
345 void rshift32(RegisterID shiftAmount, RegisterID dest)
346 {
347 rshift32(dest, shiftAmount, dest);
348 }
349
350 void rshift32(TrustedImm32 imm, RegisterID dest)
351 {
352 rshift32(dest, imm, dest);
353 }
354
355 void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
356 {
357 // Clamp the shift to the range 0..31
358 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
359 ASSERT(armImm.isValid());
360 m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
361
362 m_assembler.lsr(dest, src, dataTempRegister);
363 }
364
365 void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
366 {
367 m_assembler.lsr(dest, src, imm.m_value & 0x1f);
368 }
369
370 void urshift32(RegisterID shiftAmount, RegisterID dest)
371 {
372 urshift32(dest, shiftAmount, dest);
373 }
374
375 void urshift32(TrustedImm32 imm, RegisterID dest)
376 {
377 urshift32(dest, imm, dest);
378 }
379
380 void sub32(RegisterID src, RegisterID dest)
381 {
382 m_assembler.sub(dest, dest, src);
383 }
384
385 void sub32(TrustedImm32 imm, RegisterID dest)
386 {
387 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
388 if (armImm.isValid())
389 m_assembler.sub(dest, dest, armImm);
390 else {
391 move(imm, dataTempRegister);
392 m_assembler.sub(dest, dest, dataTempRegister);
393 }
394 }
395
396 void sub32(TrustedImm32 imm, Address address)
397 {
398 load32(address, dataTempRegister);
399
400 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
401 if (armImm.isValid())
402 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
403 else {
404 // Hrrrm, since dataTempRegister holds the data loaded,
405 // use addressTempRegister to hold the immediate.
406 move(imm, addressTempRegister);
407 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
408 }
409
410 store32(dataTempRegister, address);
411 }
412
413 void sub32(Address src, RegisterID dest)
414 {
415 load32(src, dataTempRegister);
416 sub32(dataTempRegister, dest);
417 }
418
419 void sub32(TrustedImm32 imm, AbsoluteAddress address)
420 {
421 load32(address.m_ptr, dataTempRegister);
422
423 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
424 if (armImm.isValid())
425 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
426 else {
427 // Hrrrm, since dataTempRegister holds the data loaded,
428 // use addressTempRegister to hold the immediate.
429 move(imm, addressTempRegister);
430 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
431 }
432
433 store32(dataTempRegister, address.m_ptr);
434 }
435
436 void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
437 {
438 m_assembler.eor(dest, op1, op2);
439 }
440
441 void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
442 {
443 if (imm.m_value == -1) {
444 m_assembler.mvn(dest, src);
445 return;
446 }
447
448 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
449 if (armImm.isValid())
450 m_assembler.eor(dest, src, armImm);
451 else {
452 move(imm, dataTempRegister);
453 m_assembler.eor(dest, src, dataTempRegister);
454 }
455 }
456
457 void xor32(RegisterID src, RegisterID dest)
458 {
459 xor32(dest, src, dest);
460 }
461
462 void xor32(TrustedImm32 imm, RegisterID dest)
463 {
464 if (imm.m_value == -1)
465 m_assembler.mvn(dest, dest);
466 else
467 xor32(imm, dest, dest);
468 }
469
470
471 // Memory access operations:
472 //
473 // Loads are of the form load(address, destination) and stores of the form
474 // store(source, address). The source for a store may be a TrustedImm32. Address
475 // operand objects to loads and stores will be implicitly constructed if a
476 // register is passed.
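//
// A minimal usage sketch (register names are illustrative):
//
//     masm.load32(Address(regT0, 16), regT1);                  // regT1 = *(int32_t*)(regT0 + 16)
//     masm.load32(BaseIndex(regT0, regT2, TimesFour), regT1);  // regT1 = *(int32_t*)(regT0 + regT2 * 4)
//     masm.store32(TrustedImm32(0), Address(regT0, 16));       // *(int32_t*)(regT0 + 16) = 0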
477
478 private:
479 void load32(ArmAddress address, RegisterID dest)
480 {
481 if (address.type == ArmAddress::HasIndex)
482 m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
483 else if (address.u.offset >= 0) {
484 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
485 ASSERT(armImm.isValid());
486 m_assembler.ldr(dest, address.base, armImm);
487 } else {
488 ASSERT(address.u.offset >= -255);
489 m_assembler.ldr(dest, address.base, address.u.offset, true, false);
490 }
491 }
492
493 void load16(ArmAddress address, RegisterID dest)
494 {
495 if (address.type == ArmAddress::HasIndex)
496 m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
497 else if (address.u.offset >= 0) {
498 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
499 ASSERT(armImm.isValid());
500 m_assembler.ldrh(dest, address.base, armImm);
501 } else {
502 ASSERT(address.u.offset >= -255);
503 m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
504 }
505 }
506
507 void load16Signed(ArmAddress address, RegisterID dest)
508 {
509 ASSERT(address.type == ArmAddress::HasIndex);
510 m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
511 }
512
513 void load8(ArmAddress address, RegisterID dest)
514 {
515 if (address.type == ArmAddress::HasIndex)
516 m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
517 else if (address.u.offset >= 0) {
518 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
519 ASSERT(armImm.isValid());
520 m_assembler.ldrb(dest, address.base, armImm);
521 } else {
522 ASSERT(address.u.offset >= -255);
523 m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
524 }
525 }
526
527 void load8Signed(ArmAddress address, RegisterID dest)
528 {
529 ASSERT(address.type == ArmAddress::HasIndex);
530 m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
531 }
532
533 protected:
534 void store32(RegisterID src, ArmAddress address)
535 {
536 if (address.type == ArmAddress::HasIndex)
537 m_assembler.str(src, address.base, address.u.index, address.u.scale);
538 else if (address.u.offset >= 0) {
539 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
540 ASSERT(armImm.isValid());
541 m_assembler.str(src, address.base, armImm);
542 } else {
543 ASSERT(address.u.offset >= -255);
544 m_assembler.str(src, address.base, address.u.offset, true, false);
545 }
546 }
547
548 private:
549 void store8(RegisterID src, ArmAddress address)
550 {
551 if (address.type == ArmAddress::HasIndex)
552 m_assembler.strb(src, address.base, address.u.index, address.u.scale);
553 else if (address.u.offset >= 0) {
554 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
555 ASSERT(armImm.isValid());
556 m_assembler.strb(src, address.base, armImm);
557 } else {
558 ASSERT(address.u.offset >= -255);
559 m_assembler.strb(src, address.base, address.u.offset, true, false);
560 }
561 }
562
563 void store16(RegisterID src, ArmAddress address)
564 {
565 if (address.type == ArmAddress::HasIndex)
566 m_assembler.strh(src, address.base, address.u.index, address.u.scale);
567 else if (address.u.offset >= 0) {
568 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
569 ASSERT(armImm.isValid());
570 m_assembler.strh(src, address.base, armImm);
571 } else {
572 ASSERT(address.u.offset >= -255);
573 m_assembler.strh(src, address.base, address.u.offset, true, false);
574 }
575 }
576
577 public:
578 void load32(ImplicitAddress address, RegisterID dest)
579 {
580 load32(setupArmAddress(address), dest);
581 }
582
583 void load32(BaseIndex address, RegisterID dest)
584 {
585 load32(setupArmAddress(address), dest);
586 }
587
588 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
589 {
590 load32(setupArmAddress(address), dest);
591 }
592
593 void load16Unaligned(BaseIndex address, RegisterID dest)
594 {
595 load16(setupArmAddress(address), dest);
596 }
597
598 void load32(const void* address, RegisterID dest)
599 {
600 move(TrustedImmPtr(address), addressTempRegister);
601 m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
602 }
603
604 void load8(ImplicitAddress address, RegisterID dest)
605 {
606 load8(setupArmAddress(address), dest);
607 }
608
609 void load8Signed(ImplicitAddress, RegisterID)
610 {
611 unreachableForPlatform();
612 }
613
614 void load8(BaseIndex address, RegisterID dest)
615 {
616 load8(setupArmAddress(address), dest);
617 }
618
619 void load8Signed(BaseIndex address, RegisterID dest)
620 {
621 load8Signed(setupArmAddress(address), dest);
622 }
623
624 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
625 {
626 DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
627 load32(ArmAddress(address.base, dataTempRegister), dest);
628 return label;
629 }
630
631 // FIXME: we should be able to plant a compact load relative to/from any base/dest register.
632 DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
633 {
634 RegisterID base = address.base;
635
636 if (base >= ARMRegisters::r8) {
637 move(base, addressTempRegister);
638 base = addressTempRegister;
639 }
640
641 DataLabelCompact label(this);
642 ASSERT(address.offset >= 0);
643 ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
644 ASSERT(ARMThumbImmediate::makeUInt12(address.offset).isUInt7());
645
646 if (dest >= ARMRegisters::r8) {
647 m_assembler.ldrCompact(addressTempRegister, base, ARMThumbImmediate::makeUInt12(address.offset));
648 move(addressTempRegister, dest);
649 } else
650 m_assembler.ldrCompact(dest, base, ARMThumbImmediate::makeUInt12(address.offset));
651 return label;
652 }
653
654 void load16(BaseIndex address, RegisterID dest)
655 {
656 m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
657 }
658
659 void load16Signed(BaseIndex address, RegisterID dest)
660 {
661 load16Signed(setupArmAddress(address), dest);
662 }
663
664 void load16(ImplicitAddress address, RegisterID dest)
665 {
666 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
667 if (armImm.isValid())
668 m_assembler.ldrh(dest, address.base, armImm);
669 else {
670 move(TrustedImm32(address.offset), dataTempRegister);
671 m_assembler.ldrh(dest, address.base, dataTempRegister);
672 }
673 }
674
675 void load16Signed(ImplicitAddress, RegisterID)
676 {
677 unreachableForPlatform();
678 }
679
680 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
681 {
682 DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
683 store32(src, ArmAddress(address.base, dataTempRegister));
684 return label;
685 }
686
687 void store32(RegisterID src, ImplicitAddress address)
688 {
689 store32(src, setupArmAddress(address));
690 }
691
692 void store32(RegisterID src, BaseIndex address)
693 {
694 store32(src, setupArmAddress(address));
695 }
696
697 void store32(TrustedImm32 imm, ImplicitAddress address)
698 {
699 move(imm, dataTempRegister);
700 store32(dataTempRegister, setupArmAddress(address));
701 }
702
703 void store32(TrustedImm32 imm, BaseIndex address)
704 {
705 move(imm, dataTempRegister);
706 store32(dataTempRegister, setupArmAddress(address));
707 }
708
709 void store32(RegisterID src, const void* address)
710 {
711 move(TrustedImmPtr(address), addressTempRegister);
712 m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
713 }
714
715 void store32(TrustedImm32 imm, const void* address)
716 {
717 move(imm, dataTempRegister);
718 store32(dataTempRegister, address);
719 }
720
721 void store8(RegisterID src, BaseIndex address)
722 {
723 store8(src, setupArmAddress(address));
724 }
725
726 void store16(RegisterID src, BaseIndex address)
727 {
728 store16(src, setupArmAddress(address));
729 }
730
731 #if ENABLE(JIT_CONSTANT_BLINDING)
732 static bool shouldBlindForSpecificArch(uint32_t value)
733 {
734 ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
735
736 // Couldn't be encoded as an immediate, so assume it's untrusted.
737 if (!immediate.isValid())
738 return true;
739
740 // If we can encode the immediate, we have less than 16 attacker
741 // controlled bits.
742 if (immediate.isEncodedImm())
743 return false;
744
745 // Don't let any more than 12 bits of an instruction word
746 // be controlled by an attacker.
747 return !immediate.isUInt12();
748 }
749 #endif
750
751 // Floating-point operations:
752
753 static bool supportsFloatingPoint() { return true; }
754 static bool supportsFloatingPointTruncate() { return true; }
755 static bool supportsFloatingPointSqrt() { return true; }
756 static bool supportsFloatingPointAbs() { return true; }
757
758 void loadDouble(ImplicitAddress address, FPRegisterID dest)
759 {
760 RegisterID base = address.base;
761 int32_t offset = address.offset;
762
763 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
764 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
765 add32(TrustedImm32(offset), base, addressTempRegister);
766 base = addressTempRegister;
767 offset = 0;
768 }
769
770 m_assembler.vldr(dest, base, offset);
771 }
772
773 void loadFloat(ImplicitAddress address, FPRegisterID dest)
774 {
775 RegisterID base = address.base;
776 int32_t offset = address.offset;
777
778 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
779 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
780 add32(TrustedImm32(offset), base, addressTempRegister);
781 base = addressTempRegister;
782 offset = 0;
783 }
784
785 m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
786 }
787
788 void loadDouble(BaseIndex address, FPRegisterID dest)
789 {
790 move(address.index, addressTempRegister);
791 lshift32(TrustedImm32(address.scale), addressTempRegister);
792 add32(address.base, addressTempRegister);
793 loadDouble(Address(addressTempRegister, address.offset), dest);
794 }
795
796 void loadFloat(BaseIndex address, FPRegisterID dest)
797 {
798 move(address.index, addressTempRegister);
799 lshift32(TrustedImm32(address.scale), addressTempRegister);
800 add32(address.base, addressTempRegister);
801 loadFloat(Address(addressTempRegister, address.offset), dest);
802 }
803
804 void moveDouble(FPRegisterID src, FPRegisterID dest)
805 {
806 if (src != dest)
807 m_assembler.vmov(dest, src);
808 }
809
810 void loadDouble(const void* address, FPRegisterID dest)
811 {
812 move(TrustedImmPtr(address), addressTempRegister);
813 m_assembler.vldr(dest, addressTempRegister, 0);
814 }
815
816 void storeDouble(FPRegisterID src, ImplicitAddress address)
817 {
818 RegisterID base = address.base;
819 int32_t offset = address.offset;
820
821 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
822 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
823 add32(TrustedImm32(offset), base, addressTempRegister);
824 base = addressTempRegister;
825 offset = 0;
826 }
827
828 m_assembler.vstr(src, base, offset);
829 }
830
831 void storeFloat(FPRegisterID src, ImplicitAddress address)
832 {
833 RegisterID base = address.base;
834 int32_t offset = address.offset;
835
836 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
837 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
838 add32(TrustedImm32(offset), base, addressTempRegister);
839 base = addressTempRegister;
840 offset = 0;
841 }
842
843 m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
844 }
845
846 void storeDouble(FPRegisterID src, const void* address)
847 {
848 move(TrustedImmPtr(address), addressTempRegister);
849 storeDouble(src, addressTempRegister);
850 }
851
852 void storeDouble(FPRegisterID src, BaseIndex address)
853 {
854 move(address.index, addressTempRegister);
855 lshift32(TrustedImm32(address.scale), addressTempRegister);
856 add32(address.base, addressTempRegister);
857 storeDouble(src, Address(addressTempRegister, address.offset));
858 }
859
860 void storeFloat(FPRegisterID src, BaseIndex address)
861 {
862 move(address.index, addressTempRegister);
863 lshift32(TrustedImm32(address.scale), addressTempRegister);
864 add32(address.base, addressTempRegister);
865 storeFloat(src, Address(addressTempRegister, address.offset));
866 }
867
868 void addDouble(FPRegisterID src, FPRegisterID dest)
869 {
870 m_assembler.vadd(dest, dest, src);
871 }
872
873 void addDouble(Address src, FPRegisterID dest)
874 {
875 loadDouble(src, fpTempRegister);
876 addDouble(fpTempRegister, dest);
877 }
878
879 void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
880 {
881 m_assembler.vadd(dest, op1, op2);
882 }
883
884 void addDouble(AbsoluteAddress address, FPRegisterID dest)
885 {
886 loadDouble(address.m_ptr, fpTempRegister);
887 m_assembler.vadd(dest, dest, fpTempRegister);
888 }
889
890 void divDouble(FPRegisterID src, FPRegisterID dest)
891 {
892 m_assembler.vdiv(dest, dest, src);
893 }
894
895 void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
896 {
897 m_assembler.vdiv(dest, op1, op2);
898 }
899
900 void subDouble(FPRegisterID src, FPRegisterID dest)
901 {
902 m_assembler.vsub(dest, dest, src);
903 }
904
905 void subDouble(Address src, FPRegisterID dest)
906 {
907 loadDouble(src, fpTempRegister);
908 subDouble(fpTempRegister, dest);
909 }
910
911 void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
912 {
913 m_assembler.vsub(dest, op1, op2);
914 }
915
916 void mulDouble(FPRegisterID src, FPRegisterID dest)
917 {
918 m_assembler.vmul(dest, dest, src);
919 }
920
921 void mulDouble(Address src, FPRegisterID dest)
922 {
923 loadDouble(src, fpTempRegister);
924 mulDouble(fpTempRegister, dest);
925 }
926
927 void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
928 {
929 m_assembler.vmul(dest, op1, op2);
930 }
931
932 void sqrtDouble(FPRegisterID src, FPRegisterID dest)
933 {
934 m_assembler.vsqrt(dest, src);
935 }
936
937 void absDouble(FPRegisterID src, FPRegisterID dest)
938 {
939 m_assembler.vabs(dest, src);
940 }
941
942 void negateDouble(FPRegisterID src, FPRegisterID dest)
943 {
944 m_assembler.vneg(dest, src);
945 }
946
947 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
948 {
949 m_assembler.vmov(fpTempRegister, src, src);
950 m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
951 }
952
953 void convertInt32ToDouble(Address address, FPRegisterID dest)
954 {
955 // FIXME: load directly into the fpr!
956 load32(address, dataTempRegister);
957 m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
958 m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
959 }
960
961 void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
962 {
963 // FIXME: load directly into the fpr!
964 load32(address.m_ptr, dataTempRegister);
965 m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
966 m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
967 }
968
969 void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
970 {
971 m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
972 }
973
974 void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
975 {
976 m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
977 }
978
979 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
980 {
981 m_assembler.vcmp(left, right);
982 m_assembler.vmrs();
983
984 if (cond == DoubleNotEqual) {
985 // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
986 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
987 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
988 unordered.link(this);
989 return result;
990 }
991 if (cond == DoubleEqualOrUnordered) {
992 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
993 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
994 unordered.link(this);
995 // We get here if either unordered or equal.
996 Jump result = jump();
997 notEqual.link(this);
998 return result;
999 }
1000 return makeBranch(cond);
1001 }
1002
1003 enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1004 Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1005 {
1006 // Convert into dest.
1007 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1008 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1009
1010 // Calculate 2x dest. If the value potentially underflowed, it will have
1011 // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
1012 // overflow the result will be equal to -2.
1013 Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
1014 Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
1015
1016 // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
1017 underflow.link(this);
1018 if (branchType == BranchIfTruncateSuccessful)
1019 return noOverflow;
1020
1021 // We'll reach the current point in the code on failure, so plant a
1022 // jump here & link the success case.
1023 Jump failure = jump();
1024 noOverflow.link(this);
1025 return failure;
1026 }
1027
1028 Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1029 {
1030 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1031 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1032
1033 Jump overflow = branch32(Equal, dest, TrustedImm32(0x7fffffff));
1034 Jump success = branch32(GreaterThanOrEqual, dest, TrustedImm32(0));
1035 overflow.link(this);
1036
1037 if (branchType == BranchIfTruncateSuccessful)
1038 return success;
1039
1040 Jump failure = jump();
1041 success.link(this);
1042 return failure;
1043 }
1044
1045 // Result is undefined if the value is outside of the integer range.
1046 void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
1047 {
1048 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1049 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1050 }
1051
1052 void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
1053 {
1054 m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
1055 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1056 }
1057
1058 // Converts 'src' to an integer, placing the result in 'dest'.
1059 // If the result is not representable as a 32 bit value, branch.
1060 // May also branch for some values that are representable in 32 bits
1061 // (specifically, in this case, 0).
1062 void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
1063 {
1064 m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
1065 m_assembler.vmov(dest, fpTempRegisterAsSingle());
1066
1067 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
1068 m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
1069 failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
1070
1071 // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
1072 failureCases.append(branchTest32(Zero, dest));
1073 }
1074
1075 Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
1076 {
1077 m_assembler.vcmpz(reg);
1078 m_assembler.vmrs();
1079 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1080 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
1081 unordered.link(this);
1082 return result;
1083 }
1084
1085 Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
1086 {
1087 m_assembler.vcmpz(reg);
1088 m_assembler.vmrs();
1089 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
1090 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
1091 unordered.link(this);
1092 // We get here if either unordered or equal.
1093 Jump result = jump();
1094 notEqual.link(this);
1095 return result;
1096 }
1097
1098 // Stack manipulation operations:
1099 //
1100 // The ABI is assumed to provide a stack abstraction to memory,
1101 // containing machine word sized units of data. Push and pop
1102 // operations add and remove a single register sized unit of data
1103 // to or from the stack. Peek and poke operations read or write
1104 // values on the stack, without moving the current stack position.
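//
// A minimal usage sketch (register names are illustrative):
//
//     masm.push(regT0);                  // sp -= 4; *sp = regT0
//     masm.push(TrustedImm32(42));       // sp -= 4; *sp = 42
//     masm.pop(regT1);                   // regT1 = *sp; sp += 4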
1105
1106 void pop(RegisterID dest)
1107 {
1108 // load postindexed with writeback
1109 m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
1110 }
1111
1112 void push(RegisterID src)
1113 {
1114 // store preindexed with writeback
1115 m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
1116 }
1117
1118 void push(Address address)
1119 {
1120 load32(address, dataTempRegister);
1121 push(dataTempRegister);
1122 }
1123
1124 void push(TrustedImm32 imm)
1125 {
1126 move(imm, dataTempRegister);
1127 push(dataTempRegister);
1128 }
1129
1130 // Register move operations:
1131 //
1132 // Move values in registers.
1133
1134 void move(TrustedImm32 imm, RegisterID dest)
1135 {
1136 uint32_t value = imm.m_value;
1137
1138 if (imm.m_isPointer)
1139 moveFixedWidthEncoding(imm, dest);
1140 else {
1141 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
1142
1143 if (armImm.isValid())
1144 m_assembler.mov(dest, armImm);
1145 else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
1146 m_assembler.mvn(dest, armImm);
1147 else {
1148 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
1149 if (value & 0xffff0000)
1150 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
1151 }
1152 }
1153 }
1154
1155 void move(RegisterID src, RegisterID dest)
1156 {
1157 if (src != dest)
1158 m_assembler.mov(dest, src);
1159 }
1160
1161 void move(TrustedImmPtr imm, RegisterID dest)
1162 {
1163 move(TrustedImm32(imm), dest);
1164 }
1165
1166 void swap(RegisterID reg1, RegisterID reg2)
1167 {
1168 move(reg1, dataTempRegister);
1169 move(reg2, reg1);
1170 move(dataTempRegister, reg2);
1171 }
1172
1173 void signExtend32ToPtr(RegisterID src, RegisterID dest)
1174 {
1175 move(src, dest);
1176 }
1177
1178 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
1179 {
1180 move(src, dest);
1181 }
1182
1183 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
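// (This relies on ARM condition codes being laid out in true/false pairs, so
// flipping the low bit of the encoding flips the sense of the test.)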
1184 static RelationalCondition invert(RelationalCondition cond)
1185 {
1186 return static_cast<RelationalCondition>(cond ^ 1);
1187 }
1188
1189 void nop()
1190 {
1191 m_assembler.nop();
1192 }
1193
1194 // Forwards / external control flow operations:
1195 //
1196 // This set of jump and conditional branch operations returns a Jump
1197 // object which may be linked at a later point, allowing forward jumps,
1198 // or jumps that will require external linkage (after the code has been
1199 // relocated).
1200 //
1201 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
1202 // respectively; for unsigned comparisons the names b, a, be, and ae are
1203 // used (representing the names 'below' and 'above').
1204 //
1205 // Operands to the comparison are provided in the expected order, e.g.
1206 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
1207 // treated as a signed 32-bit value, is less than or equal to 5.
1208 //
1209 // jz and jnz test whether the first operand is equal to zero, and take
1210 // an optional second operand of a mask under which to perform the test.
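//
// A minimal usage sketch (names are illustrative):
//
//     Jump isSmall = masm.branch32(LessThanOrEqual, regT0, TrustedImm32(5));
//     Jump isNonZero = masm.branchTest32(NonZero, regT1);
//     // ... emit the fall-through path ...
//     isSmall.link(&masm);
//     isNonZero.link(&masm);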
1211 private:
1212
1213 // Should we be using TEQ for equal/not-equal?
1214 void compare32(RegisterID left, TrustedImm32 right)
1215 {
1216 int32_t imm = right.m_value;
1217 if (!imm)
1218 m_assembler.tst(left, left);
1219 else {
1220 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1221 if (armImm.isValid())
1222 m_assembler.cmp(left, armImm);
1223 else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
1224 m_assembler.cmn(left, armImm);
1225 else {
1226 move(TrustedImm32(imm), dataTempRegister);
1227 m_assembler.cmp(left, dataTempRegister);
1228 }
1229 }
1230 }
1231
1232 void test32(RegisterID reg, TrustedImm32 mask)
1233 {
1234 int32_t imm = mask.m_value;
1235
1236 if (imm == -1)
1237 m_assembler.tst(reg, reg);
1238 else {
1239 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
1240 if (armImm.isValid())
1241 m_assembler.tst(reg, armImm);
1242 else {
1243 move(mask, dataTempRegister);
1244 m_assembler.tst(reg, dataTempRegister);
1245 }
1246 }
1247 }
1248
1249 public:
1250 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1251 {
1252 m_assembler.cmp(left, right);
1253 return Jump(makeBranch(cond));
1254 }
1255
1256 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1257 {
1258 compare32(left, right);
1259 return Jump(makeBranch(cond));
1260 }
1261
1262 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1263 {
1264 load32(right, dataTempRegister);
1265 return branch32(cond, left, dataTempRegister);
1266 }
1267
1268 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1269 {
1270 load32(left, dataTempRegister);
1271 return branch32(cond, dataTempRegister, right);
1272 }
1273
1274 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1275 {
1276 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1277 load32(left, addressTempRegister);
1278 return branch32(cond, addressTempRegister, right);
1279 }
1280
1281 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1282 {
1283 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1284 load32(left, addressTempRegister);
1285 return branch32(cond, addressTempRegister, right);
1286 }
1287
1288 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1289 {
1290 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1291 load32WithUnalignedHalfWords(left, addressTempRegister);
1292 return branch32(cond, addressTempRegister, right);
1293 }
1294
1295 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1296 {
1297 load32(left.m_ptr, dataTempRegister);
1298 return branch32(cond, dataTempRegister, right);
1299 }
1300
1301 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1302 {
1303 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1304 load32(left.m_ptr, addressTempRegister);
1305 return branch32(cond, addressTempRegister, right);
1306 }
1307
1308 Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1309 {
1310 compare32(left, right);
1311 return Jump(makeBranch(cond));
1312 }
1313
1314 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
1315 {
1316 ASSERT(!(0xffffff00 & right.m_value));
1317 // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
1318 load8(left, addressTempRegister);
1319 return branch8(cond, addressTempRegister, right);
1320 }
1321
1322 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1323 {
1324 ASSERT(!(0xffffff00 & right.m_value));
1325 // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
1326 load8(left, addressTempRegister);
1327 return branch32(cond, addressTempRegister, right);
1328 }
1329
1330 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
1331 {
1332 m_assembler.tst(reg, mask);
1333 return Jump(makeBranch(cond));
1334 }
1335
1336 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1337 {
1338 test32(reg, mask);
1339 return Jump(makeBranch(cond));
1340 }
1341
1342 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1343 {
1344 // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1345 load32(address, addressTempRegister);
1346 return branchTest32(cond, addressTempRegister, mask);
1347 }
1348
1349 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
1350 {
1351 // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1352 load32(address, addressTempRegister);
1353 return branchTest32(cond, addressTempRegister, mask);
1354 }
1355
1356 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
1357 {
1358 // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
1359 load8(address, addressTempRegister);
1360 return branchTest32(cond, addressTempRegister, mask);
1361 }
1362
1363 void jump(RegisterID target)
1364 {
1365 m_assembler.bx(target);
1366 }
1367
1368 // Address is a memory location containing the address to jump to
1369 void jump(Address address)
1370 {
1371 load32(address, dataTempRegister);
1372 m_assembler.bx(dataTempRegister);
1373 }
1374
1375 void jump(AbsoluteAddress address)
1376 {
1377 move(TrustedImmPtr(address.m_ptr), dataTempRegister);
1378 load32(Address(dataTempRegister), dataTempRegister);
1379 m_assembler.bx(dataTempRegister);
1380 }
1381
1382
1383 // Arithmetic control flow operations:
1384 //
1385 // This set of conditional branch operations branches based
1386 // on the result of an arithmetic operation. The operation
1387 // is performed as normal, storing the result.
1388 //
1389 // * jz operations branch if the result is zero.
1390 // * jo operations branch if the (signed) arithmetic
1391 // operation caused an overflow to occur.
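//
// For example, an overflow-checked increment might look like this (names are
// illustrative):
//
//     Jump overflowed = masm.branchAdd32(Overflow, TrustedImm32(1), regT0);
//     // ... non-overflowing path ...
//     overflowed.link(&masm);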
1392
1393 Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1394 {
1395 m_assembler.add_S(dest, op1, op2);
1396 return Jump(makeBranch(cond));
1397 }
1398
1399 Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1400 {
1401 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1402 if (armImm.isValid())
1403 m_assembler.add_S(dest, op1, armImm);
1404 else {
1405 move(imm, dataTempRegister);
1406 m_assembler.add_S(dest, op1, dataTempRegister);
1407 }
1408 return Jump(makeBranch(cond));
1409 }
1410
1411 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
1412 {
1413 return branchAdd32(cond, dest, src, dest);
1414 }
1415
1416 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1417 {
1418 return branchAdd32(cond, dest, imm, dest);
1419 }
1420
1421 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
1422 {
1423 // Move the high bits of the address into addressTempRegister,
1424 // and load the value into dataTempRegister.
1425 move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1426 m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1427
1428 // Do the add.
1429 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1430 if (armImm.isValid())
1431 m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
1432 else {
1433 // If the operand does not fit into an immediate then load it temporarily
1434 // into addressTempRegister; since we're overwriting addressTempRegister
1435 // we'll need to reload it with the high bits of the address afterwards.
1436 move(imm, addressTempRegister);
1437 m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
1438 move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
1439 }
1440
1441 // Store the result.
1442 m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
1443
1444 return Jump(makeBranch(cond));
1445 }
1446
1447 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
1448 {
1449 m_assembler.smull(dest, dataTempRegister, src1, src2);
1450
1451 if (cond == Overflow) {
1452 m_assembler.asr(addressTempRegister, dest, 31);
1453 return branch32(NotEqual, addressTempRegister, dataTempRegister);
1454 }
1455
1456 return branchTest32(cond, dest);
1457 }
1458
1459 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
1460 {
1461 return branchMul32(cond, src, dest, dest);
1462 }
1463
1464 Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
1465 {
1466 move(imm, dataTempRegister);
1467 return branchMul32(cond, dataTempRegister, src, dest);
1468 }
1469
1470 Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
1471 {
1472 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1473 m_assembler.sub_S(srcDest, zero, srcDest);
1474 return Jump(makeBranch(cond));
1475 }
1476
1477 Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
1478 {
1479 m_assembler.orr_S(dest, dest, src);
1480 return Jump(makeBranch(cond));
1481 }
1482
1483 Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
1484 {
1485 m_assembler.sub_S(dest, op1, op2);
1486 return Jump(makeBranch(cond));
1487 }
1488
1489 Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1490 {
1491 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
1492 if (armImm.isValid())
1493 m_assembler.sub_S(dest, op1, armImm);
1494 else {
1495 move(imm, dataTempRegister);
1496 m_assembler.sub_S(dest, op1, dataTempRegister);
1497 }
1498 return Jump(makeBranch(cond));
1499 }
1500
1501 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
1502 {
1503 return branchSub32(cond, dest, src, dest);
1504 }
1505
1506 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
1507 {
1508 return branchSub32(cond, dest, imm, dest);
1509 }
1510
1511 void relativeTableJump(RegisterID index, int scale)
1512 {
1513 ASSERT(scale >= 0 && scale <= 31);
1514
1515 // dataTempRegister will point after the jump if the index register contains zero
1516 move(ARMRegisters::pc, dataTempRegister);
1517 m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
1518
1519 ShiftTypeAndAmount shift(SRType_LSL, scale);
1520 m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
1521 jump(dataTempRegister);
1522 }
1523
1524 // Miscellaneous operations:
1525
1526 void breakpoint(uint8_t imm = 0)
1527 {
1528 m_assembler.bkpt(imm);
1529 }
1530
1531 ALWAYS_INLINE Call nearCall()
1532 {
1533 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1534 return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
1535 }
1536
1537 ALWAYS_INLINE Call call()
1538 {
1539 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1540 return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
1541 }
1542
1543 ALWAYS_INLINE Call call(RegisterID target)
1544 {
1545 return Call(m_assembler.blx(target), Call::None);
1546 }
1547
1548 ALWAYS_INLINE Call call(Address address)
1549 {
1550 load32(address, dataTempRegister);
1551 return Call(m_assembler.blx(dataTempRegister), Call::None);
1552 }
1553
1554 ALWAYS_INLINE void ret()
1555 {
1556 m_assembler.bx(linkRegister);
1557 }
1558
1559 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1560 {
1561 m_assembler.cmp(left, right);
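// ITE block: the first mov below executes only if 'cond' holds (dest = 1),
// the second only if it does not (dest = 0).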
1562 m_assembler.it(armV7Condition(cond), false);
1563 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1564 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1565 }
1566
1567 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
1568 {
1569 load32(left, dataTempRegister);
1570 compare32(cond, dataTempRegister, right, dest);
1571 }
1572
1573 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
1574 {
1575 load8(left, addressTempRegister);
1576 compare32(cond, addressTempRegister, right, dest);
1577 }
1578
1579 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1580 {
1581 compare32(left, right);
1582 m_assembler.it(armV7Condition(cond), false);
1583 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1584 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1585 }
1586
1587 // FIXME:
1588 // The mask should be optional... perhaps the argument order should be
1589 // dest-src, operations always have a dest? ... possibly not true, considering
1590 // asm ops like test, or pseudo ops like pop().
1591 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1592 {
1593 load32(address, dataTempRegister);
1594 test32(dataTempRegister, mask);
1595 m_assembler.it(armV7Condition(cond), false);
1596 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1597 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1598 }
1599
1600 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1601 {
1602 load8(address, dataTempRegister);
1603 test32(dataTempRegister, mask);
1604 m_assembler.it(armV7Condition(cond), false);
1605 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
1606 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
1607 }
1608
1609 ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
1610 {
1611 moveFixedWidthEncoding(imm, dst);
1612 return DataLabel32(this);
1613 }
1614
1615 ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
1616 {
1617 moveFixedWidthEncoding(TrustedImm32(imm), dst);
1618 return DataLabelPtr(this);
1619 }
1620
1621 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1622 {
1623 dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1624 return branch32(cond, left, dataTempRegister);
1625 }
1626
1627 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1628 {
1629 load32(left, addressTempRegister);
1630 dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
1631 return branch32(cond, addressTempRegister, dataTempRegister);
1632 }
1633
1634 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
1635 {
1636 m_makeJumpPatchable = true;
1637 Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
1638 m_makeJumpPatchable = false;
1639 return PatchableJump(result);
1640 }
1641
1642 PatchableJump patchableJump()
1643 {
1644 m_makeJumpPatchable = true;
1645 Jump result = jump();
1646 m_makeJumpPatchable = false;
1647 return PatchableJump(result);
1648 }
1649
1650 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
1651 {
1652 DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
1653 store32(dataTempRegister, address);
1654 return label;
1655 }
1656 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
1657
1658
1659 ALWAYS_INLINE Call tailRecursiveCall()
1660 {
1661 // Like a normal call, but don't link.
1662 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1663 return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
1664 }
1665
1666 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
1667 {
1668 oldJump.link(this);
1669 return tailRecursiveCall();
1670 }
1671
1672
1673 int executableOffsetFor(int location)
1674 {
1675 return m_assembler.executableOffsetFor(location);
1676 }
1677
1678 static FunctionPtr readCallTarget(CodeLocationCall call)
1679 {
1680 return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
1681 }
1682
1683 protected:
1684 ALWAYS_INLINE Jump jump()
1685 {
1686 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1687 return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
1688 }
1689
1690 ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
1691 {
1692 m_assembler.it(cond, true, true);
1693 moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
1694 return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
1695 }
1696 ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
1697 ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
1698 ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1699
1700 ArmAddress setupArmAddress(BaseIndex address)
1701 {
1702 if (address.offset) {
1703 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1704 if (imm.isValid())
1705 m_assembler.add(addressTempRegister, address.base, imm);
1706 else {
1707 move(TrustedImm32(address.offset), addressTempRegister);
1708 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1709 }
1710
1711 return ArmAddress(addressTempRegister, address.index, address.scale);
1712 } else
1713 return ArmAddress(address.base, address.index, address.scale);
1714 }
1715
1716 ArmAddress setupArmAddress(Address address)
1717 {
1718 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1719 return ArmAddress(address.base, address.offset);
1720
1721 move(TrustedImm32(address.offset), addressTempRegister);
1722 return ArmAddress(address.base, addressTempRegister);
1723 }
1724
1725 ArmAddress setupArmAddress(ImplicitAddress address)
1726 {
1727 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1728 return ArmAddress(address.base, address.offset);
1729
1730 move(TrustedImm32(address.offset), addressTempRegister);
1731 return ArmAddress(address.base, addressTempRegister);
1732 }
1733
1734 RegisterID makeBaseIndexBase(BaseIndex address)
1735 {
1736 if (!address.offset)
1737 return address.base;
1738
1739 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1740 if (imm.isValid())
1741 m_assembler.add(addressTempRegister, address.base, imm);
1742 else {
1743 move(TrustedImm32(address.offset), addressTempRegister);
1744 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1745 }
1746
1747 return addressTempRegister;
1748 }
1749
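// Always emits the full movT3 + movt pair, regardless of the immediate's value, so
// the 32-bit constant occupies a fixed-size, patchable instruction sequence (this
// is what moveWithPatch() and the patchable jumps rely on).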
1750 void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
1751 {
1752 uint32_t value = imm.m_value;
1753 m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
1754 m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
1755 }
1756
1757 ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
1758 {
1759 return static_cast<ARMv7Assembler::Condition>(cond);
1760 }
1761
1762 ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
1763 {
1764 return static_cast<ARMv7Assembler::Condition>(cond);
1765 }
1766
1767 ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
1768 {
1769 return static_cast<ARMv7Assembler::Condition>(cond);
1770 }
1771
1772 private:
1773 friend class LinkBuffer;
1774 friend class RepatchBuffer;
1775
1776 static void linkCall(void* code, Call call, FunctionPtr function)
1777 {
1778 ARMv7Assembler::linkCall(code, call.m_label, function.value());
1779 }
1780
1781 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
1782 {
1783 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1784 }
1785
1786 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
1787 {
1788 ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
1789 }
1790
1791 bool m_makeJumpPatchable;
1792 };
1793
1794 } // namespace JSC
1795
1796 #endif // ENABLE(ASSEMBLER)
1797
1798 #endif // MacroAssemblerARMv7_h