]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/ARMv7Assembler.h
13ad3e0d3259d83e4147a139082734bc62b376f4
[apple/javascriptcore.git] / assembler / ARMv7Assembler.h
1 /*
2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
29
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
31
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
35 #include <stdint.h>
36
37 namespace JSC {
38
// Register numberings for the ARM core registers, the VFP single/double
// banks, and the NEON quad registers, plus the conventional AAPCS/Thumb
// aliases for the core registers.
namespace ARMRegisters {
    typedef enum {
        r0,
        r1,
        r2,
        r3,
        r4,
        r5,
        r6,
        r7, wr = r7, // thumb work register
        r8,
        r9, sb = r9, // static base
        r10, sl = r10, // stack limit
        r11, fp = r11, // frame pointer
        r12, ip = r12, // intra-procedure scratch
        r13, sp = r13, // stack pointer
        r14, lr = r14, // link register
        r15, pc = r15, // program counter
    } RegisterID;

    // Single-precision VFP registers s0-s31.
    typedef enum {
        s0,
        s1,
        s2,
        s3,
        s4,
        s5,
        s6,
        s7,
        s8,
        s9,
        s10,
        s11,
        s12,
        s13,
        s14,
        s15,
        s16,
        s17,
        s18,
        s19,
        s20,
        s21,
        s22,
        s23,
        s24,
        s25,
        s26,
        s27,
        s28,
        s29,
        s30,
        s31,
    } FPSingleRegisterID;

    // Double-precision VFP registers d0-d31; d0-d15 overlay s0-s31.
    typedef enum {
        d0,
        d1,
        d2,
        d3,
        d4,
        d5,
        d6,
        d7,
        d8,
        d9,
        d10,
        d11,
        d12,
        d13,
        d14,
        d15,
        d16,
        d17,
        d18,
        d19,
        d20,
        d21,
        d22,
        d23,
        d24,
        d25,
        d26,
        d27,
        d28,
        d29,
        d30,
        d31,
    } FPDoubleRegisterID;

    // NEON quad registers. NOTE(review): hardware defines only q0-q15;
    // the q16-q31 enumerators here have no encodable counterpart — confirm
    // they are never used.
    typedef enum {
        q0,
        q1,
        q2,
        q3,
        q4,
        q5,
        q6,
        q7,
        q8,
        q9,
        q10,
        q11,
        q12,
        q13,
        q14,
        q15,
        q16,
        q17,
        q18,
        q19,
        q20,
        q21,
        q22,
        q23,
        q24,
        q25,
        q26,
        q27,
        q28,
        q29,
        q30,
        q31,
    } FPQuadRegisterID;

    // Maps a double register to the lower of its two aliased single
    // registers (dN -> s2N); only d0-d15 have single-precision aliases.
    inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
    {
        ASSERT(reg < d16);
        return (FPSingleRegisterID)(reg << 1);
    }

    // Maps an even-numbered single register to its containing double
    // register (s2N -> dN).
    inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
    {
        ASSERT(!(reg & 1));
        return (FPDoubleRegisterID)(reg >> 1);
    }
}
176
177 class ARMv7Assembler;
// Represents an immediate operand for Thumb-2 instructions. An immediate is
// either invalid (the requested value cannot be represented), a plain
// unsigned 12/16-bit value (TypeUInt16), or an ARM "modified immediate"
// (TypeEncoded): an 8-bit value combined with a rotation or a
// byte-replication pattern.
class ARMThumbImmediate {
    friend class ARMv7Assembler;

    typedef uint8_t ThumbImmediateType;
    static const ThumbImmediateType TypeInvalid = 0;
    static const ThumbImmediateType TypeEncoded = 1;
    static const ThumbImmediateType TypeUInt16 = 2;

    typedef union {
        // NOTE(review): asInt is signed (int16_t); the isUIntN() masks below
        // rely on integer promotion — confirm behavior for values with
        // bit 15 set.
        int16_t asInt;
        // The i:imm3:imm8 split used by the 32-bit instruction encodings.
        struct {
            unsigned imm8 : 8;
            unsigned imm3 : 3;
            unsigned i : 1;
            unsigned imm4 : 4;
        };
        // If this is an encoded immediate, then it may describe a shift, or a pattern.
        struct {
            unsigned shiftValue7 : 7; // Low 7 bits of the shifted byte; the top bit is implicit.
            unsigned shiftAmount : 5; // Magnitude of a right rotate.
        };
        struct {
            unsigned immediate : 8;   // The replicated byte.
            unsigned pattern : 4;     // 1 = 00XY00XY, 2 = XY00XY00, 3 = XYXYXYXY.
        };
    } ThumbImmediateValue;

    // byte0 contains least significant bit; not using an array to make client code endian agnostic.
    typedef union {
        int32_t asInt;
        struct {
            uint8_t byte0;
            uint8_t byte1;
            uint8_t byte2;
            uint8_t byte3;
        };
    } PatternBytes;

    // One binary-search step of countLeadingZeros: test the top N of the
    // remaining 2N bits, then either narrow the value or accumulate zeros.
    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
    {
        if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
            value >>= N; /* if any were set, lose the bottom N */
        else /* if none of the top N bits are set, */
            zeros += N; /* then we have identified N leading zeros */
    }

    // Counts leading zero bits in a 32-bit value (32 for a zero input).
    static int32_t countLeadingZeros(uint32_t value)
    {
        if (!value)
            return 32;

        int32_t zeros = 0;
        countLeadingZerosPartial(value, zeros, 16);
        countLeadingZerosPartial(value, zeros, 8);
        countLeadingZerosPartial(value, zeros, 4);
        countLeadingZerosPartial(value, zeros, 2);
        countLeadingZerosPartial(value, zeros, 1);
        return zeros;
    }

    // Constructs an invalid (unrepresentable) immediate.
    ARMThumbImmediate()
        : m_type(TypeInvalid)
    {
        m_value.asInt = 0;
    }

    ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
        : m_type(type)
        , m_value(value)
    {
    }

    ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
        : m_type(TypeUInt16)
    {
        // Make sure this constructor is only reached with type TypeUInt16;
        // this extra parameter makes the code a little clearer by making it
        // explicit at call sites which type is being constructed
        ASSERT_UNUSED(type, type == TypeUInt16);

        m_value.asInt = value;
    }

public:
    // Attempts to express 'value' as an ARM modified immediate; returns an
    // invalid immediate if no encoding exists.
    static ARMThumbImmediate makeEncodedImm(uint32_t value)
    {
        ThumbImmediateValue encoding;
        encoding.asInt = 0;

        // okay, these are easy.
        if (value < 256) {
            encoding.immediate = value;
            encoding.pattern = 0;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        int32_t leadingZeros = countLeadingZeros(value);
        // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
        ASSERT(leadingZeros < 24);

        // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
        // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
        // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
        int32_t rightShiftAmount = 24 - leadingZeros;
        if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
            // Shift the value down to the low byte position. The assign to
            // shiftValue7 drops the implicit top bit.
            encoding.shiftValue7 = value >> rightShiftAmount;
            // The encoded shift amount is the magnitude of a right rotate.
            encoding.shiftAmount = 8 + leadingZeros;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        // Not a shifted byte: try the byte-replication patterns.
        PatternBytes bytes;
        bytes.asInt = value;

        // All four bytes equal: pattern XYXYXYXY.
        if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
            encoding.immediate = bytes.byte0;
            encoding.pattern = 3;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        // Even bytes equal, odd bytes zero: pattern 00XY00XY.
        if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
            encoding.immediate = bytes.byte0;
            encoding.pattern = 1;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        // Odd bytes equal, even bytes zero: pattern XY00XY00.
        if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
            encoding.immediate = bytes.byte1;
            encoding.pattern = 2;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        return ARMThumbImmediate();
    }

    // Returns 'value' as a plain 12-bit immediate, or invalid if it does not fit.
    static ARMThumbImmediate makeUInt12(int32_t value)
    {
        return (!(value & 0xfffff000))
            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
            : ARMThumbImmediate();
    }

    static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
    {
        // If this is not a 12-bit unsigned it, try making an encoded immediate.
        return (!(value & 0xfffff000))
            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
            : makeEncodedImm(value);
    }

    // The 'make' methods, above, return a !isValid() value if the argument
    // cannot be represented as the requested type. This method is called
    // 'get' since the argument can always be represented.
    static ARMThumbImmediate makeUInt16(uint16_t value)
    {
        return ARMThumbImmediate(TypeUInt16, value);
    }

    bool isValid()
    {
        return m_type != TypeInvalid;
    }

    // These methods rely on the format of encoded byte values.
    bool isUInt3() { return !(m_value.asInt & 0xfff8); }
    bool isUInt4() { return !(m_value.asInt & 0xfff0); }
    bool isUInt5() { return !(m_value.asInt & 0xffe0); }
    bool isUInt6() { return !(m_value.asInt & 0xffc0); }
    bool isUInt7() { return !(m_value.asInt & 0xff80); }
    bool isUInt8() { return !(m_value.asInt & 0xff00); }
    bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
    bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
    bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
    bool isUInt16() { return m_type == TypeUInt16; }
    uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
    uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
    uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
    uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
    uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
    uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
    uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
    uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
    uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
    uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }

    bool isEncodedImm() { return m_type == TypeEncoded; }

private:
    ThumbImmediateType m_type;
    ThumbImmediateValue m_value;
};
371
// Computes the VFP "modified immediate" (imm8) encoding of a double, if one
// exists. A double is encodable when its mantissa fits in its top four bits
// and its biased exponent lies in [0x3fc, 0x403]; otherwise isValid() is
// false.
class VFPImmediate {
public:
    VFPImmediate(double d)
        : m_value(-1)
    {
        union {
            uint64_t bits;
            double number;
        } converter;
        converter.number = d;

        uint64_t raw = converter.bits;
        int signBit = static_cast<int>(raw >> 63);
        int biasedExponent = static_cast<int>(raw >> 52) & 0x7ff;
        uint64_t mantissa = raw & 0x000fffffffffffffull;

        bool exponentInRange = (biasedExponent >= 0x3fc) && (biasedExponent <= 0x403);
        bool mantissaFits = !(mantissa & 0x0000ffffffffffffull);
        if (exponentInRange && mantissaFits)
            m_value = (signBit << 7) | ((biasedExponent & 7) << 4) | static_cast<int>(mantissa >> 48);
    }

    // True when the constructor argument had a valid imm8 encoding.
    bool isValid()
    {
        return m_value != -1;
    }

    // The 8-bit encoded value; only meaningful when isValid().
    uint8_t value()
    {
        return (uint8_t)m_value;
    }

private:
    int m_value; // -1 when not encodable, otherwise the imm8 byte.
};
405
// Shift types as encoded in the 2-bit 'type' fields of shifted-register
// instruction encodings.
typedef enum {
    SRType_LSL,
    SRType_LSR,
    SRType_ASR,
    SRType_ROR,

    // RRX shares the ROR encoding; a zero shift amount distinguishes it.
    SRType_RRX = SRType_ROR
} ARMShiftType;
414
415 class ARMv7Assembler;
// Packs a shift type and amount into the split hi4/lo4 nibble form used
// when building the second halfword of shifted-register instructions.
class ShiftTypeAndAmount {
    friend class ARMv7Assembler;

public:
    // Default: LSL #0, i.e. no shift.
    ShiftTypeAndAmount()
    {
        m_u.type = (ARMShiftType)0;
        m_u.amount = 0;
    }

    ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
    {
        m_u.type = type;
        m_u.amount = amount & 31; // Shift amounts are taken modulo 32.
    }

    unsigned lo4() { return m_u.lo4; }
    unsigned hi4() { return m_u.hi4; }

private:
    union {
        // The same 8 bits viewed as two nibbles for emission...
        struct {
            unsigned lo4 : 4;
            unsigned hi4 : 4;
        };
        // ...or as the logical type/amount pair.
        struct {
            unsigned type : 2;
            unsigned amount : 6;
        };
    } m_u;
};
447
448 class ARMv7Assembler {
449 public:
    ~ARMv7Assembler()
    {
        // Every recorded jump must have been linked before the assembler
        // is destroyed.
        ASSERT(m_jumpsToLink.isEmpty());
    }
454
455 typedef ARMRegisters::RegisterID RegisterID;
456 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
457 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
458 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
459
    // Condition codes, in the standard ARM condition-field encoding
    // (EQ = 0b0000 ... AL = 0b1110). x86 equivalents for reference:
    // (HS, LO, HI, LS) -> (AE, B, A, BE)
    // (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ,
        ConditionNE,
        ConditionHS,
        ConditionLO,
        ConditionMI,
        ConditionPL,
        ConditionVS,
        ConditionVC,
        ConditionHI,
        ConditionLS,
        ConditionGE,
        ConditionLT,
        ConditionGT,
        ConditionLE,
        ConditionAL,

        // Carry-set/carry-clear aliases.
        ConditionCS = ConditionHS,
        ConditionCC = ConditionLO,
    } Condition;
482
    // Classification of a jump while it is still unlinked (JumpType) and the
    // concrete branch encoding chosen at link time (JumpLinkType).
    enum JumpType { JumpFixed, JumpNoCondition, JumpCondition, JumpNoConditionFixedSize, JumpConditionFixedSize, JumpTypeCount };
    enum JumpLinkType { LinkInvalid, LinkJumpT1, LinkJumpT2, LinkJumpT3,
        LinkJumpT4, LinkConditionalJumpT4, LinkBX, LinkConditionalBX, JumpLinkTypeCount };
    // Byte sizes of each link encoding, and the padding reserved per jump type.
    static const int JumpSizes[JumpLinkTypeCount];
    static const int JumpPaddingSizes[JumpTypeCount];
    // Records one unlinked jump: where it was emitted, its target, and the
    // condition/encoding needed to patch it during finalization.
    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
            : m_from(from)
            , m_to(to)
            , m_type(type)
            , m_linkType(LinkInvalid)
            , m_condition(condition)
        {
        }
        intptr_t from() const { return m_from; }
        void setFrom(intptr_t from) { m_from = from; }
        intptr_t to() const { return m_to; }
        JumpType type() const { return m_type; }
        JumpLinkType linkType() const { return m_linkType; }
        // The concrete link encoding may only be decided once.
        void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; }
        Condition condition() const { return m_condition; }
    private:
        // Bitfields keep the record compact; 31 bits suffice for buffer offsets.
        intptr_t m_from : 31;
        intptr_t m_to : 31;
        JumpType m_type : 3;
        JumpLinkType m_linkType : 4;
        Condition m_condition : 16;
    };
512
    // Identifies an emitted jump by its offset in the assembler buffer, so
    // it can later be linked to a destination.
    class JmpSrc {
        friend class ARMv7Assembler;
        friend class ARMInstructionFormatter;
        friend class LinkBuffer;
    public:
        JmpSrc()
            : m_offset(-1)
        {
            // NOTE(review): m_condition and m_type are left uninitialized by
            // this constructor — confirm no caller reads them from a
            // default-constructed JmpSrc.
        }

    private:
        JmpSrc(int offset, JumpType type)
            : m_offset(offset)
            , m_condition(0xffff) // Sentinel: unconditional.
            , m_type(type)
        {
            ASSERT(m_type == JumpFixed || m_type == JumpNoCondition || m_type == JumpNoConditionFixedSize);
        }

        JmpSrc(int offset, JumpType type, Condition condition)
            : m_offset(offset)
            , m_condition(condition)
            , m_type(type)
        {
            ASSERT(m_type == JumpFixed || m_type == JumpCondition || m_type == JumpConditionFixedSize);
        }

        int m_offset; // Buffer offset just after the emitted jump.
        Condition m_condition : 16;
        JumpType m_type : 16;

    };
545
    // Identifies a potential branch destination (a buffer offset), and
    // tracks whether any jump actually targets it.
    class JmpDst {
        friend class ARMv7Assembler;
        friend class ARMInstructionFormatter;
        friend class LinkBuffer;
    public:
        JmpDst()
            : m_offset(-1)
            , m_used(false)
        {
        }

        bool isUsed() const { return m_used; }
        void used() { m_used = true; }
    private:
        JmpDst(int offset)
            : m_offset(offset)
            , m_used(false)
        {
            // The 31-bit field must not have truncated the offset.
            ASSERT(m_offset == offset);
        }

        int m_offset : 31;
        int m_used : 1;
    };
570
571 private:
572
573 // ARMv7, Appx-A.6.3
574 bool BadReg(RegisterID reg)
575 {
576 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
577 }
578
579 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
580 {
581 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
582 if (rdNum & 1)
583 rdMask |= 1 << lowBitShift;
584 return rdMask;
585 }
586
587 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
588 {
589 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
590 if (rdNum & 16)
591 rdMask |= 1 << highBitShift;
592 return rdMask;
593 }
594
    // 16-bit (single halfword) Thumb opcodes, pre-shifted so operand fields
    // can simply be OR'ed in by the formatter.
    typedef enum {
        OP_ADD_reg_T1 = 0x1800,
        OP_SUB_reg_T1 = 0x1A00,
        OP_ADD_imm_T1 = 0x1C00,
        OP_SUB_imm_T1 = 0x1E00,
        OP_MOV_imm_T1 = 0x2000,
        OP_CMP_imm_T1 = 0x2800,
        OP_ADD_imm_T2 = 0x3000,
        OP_SUB_imm_T2 = 0x3800,
        OP_AND_reg_T1 = 0x4000,
        OP_EOR_reg_T1 = 0x4040,
        OP_TST_reg_T1 = 0x4200,
        OP_RSB_imm_T1 = 0x4240,
        OP_CMP_reg_T1 = 0x4280,
        OP_ORR_reg_T1 = 0x4300,
        OP_MVN_reg_T1 = 0x43C0,
        OP_ADD_reg_T2 = 0x4400,
        OP_MOV_reg_T1 = 0x4600,
        // BX and BLX share a base opcode; bit 7, supplied via the register
        // field at emission time, selects BLX.
        OP_BLX = 0x4700,
        OP_BX = 0x4700,
        OP_STR_reg_T1 = 0x5000,
        OP_LDR_reg_T1 = 0x5800,
        OP_LDRH_reg_T1 = 0x5A00,
        OP_LDRB_reg_T1 = 0x5C00,
        OP_STR_imm_T1 = 0x6000,
        OP_LDR_imm_T1 = 0x6800,
        OP_LDRB_imm_T1 = 0x7800,
        OP_LDRH_imm_T1 = 0x8800,
        OP_STR_imm_T2 = 0x9000,
        OP_LDR_imm_T2 = 0x9800,
        OP_ADD_SP_imm_T1 = 0xA800,
        OP_ADD_SP_imm_T2 = 0xB000,
        OP_SUB_SP_imm_T1 = 0xB080,
        OP_BKPT = 0xBE00,
        // IT with an all-zero mask is the NOP encoding, hence the shared value.
        OP_IT = 0xBF00,
        OP_NOP_T1 = 0xBF00,
    } OpcodeID;
632
    // First halfword of two-halfword (32-bit) Thumb-2 opcodes. Several
    // entries share a value and are distinguished by fields in the second
    // halfword (e.g. the four immediate-shift opcodes all use 0xEA4F, the
    // MOV-shifted-register base; the shift type selects the mnemonic).
    typedef enum {
        OP_B_T1 = 0xD000,
        OP_B_T2 = 0xE000,
        OP_AND_reg_T2 = 0xEA00,
        OP_TST_reg_T2 = 0xEA10,
        OP_ORR_reg_T2 = 0xEA40,
        OP_ORR_S_reg_T2 = 0xEA50,
        OP_ASR_imm_T1 = 0xEA4F,
        OP_LSL_imm_T1 = 0xEA4F,
        OP_LSR_imm_T1 = 0xEA4F,
        OP_ROR_imm_T1 = 0xEA4F,
        OP_MVN_reg_T2 = 0xEA6F,
        OP_EOR_reg_T2 = 0xEA80,
        OP_ADD_reg_T3 = 0xEB00,
        OP_ADD_S_reg_T3 = 0xEB10,
        OP_SUB_reg_T2 = 0xEBA0,
        OP_SUB_S_reg_T2 = 0xEBB0,
        OP_CMP_reg_T2 = 0xEBB0,
        OP_VSTR = 0xED00,
        OP_VLDR = 0xED10,
        OP_VMOV_StoC = 0xEE00,
        OP_VMOV_CtoS = 0xEE10,
        OP_VMUL_T2 = 0xEE20,
        OP_VADD_T2 = 0xEE30,
        OP_VSUB_T2 = 0xEE30,
        OP_VDIV = 0xEE80,
        OP_VCMP = 0xEEB0,
        OP_VCVT_FPIVFP = 0xEEB0,
        OP_VMOV_IMM_T2 = 0xEEB0,
        OP_VMRS = 0xEEB0,
        OP_B_T3a = 0xF000,
        OP_B_T4a = 0xF000,
        OP_AND_imm_T1 = 0xF000,
        OP_TST_imm = 0xF010,
        OP_ORR_imm_T1 = 0xF040,
        OP_MOV_imm_T2 = 0xF040,
        OP_MVN_imm = 0xF060,
        OP_EOR_imm_T1 = 0xF080,
        OP_ADD_imm_T3 = 0xF100,
        OP_ADD_S_imm_T3 = 0xF110,
        OP_CMN_imm = 0xF110,
        OP_SUB_imm_T3 = 0xF1A0,
        OP_SUB_S_imm_T3 = 0xF1B0,
        OP_CMP_imm_T2 = 0xF1B0,
        OP_RSB_imm_T2 = 0xF1C0,
        OP_ADD_imm_T4 = 0xF200,
        OP_MOV_imm_T3 = 0xF240,
        OP_SUB_imm_T4 = 0xF2A0,
        OP_MOVT = 0xF2C0,
        OP_NOP_T2a = 0xF3AF,
        OP_LDRB_imm_T3 = 0xF810,
        OP_LDRB_reg_T2 = 0xF810,
        OP_LDRH_reg_T2 = 0xF830,
        OP_LDRH_imm_T3 = 0xF830,
        OP_STR_imm_T4 = 0xF840,
        OP_STR_reg_T2 = 0xF840,
        OP_LDR_imm_T4 = 0xF850,
        OP_LDR_reg_T2 = 0xF850,
        OP_LDRB_imm_T2 = 0xF890,
        OP_LDRH_imm_T2 = 0xF8B0,
        OP_STR_imm_T3 = 0xF8C0,
        OP_LDR_imm_T3 = 0xF8D0,
        OP_LSL_reg_T2 = 0xFA00,
        OP_LSR_reg_T2 = 0xFA20,
        OP_ASR_reg_T2 = 0xFA40,
        OP_ROR_reg_T2 = 0xFA60,
        OP_SMULL_T1 = 0xFB80,
    } OpcodeID1;
701
    // Fragments OR'ed into the second halfword of 32-bit opcodes ('b'
    // suffix pairs with the matching OpcodeID1 entry).
    typedef enum {
        OP_VADD_T2b = 0x0A00,
        OP_VDIVb = 0x0A00,
        OP_VLDRb = 0x0A00,
        OP_VMOV_IMM_T2b = 0x0A00,
        OP_VMUL_T2b = 0x0A00,
        OP_VSTRb = 0x0A00,
        OP_VMOV_CtoSb = 0x0A10,
        OP_VMOV_StoCb = 0x0A10,
        OP_VMRSb = 0x0A10,
        OP_VCMPb = 0x0A40,
        OP_VCVT_FPIVFPb = 0x0A40,
        OP_VSUB_T2b = 0x0A40,
        OP_NOP_T2b = 0x8000,
        OP_B_T3b = 0x8000,
        OP_B_T4b = 0x9000,
    } OpcodeID2;
719
    // Packs four 4-bit fields into a 16-bit value (f3 is the most
    // significant nibble); used to compose the second halfword of
    // two-word instructions.
    struct FourFours {
        FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
        {
            m_u.f0 = f0;
            m_u.f1 = f1;
            m_u.f2 = f2;
            m_u.f3 = f3;
        }

        union {
            unsigned value;
            struct {
                unsigned f0 : 4;
                unsigned f1 : 4;
                unsigned f2 : 4;
                unsigned f3 : 4;
            };
        } m_u;
    };
739
740 class ARMInstructionFormatter;
741
742 // false means else!
743 bool ifThenElseConditionBit(Condition condition, bool isIf)
744 {
745 return isIf ? (condition & 1) : !(condition & 1);
746 }
    // Builds the operand byte of an IT instruction (condition in the high
    // nibble, mask in the low nibble) for 4, 3, 2, or 1 conditional
    // instructions. The position of the trailing set bit in the mask
    // encodes how many slots are in use.
    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | (ifThenElseConditionBit(condition, inst3if) << 2)
            | (ifThenElseConditionBit(condition, inst4if) << 1)
            | 1;
        // With AL, "else" slots are meaningless, so only the end-marker bit
        // may be set (mask must be a power of two).
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }
    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | (ifThenElseConditionBit(condition, inst3if) << 2)
            | 2;
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }
    uint8_t ifThenElse(Condition condition, bool inst2if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | 4;
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }

    // Single-instruction IT: the mask holds only the end marker.
    uint8_t ifThenElse(Condition condition)
    {
        int mask = 8;
        return (condition << 4) | mask;
    }
777
778 public:
779
    // Rd = Rn + imm. Selects the narrowest available Thumb encoding:
    // 16-bit SP-relative forms, 16-bit T1/T2 forms for low registers,
    // otherwise 32-bit T3 (modified immediate) or T4 (plain 12-bit).
    void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if (rn == ARMRegisters::sp) {
            if (!(rd & 8) && imm.isUInt10()) {
                // ADD (SP plus imm) T1: the immediate is encoded in words.
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2);
                return;
            } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
                // ADD SP, SP, #imm T2: the immediate is encoded in words.
                m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2);
                return;
            }
        } else if (!((rd | rn) & 8)) {
            // Both registers low (r0-r7): 16-bit forms are available.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
        }
    }

    // Rd = Rn + (Rm shifted).
    void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // NOTE: In an IT block, add doesn't modify the flags register.
    void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        // Prefer the 16-bit T2 form (rd must equal one operand), then the
        // 16-bit low-register T1 form, else fall back to 32-bit.
        if (rd == rn)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
        else if (rd == rm)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
        else if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
        else
            add(rd, rn, rm, ShiftTypeAndAmount());
    }
835
    // Not allowed in an IT (if then) block.
    // Rd = Rn + imm, setting the flags (ADDS).
    void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        if (!((rd | rn) & 8)) {
            // The 16-bit low-register encodings set flags outside an IT block.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
    }

    // Not allowed in an IT (if then) block?
    // Rd = Rn + (Rm shifted), setting the flags.
    void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // Not allowed in an IT (if then) block.
    void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
        else
            add_S(rd, rn, rm, ShiftTypeAndAmount());
    }
876
    // Rd = Rn & imm ('and' is a C++ alternative token, hence the prefix).
    void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
    }

    // Rd = Rn & (Rm shifted).
    void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // Rd = Rn & Rm; uses the 16-bit form (which requires rd to equal one
    // operand and all registers low) when possible.
    void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
        else
            ARM_and(rd, rn, rm, ShiftTypeAndAmount());
    }
902
    // Rd = Rm >> shiftAmount (arithmetic shift right, immediate).
    void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // Rd = Rn >> Rm (arithmetic shift right, register).
    void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
918
    // Only allowed in IT (if then) block if last instruction.
    // Emits a 32-bit unconditional branch (B T4); the target is patched in
    // at link time via the returned JmpSrc.
    JmpSrc b(JumpType type)
    {
        m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
        return JmpSrc(m_formatter.size(), type);
    }
925
    // Only allowed in IT (if then) block if last instruction.
    // Branch with link and exchange to the address in rm.
    JmpSrc blx(RegisterID rm, JumpType type)
    {
        ASSERT(rm != ARMRegisters::pc);
        // The (RegisterID)8 argument supplies bit 7, turning the shared
        // BX/BLX base opcode into BLX.
        m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
        return JmpSrc(m_formatter.size(), type);
    }
933
    // Only allowed in IT (if then) block if last instruction.
    // Branch and exchange to the address in rm, recorded together with a
    // condition (the governing IT instruction is emitted by the caller).
    JmpSrc bx(RegisterID rm, JumpType type, Condition condition)
    {
        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
        return JmpSrc(m_formatter.size(), type, condition);
    }

    // Unconditional branch and exchange to the address in rm.
    JmpSrc bx(RegisterID rm, JumpType type)
    {
        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
        return JmpSrc(m_formatter.size(), type);
    }
946
    // Emits a breakpoint instruction carrying the given 8-bit payload.
    void bkpt(uint8_t imm=0)
    {
        m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
    }
951
    // Compare negative: sets flags on Rn + imm, result discarded.
    void cmn(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        // 0xf in the Rd slot marks the result as discarded.
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
    }
959
    // Compare: sets flags on Rn - imm, result discarded.
    void cmp(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        // 16-bit form for a low register and an 8-bit immediate.
        if (!(rn & 8) && imm.isUInt8())
            m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
    }

    // Compare: sets flags on Rn - (Rm shifted).
    void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        // 0xf in the Rd slot marks the result as discarded.
        m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }

    // Compare two registers, using the 16-bit form when both are low.
    void cmp(RegisterID rn, RegisterID rm)
    {
        if ((rn | rm) & 8)
            cmp(rn, rm, ShiftTypeAndAmount());
        else
            m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
    }
985
    // xor is not spelled with an 'e'. :-(
    // Rd = Rn ^ imm.
    void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
    }

    // xor is not spelled with an 'e'. :-(
    // Rd = Rn ^ (Rm shifted).
    void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // xor is not spelled with an 'e'. :-(
    // Rd = Rn ^ Rm; the 16-bit form needs rd equal to one operand and all
    // registers low.
    void eor(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
        else
            eor(rd, rn, rm, ShiftTypeAndAmount());
    }
1014
    // IT (if-then): makes up to four following instructions conditional.
    // The boolean arguments select "then" (true) or "else" (false) for each
    // additional slot after the first.
    void it(Condition cond)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
    }

    void it(Condition cond, bool inst2if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
    }

    void it(Condition cond, bool inst2if, bool inst3if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
    }

    void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
    }
1034
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // Rt = MEM32[Rn + imm]; picks the narrowest of the T1/T2/T3 encodings.
    void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        // T1 and T2 encode a word-scaled offset, hence the >> 2.
        // NOTE(review): an unaligned offset would have its low bits silently
        // dropped by these forms — confirm callers only pass multiples of 4.
        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
    }
1048
1049 // If index is set, this is a regular offset or a pre-indexed load;
1050 // if index is not set then is is a post-index load.
1051 //
1052 // If wback is set rn is updated - this is a pre or post index load,
1053 // if wback is not set this is a regular offset memory access.
1054 //
1055 // (-255 <= offset <= 255)
1056 // _reg = REG[rn]
1057 // _tmp = _reg + offset
1058 // MEM[index ? _tmp : _reg] = REG[rt]
1059 // if (wback) REG[rn] = _tmp
1060 void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1061 {
1062 ASSERT(rt != ARMRegisters::pc);
1063 ASSERT(rn != ARMRegisters::pc);
1064 ASSERT(index || wback);
1065 ASSERT(!wback | (rt != rn));
1066
1067 bool add = true;
1068 if (offset < 0) {
1069 add = false;
1070 offset = -offset;
1071 }
1072 ASSERT((offset & ~0xff) == 0);
1073
1074 offset |= (wback << 8);
1075 offset |= (add << 9);
1076 offset |= (index << 10);
1077 offset |= (1 << 11);
1078
1079 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1080 }
1081
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // Rt = MEM32[Rn + (Rm << shift)].
    void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // 16-bit T1 form only supports low registers and no shift.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1094
1095 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1096 void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1097 {
1098 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1099 ASSERT(imm.isUInt12());
1100
1101 if (!((rt | rn) & 8) && imm.isUInt6())
1102 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
1103 else
1104 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1105 }
1106
1107 // If index is set, this is a regular offset or a pre-indexed load;
1108 // if index is not set then is is a post-index load.
1109 //
1110 // If wback is set rn is updated - this is a pre or post index load,
1111 // if wback is not set this is a regular offset memory access.
1112 //
1113 // (-255 <= offset <= 255)
1114 // _reg = REG[rn]
1115 // _tmp = _reg + offset
1116 // MEM[index ? _tmp : _reg] = REG[rt]
1117 // if (wback) REG[rn] = _tmp
1118 void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1119 {
1120 ASSERT(rt != ARMRegisters::pc);
1121 ASSERT(rn != ARMRegisters::pc);
1122 ASSERT(index || wback);
1123 ASSERT(!wback | (rt != rn));
1124
1125 bool add = true;
1126 if (offset < 0) {
1127 add = false;
1128 offset = -offset;
1129 }
1130 ASSERT((offset & ~0xff) == 0);
1131
1132 offset |= (wback << 8);
1133 offset |= (add << 9);
1134 offset |= (index << 10);
1135 offset |= (1 << 11);
1136
1137 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1138 }
1139
    // LDRH (register): load a halfword from [rn + (rm << shift)] into rt.
    // Uses the 2-byte T1 encoding when there is no shift and all registers are
    // low (r0-r7); otherwise the 4-byte T2 encoding.
    void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
    {
        ASSERT(!BadReg(rt));   // Memory hint
        ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1152
    // LDRB (immediate): load a byte from [rn + imm] into rt.
    // The 2-byte T1 encoding takes a 5-bit byte offset directly; otherwise the
    // 4-byte T2 encoding takes a full 12-bit offset.
    void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt5())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
    }
1163
1164 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1165 {
1166 ASSERT(rt != ARMRegisters::pc);
1167 ASSERT(rn != ARMRegisters::pc);
1168 ASSERT(index || wback);
1169 ASSERT(!wback | (rt != rn));
1170
1171 bool add = true;
1172 if (offset < 0) {
1173 add = false;
1174 offset = -offset;
1175 }
1176
1177 ASSERT(!(offset & ~0xff));
1178
1179 offset |= (wback << 8);
1180 offset |= (add << 9);
1181 offset |= (index << 10);
1182 offset |= (1 << 11);
1183
1184 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1185 }
1186
    // LDRB (register): load a byte from [rn + (rm << shift)] into rt.
    // Uses the 2-byte T1 encoding when there is no shift and all registers are
    // low (r0-r7); otherwise the 4-byte T2 encoding.
    void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1198
    // LSL (immediate): rd = rm << shiftAmount.
    void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1206
    // LSL (register): rd = rn << rm.
    void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1214
    // LSR (immediate): rd = rm >> shiftAmount (logical, zero-filling).
    void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1222
    // LSR (register): rd = rn >> rm (logical, zero-filling).
    void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1230
    // MOV (immediate) T3: loads an arbitrary 16-bit immediate into rd.
    // Used when the value cannot be expressed as a Thumb-2 encoded immediate
    // (hence the !isEncodedImm() assertion) — mov() below selects this as a fallback.
    void movT3(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
    }
1239
    // MOV (immediate): picks the smallest usable encoding —
    // 2-byte T1 for a low register and 8-bit immediate, 4-byte T2 for a
    // Thumb-2 encodable immediate, otherwise the T3 16-bit form.
    void mov(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!BadReg(rd));

        if ((rd < 8) && imm.isUInt8())
            m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
        else if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
        else
            movT3(rd, imm);
    }
1252
    // MOV (register): rd = rm. The T1 encoding handles high registers too.
    void mov(RegisterID rd, RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
    }
1257
    // MOVT: writes a 16-bit immediate into the top halfword of rd,
    // leaving the bottom halfword unchanged (pairs with movT3 to build 32-bit values).
    void movt(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isUInt16());
        ASSERT(!BadReg(rd));
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
    }
1264
    // MVN (immediate): rd = ~imm.
    void mvn(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
    }
1272
    // MVN (register, shifted): rd = ~(rm shifted by 'shift').
    void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1279
1280 void mvn(RegisterID rd, RegisterID rm)
1281 {
1282 if (!((rd | rm) & 8))
1283 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1284 else
1285 mvn(rd, rm, ShiftTypeAndAmount());
1286 }
1287
1288 void neg(RegisterID rd, RegisterID rm)
1289 {
1290 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1291 sub(rd, zero, rm);
1292 }
1293
    // ORR (immediate): rd = rn | imm.
    void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
    }
1301
    // ORR (register, shifted): rd = rn | (rm shifted by 'shift').
    void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1309
1310 void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1311 {
1312 if ((rd == rn) && !((rd | rm) & 8))
1313 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1314 else if ((rd == rm) && !((rd | rn) & 8))
1315 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1316 else
1317 orr(rd, rn, rm, ShiftTypeAndAmount());
1318 }
1319
    // ORRS (register, shifted): rd = rn | (rm shifted), setting condition flags.
    void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1327
    // ORRS (register): rd = rn | rm, setting condition flags. Like orr(),
    // exploits commutativity to use the 2-byte T1 encoding when possible.
    // (The T1 encoding sets flags outside an IT block, matching the _S intent.)
    void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
        else
            orr_S(rd, rn, rm, ShiftTypeAndAmount());
    }
1337
    // ROR (immediate): rd = rm rotated right by shiftAmount.
    void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1345
    // ROR (register): rd = rn rotated right by rm.
    void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1353
    // SMULL: signed 32x32 -> 64 multiply; rdLo:rdHi = rn * rm.
    void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rdLo));
        ASSERT(!BadReg(rdHi));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        ASSERT(rdLo != rdHi);
        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
    }
1363
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STR (immediate): store the word in rt to [rn + imm]. Picks the smallest
    // encoding: T1 (low regs, word-aligned 7-bit offset), T2 (SP-relative,
    // word-aligned 10-bit offset), then the 4-byte T3 with a 12-bit offset.
    void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
    }
1378
1379 // If index is set, this is a regular offset or a pre-indexed store;
1380 // if index is not set then is is a post-index store.
1381 //
1382 // If wback is set rn is updated - this is a pre or post index store,
1383 // if wback is not set this is a regular offset memory access.
1384 //
1385 // (-255 <= offset <= 255)
1386 // _reg = REG[rn]
1387 // _tmp = _reg + offset
1388 // MEM[index ? _tmp : _reg] = REG[rt]
1389 // if (wback) REG[rn] = _tmp
1390 void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1391 {
1392 ASSERT(rt != ARMRegisters::pc);
1393 ASSERT(rn != ARMRegisters::pc);
1394 ASSERT(index || wback);
1395 ASSERT(!wback | (rt != rn));
1396
1397 bool add = true;
1398 if (offset < 0) {
1399 add = false;
1400 offset = -offset;
1401 }
1402 ASSERT((offset & ~0xff) == 0);
1403
1404 offset |= (wback << 8);
1405 offset |= (add << 9);
1406 offset |= (index << 10);
1407 offset |= (1 << 11);
1408
1409 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1410 }
1411
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STR (register): store the word in rt to [rn + (rm << shift)].
    void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1424
    // SUB (immediate): rd = rn - imm. Tries encodings smallest-first:
    // SUB SP (T1), low-register T1/T2, then Thumb-2 T3 (encoded immediate)
    // or T4 (plain 12-bit immediate).
    void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            // SUB SP, SP, #imm — immediate is word-aligned, encoded as imm/4.
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
        }
    }
1453
    // Reverse subtract: rd = imm - rn (RSB). Note the argument order —
    // the immediate comes second to distinguish this overload from sub(rd, rn, imm).
    void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        // RSB T1 only encodes "rd = 0 - rn", so the short form needs a zero immediate.
        if (!((rd | rn) & 8) && !imm.getUInt12())
            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
    }
1466
    // SUB (register, shifted): rd = rn - (rm shifted by 'shift').
    void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1475
    // NOTE: In an IT block, sub doesn't modify the flags register.
    // SUB (register): rd = rn - rm.
    void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub(rd, rn, rm, ShiftTypeAndAmount());
    }
1484
    // Not allowed in an IT (if then) block.
    // SUBS (immediate): rd = rn - imm, setting condition flags.
    // Same encoding selection as sub(), except the final Thumb-2 form is the
    // flag-setting T3 (there is no flag-setting 12-bit T4 equivalent).
    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            // SUB SP, SP, #imm — immediate is word-aligned, encoded as imm/4.
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
    }
1509
    // Not allowed in an IT (if then) block?
    // SUBS (register, shifted): rd = rn - (rm shifted), setting condition flags.
    void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1519
1520 // Not allowed in an IT (if then) block.
1521 void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1522 {
1523 if (!((rd | rn | rm) & 8))
1524 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1525 else
1526 sub_S(rd, rn, rm, ShiftTypeAndAmount());
1527 }
1528
    // TST (immediate): sets flags from rn & imm; no register result
    // (the destination field is the fixed value 0xf).
    void tst(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
    }
1536
    // TST (register, shifted): sets flags from rn & (rm shifted); no register result.
    void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }
1543
1544 void tst(RegisterID rn, RegisterID rm)
1545 {
1546 if ((rn | rm) & 8)
1547 tst(rn, rm, ShiftTypeAndAmount());
1548 else
1549 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
1550 }
1551
    // VADD.F64: rd = rn + rm (the 'true' selects the double-precision encoding).
    void vadd_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
    }
1556
    // VCMP.F64: compare rd with rm, result in FPSCR (read back via vmrs).
    void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
    }
1561
    // VCMP.F64 rd, #0.0: compare rd against zero (opc2 = 5 selects the zero form).
    void vcmpz_F64(FPDoubleRegisterID rd)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
    }
1566
    // VCVT.F64.S32: convert the signed 32-bit integer in rm to a double in rd.
    void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
    }
1572
    // VCVT.S32.F64 (round toward zero): truncate the double in rm to a signed
    // 32-bit integer in rd.
    void vcvtr_S32_F64(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
    }
1578
    // VDIV.F64: rd = rn / rm.
    void vdiv_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
    }
1583
    // VLDR: load a double from [rn + imm] into rd.
    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
    }
1588
    // VMOV: copy a single-precision VFP register into a core register.
    void vmov(RegisterID rd, FPSingleRegisterID rn)
    {
        ASSERT(!BadReg(rd));
        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rn, rd, VFPOperand(0));
    }
1594
    // VMOV: copy a core register into a single-precision VFP register.
    void vmov(FPSingleRegisterID rd, RegisterID rn)
    {
        ASSERT(!BadReg(rn));
        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rd, rn, VFPOperand(0));
    }
1600
    // VMRS: transfer FPSCR to a core register. With the default reg == pc this
    // is the APSR_nzcv form, copying the FP comparison flags into the APSR.
    void vmrs(RegisterID reg = ARMRegisters::pc)
    {
        ASSERT(reg != ARMRegisters::sp);
        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
    }
1606
    // VMUL.F64: rd = rn * rm.
    void vmul_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
    }
1611
    // VSTR: store the double in rd to [rn + imm].
    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
    }
1616
    // VSUB.F64: rd = rn - rm.
    void vsub_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
    }
1621
    // Returns a label marking the current end of the code buffer.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }
1626
    // Pads the buffer with breakpoint instructions until it reaches the
    // requested alignment, then returns a label at the aligned position.
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            bkpt();

        return label();
    }
1634
    // Translates a jump source's buffer offset into an absolute address within
    // the relocated code block.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }
1641
    // Translates a label's buffer offset into an absolute address within the
    // relocated code block.
    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }
1648
    // Byte distance between two labels (positive when dst follows src).
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
1653
    // Byte distance from a label to a jump source (positive when dst follows src).
    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }
1658
    // Byte distance from a jump source to a label (positive when dst follows src).
    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
1663
    // Looks up the link-time offset recorded (by recordLinkOffsets) for the
    // instruction word preceding 'location'. Location 0 has no predecessor.
    int executableOffsetFor(int location)
    {
        if (!location)
            return 0;
        return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
    }
1670
    // Bytes saved by emitting the chosen (possibly shorter) link encoding
    // instead of the full padding reserved for this jump type.
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JumpPaddingSizes[jumpType] - JumpSizes[jumpLinkType]; }
1672
    // Assembler admin methods:

    // Number of bytes currently emitted into the code buffer.
    size_t size() const
    {
        return m_formatter.size();
    }
1679
    // Orders link records by source offset, for sorting in jumpsToLink().
    static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }
1684
    // Whether branch compaction may shrink a jump of this type.
    bool canCompact(JumpType jumpType)
    {
        // The following cannot be compacted:
        //   JumpFixed: represents custom jump sequence
        //   JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
        //   JumpConditionFixedSize: represents conditional jump that must remain a fixed size
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
    }
1693
    // Chooses the shortest branch encoding able to reach 'to' from 'from',
    // trying candidates in increasing size order and rejecting any whose
    // placement would hit the Cortex-A8 branch erratum (see canBeJumpT3/T4).
    // Fixed-size jump types always get their reserved long-form encoding.
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        if (jumpType == JumpFixed)
            return LinkInvalid;

        // for patchable jump we must leave space for the longest code sequence
        if (jumpType == JumpNoConditionFixedSize)
            return LinkBX;
        if (jumpType == JumpConditionFixedSize)
            return LinkConditionalBX;

        const int paddingSize = JumpPaddingSizes[jumpType];
        bool mayTriggerErrata = false;

        if (jumpType == JumpCondition) {
            // 2-byte conditional T1
            // Each candidate's location is the end of where its encoding would
            // sit within the reserved padding (shorter encodings sit later).
            const uint16_t* jumpT1Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT1]));
            if (canBeJumpT1(jumpT1Location, to))
                return LinkJumpT1;
            // 4-byte conditional T3
            const uint16_t* jumpT3Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT3]));
            if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT3;
            }
            // 4-byte conditional T4 with IT
            const uint16_t* conditionalJumpT4Location =
            reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkConditionalJumpT4]));
            if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkConditionalJumpT4;
            }
        } else {
            // 2-byte unconditional T2
            const uint16_t* jumpT2Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT2]));
            if (canBeJumpT2(jumpT2Location, to))
                return LinkJumpT2;
            // 4-byte unconditional T4
            const uint16_t* jumpT4Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT4]));
            if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT4;
            }
            // use long jump sequence
            return LinkBX;
        }

        ASSERT(jumpType == JumpCondition);
        return LinkConditionalBX;
    }
1744
    // Convenience overload: computes the link type and stores it on the record.
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }
1751
    // Records the same link-time offset for every 32-bit word in
    // [regionStart, regionEnd), writing into the (now otherwise unused)
    // buffer storage; read back by executableOffsetFor().
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        int32_t ptr = regionStart / sizeof(int32_t);
        const int32_t end = regionEnd / sizeof(int32_t);
        int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
        while (ptr < end)
            offsets[ptr++] = offset;
    }
1760
    // Returns the pending jump records, sorted by source offset so the link
    // phase can walk them in code order.
    Vector<LinkRecord>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
1766
    // Patches a single jump according to the link type previously chosen by
    // computeJumpType(), dispatching to the matching encoding-specific linker.
    void link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpT1:
            linkJumpT1(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT2:
            linkJumpT2(reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT3:
            linkJumpT3(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT4:
            linkJumpT4(reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkConditionalJumpT4:
            linkConditionalJumpT4(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkConditionalBX:
            linkConditionalBX(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkBX:
            linkBX(reinterpret_cast<uint16_t*>(from), to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }
1796
    // Raw pointer to the assembled (not yet relocated/linked) code buffer.
    void* unlinkedCode() { return m_formatter.data(); }
1798
    // The buffer offset of the instruction following a call — i.e. its return address.
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }
1804
1805 // Linking & patching:
1806 //
1807 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1808 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1809 // code has been finalized it is (platform support permitting) within a non-
1810 // writable region of memory; to modify the code in an execute-only execuable
1811 // pool the 'repatch' and 'relink' methods should be used.
1812
    // Records an intra-buffer jump to be resolved later during the link phase
    // (see jumpsToLink()/link()); nothing is patched here.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition));
    }
1819
    // Immediately patches a jump in unprotected code to target the absolute
    // address 'to'.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
        linkJumpAbsolute(location, to);
    }
1827
    // bah, this method should really be static, since it is used by the LinkBuffer.
    // return a bool saying whether the link was successful?
    // Patches the mov/movt pair ending one halfword before the call site to
    // load the target address. The target must have its Thumb bit (bit 0) set.
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
        ASSERT(from.m_offset != -1);
        ASSERT(reinterpret_cast<intptr_t>(to) & 1);

        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
    }
1838
    // Patches a mov/movt pair at the given label to load 'value'.
    static void linkPointer(void* code, JmpDst where, void* value)
    {
        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }
1843
    // Repatches an already-linked jump in finalized code and flushes the
    // instruction cache over the (up to 5 halfword) jump sequence.
    static void relinkJump(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));

        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);

        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
    }
1853
    // Repatches an already-linked call's mov/movt pair to target 'to'
    // (which must have its Thumb bit set).
    static void relinkCall(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(reinterpret_cast<intptr_t>(to) & 1);

        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
    }
1861
    // Rewrites the 32-bit constant loaded by the mov/movt pair ending at 'where'.
    static void repatchInt32(void* where, int32_t value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setInt32(where, value);
    }
1868
    // Rewrites the pointer constant loaded by the mov/movt pair ending at 'where'.
    static void repatchPointer(void* where, void* value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setPointer(where, value);
    }
1875
    // Converts an LDR (register) T2 at 'where' + 4 halfwords into an
    // ADD (register) T3 with the same registers — i.e. turns "load from
    // [rn + rm]" into "compute rn + rm" — then flushes the icache.
    static void repatchLoadPtrToLEA(void* where)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
        uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;

        ASSERT((loadOp[0] & 0xfff0) == OP_LDR_reg_T2);
        ASSERT((loadOp[1] & 0x0ff0) == 0);
        int rn = loadOp[0] & 0xf;
        int rt = loadOp[1] >> 12;
        int rm = loadOp[1] & 0xf;

        loadOp[0] = OP_ADD_reg_T3 | rn;
        loadOp[1] = rt << 8 | rm;
        ExecutableAllocator::cacheFlush(loadOp, sizeof(uint32_t));
    }
1891
1892 private:
1893 // VFP operations commonly take one or more 5-bit operands, typically representing a
1894 // floating point register number. This will commonly be encoded in the instruction
1895 // in two parts, with one single bit field, and one 4-bit field. In the case of
1896 // double precision operands the high bit of the register number will be encoded
1897 // separately, and for single precision operands the high bit of the register number
1898 // will be encoded individually.
1899 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
1900 // field to be encoded together in the instruction (the low 4-bits of a double
1901 // register number, or the high 4-bits of a single register number), and bit 4
1902 // contains the bit value to be encoded individually.
    struct VFPOperand {
        // Raw 5-bit value (used for opcode sub-fields as well as registers).
        explicit VFPOperand(uint32_t value)
            : m_value(value)
        {
            ASSERT(!(m_value & ~0x1f));
        }

        // Double register: the 4-bit field is the low 4 bits of the register
        // number, the single bit is the high bit.
        VFPOperand(FPDoubleRegisterID reg)
            : m_value(reg)
        {
        }

        // Core register numbers fit the same 0..15 range directly.
        VFPOperand(RegisterID reg)
            : m_value(reg)
        {
        }

        // Single register: the 4-bit field is the high 4 bits of the register
        // number, the single bit is the low bit.
        VFPOperand(FPSingleRegisterID reg)
            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        {
        }

        // The individually-encoded bit (bit 4).
        uint32_t bits1()
        {
            return m_value >> 4;
        }

        // The 4-bit field (bits 0..3).
        uint32_t bits4()
        {
            return m_value & 0xf;
        }

        uint32_t m_value;
    };
1937
1938 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
1939 {
1940 // Cannot specify rounding when converting to float.
1941 ASSERT(toInteger || !isRoundZero);
1942
1943 uint32_t op = 0x8;
1944 if (toInteger) {
1945 // opc2 indicates both toInteger & isUnsigned.
1946 op |= isUnsigned ? 0x4 : 0x5;
1947 // 'op' field in instruction is isRoundZero
1948 if (isRoundZero)
1949 op |= 0x10;
1950 } else {
1951 // 'op' field in instruction is isUnsigned
1952 if (!isUnsigned)
1953 op |= 0x10;
1954 }
1955 return VFPOperand(op);
1956 }
1957
    // Rewrites the immediate of the mov/movt pair that ends at 'code' so it
    // loads 'value' (low halfword via MOV T3, high halfword via MOVT), keeping
    // each instruction's original destination register, then flushes the icache.
    static void setInt32(void* code, uint32_t value)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);

        ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
    }
1972
    // Pointer-flavoured wrapper around setInt32 (pointers are 32-bit on ARMv7).
    static void setPointer(void* code, void* value)
    {
        setInt32(code, reinterpret_cast<uint32_t>(value));
    }
1977
    // True if 'address' holds a B (unconditional branch) T4 encoding.
    static bool isB(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
    }
1983
    // True if 'address' holds a BX (branch-to-register) encoding.
    static bool isBX(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] & 0xff87) == OP_BX;
    }
1989
    // True if 'address' holds the first halfword of a MOV (immediate) T3.
    static bool isMOV_imm_T3(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
    }
1995
    // True if 'address' holds the first halfword of a MOVT.
    static bool isMOVT(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
    }
2001
    // True if 'address' holds a 2-byte NOP (T1).
    static bool isNOP_T1(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return instruction[0] == OP_NOP_T1;
    }
2007
    // True if 'address' holds a 4-byte NOP (T2).
    static bool isNOP_T2(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
    }
2013
    // True if the displacement from 'instruction' to 'target' fits the 9-bit
    // signed range of the B (conditional) T1 encoding.
    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        // Sign-extension round-trip checks the value fits in 9 bits.
        // NOTE(review): left-shifting a negative intptr_t is technically UB in
        // C++; this idiom is used throughout this file — verify compiler flags.
        return ((relative << 23) >> 23) == relative;
    }
2026
    // True if the displacement from 'instruction' to 'target' fits the 12-bit
    // signed range of the B (unconditional) T2 encoding.
    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        // Sign-extension round-trip checks the value fits in 12 bits.
        return ((relative << 20) >> 20) == relative;
    }
2039
    // True if the displacement fits the 21-bit signed range of the B T3
    // encoding AND the placement would not trip the Cortex-A8 branch erratum.
    // 'mayTriggerErrata' reports whether the instruction straddles a 4KiB
    // boundary, independent of the return value.
    static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branch back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
    }
2060
    // True if the displacement fits the 25-bit signed range of the B T4
    // encoding AND the placement would not trip the Cortex-A8 branch erratum.
    // 'mayTriggerErrata' reports whether the instruction straddles a 4KiB
    // boundary, independent of the return value.
    static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branch back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
    }
2081
2082 void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
2083 {
2084 // FIMXE: this should be up in the MacroAssembler layer. :-(
2085 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2086 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2087 ASSERT(canBeJumpT1(instruction, target));
2088
2089 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2090 // It does not appear to be documented in the ARM ARM (big surprise), but
2091 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2092 // less than the actual displacement.
2093 relative -= 2;
2094
2095 // All branch offsets should be an even distance.
2096 ASSERT(!(relative & 1));
2097 instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
2098 }
2099
2100 static void linkJumpT2(uint16_t* instruction, void* target)
2101 {
2102 // FIMXE: this should be up in the MacroAssembler layer. :-(
2103 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2104 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2105 ASSERT(canBeJumpT2(instruction, target));
2106
2107 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2108 // It does not appear to be documented in the ARM ARM (big surprise), but
2109 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2110 // less than the actual displacement.
2111 relative -= 2;
2112
2113 // All branch offsets should be an even distance.
2114 ASSERT(!(relative & 1));
2115 instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
2116 }
2117
2118 void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
2119 {
2120 // FIMXE: this should be up in the MacroAssembler layer. :-(
2121 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2122 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2123 bool scratch;
2124 UNUSED_PARAM(scratch);
2125 ASSERT(canBeJumpT3(instruction, target, scratch));
2126
2127 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2128
2129 // All branch offsets should be an even distance.
2130 ASSERT(!(relative & 1));
2131 instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2132 instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2133 }
2134
2135 static void linkJumpT4(uint16_t* instruction, void* target)
2136 {
2137 // FIMXE: this should be up in the MacroAssembler layer. :-(
2138 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2139 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2140 bool scratch;
2141 UNUSED_PARAM(scratch);
2142 ASSERT(canBeJumpT4(instruction, target, scratch));
2143
2144 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2145 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2146 if (relative >= 0)
2147 relative ^= 0xC00000;
2148
2149 // All branch offsets should be an even distance.
2150 ASSERT(!(relative & 1));
2151 instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2152 instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2153 }
2154
2155 void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
2156 {
2157 // FIMXE: this should be up in the MacroAssembler layer. :-(
2158 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2159 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2160
2161 instruction[-3] = ifThenElse(cond) | OP_IT;
2162 linkJumpT4(instruction, target);
2163 }
2164
2165 static void linkBX(uint16_t* instruction, void* target)
2166 {
2167 // FIMXE: this should be up in the MacroAssembler layer. :-(
2168 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2169 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2170
2171 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2172 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2173 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2174 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2175 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2176 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2177 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2178 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2179 }
2180
2181 void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
2182 {
2183 // FIMXE: this should be up in the MacroAssembler layer. :-(
2184 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2185 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2186
2187 linkBX(instruction, target);
2188 instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
2189 }
2190
2191 static void linkJumpAbsolute(uint16_t* instruction, void* target)
2192 {
2193 // FIMXE: this should be up in the MacroAssembler layer. :-(
2194 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2195 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2196
2197 ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2198 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2199
2200 bool scratch;
2201 if (canBeJumpT4(instruction, target, scratch)) {
2202 // There may be a better way to fix this, but right now put the NOPs first, since in the
2203 // case of an conditional branch this will be coming after an ITTT predicating *three*
2204 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2205 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2206 // actually be the second half of a 2-word op.
2207 instruction[-5] = OP_NOP_T1;
2208 instruction[-4] = OP_NOP_T2a;
2209 instruction[-3] = OP_NOP_T2b;
2210 linkJumpT4(instruction, target);
2211 } else {
2212 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2213 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2214 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2215 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2216 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2217 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2218 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2219 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2220 }
2221 }
2222
2223 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2224 {
2225 return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2226 }
2227
2228 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2229 {
2230 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2231 }
2232
2233 class ARMInstructionFormatter {
2234 public:
2235 void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
2236 {
2237 m_buffer.putShort(op | (rd << 8) | imm);
2238 }
2239
2240 void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
2241 {
2242 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
2243 }
2244
2245 void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
2246 {
2247 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
2248 }
2249
2250 void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
2251 {
2252 m_buffer.putShort(op | imm);
2253 }
2254
2255 void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
2256 {
2257 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
2258 }
2259 void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
2260 {
2261 m_buffer.putShort(op | imm);
2262 }
2263
2264 void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
2265 {
2266 m_buffer.putShort(op | (reg1 << 3) | reg2);
2267 }
2268
2269 void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
2270 {
2271 m_buffer.putShort(op | reg);
2272 m_buffer.putShort(ff.m_u.value);
2273 }
2274
2275 void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
2276 {
2277 m_buffer.putShort(op);
2278 m_buffer.putShort(ff.m_u.value);
2279 }
2280
2281 void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
2282 {
2283 m_buffer.putShort(op1);
2284 m_buffer.putShort(op2);
2285 }
2286
2287 void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
2288 {
2289 ARMThumbImmediate newImm = imm;
2290 newImm.m_value.imm4 = imm4;
2291
2292 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
2293 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
2294 }
2295
2296 void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
2297 {
2298 m_buffer.putShort(op | reg1);
2299 m_buffer.putShort((reg2 << 12) | imm);
2300 }
2301
2302 // Formats up instructions of the pattern:
2303 // 111111111B11aaaa:bbbb222SA2C2cccc
2304 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2305 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2306 void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
2307 {
2308 ASSERT(!(op1 & 0x004f));
2309 ASSERT(!(op2 & 0xf1af));
2310 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
2311 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
2312 }
2313
2314 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2315 // (i.e. +/-(0..255) 32-bit words)
2316 void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
2317 {
2318 bool up = true;
2319 if (imm < 0) {
2320 imm = -imm;
2321 up = false;
2322 }
2323
2324 uint32_t offset = imm;
2325 ASSERT(!(offset & ~0x3fc));
2326 offset >>= 2;
2327
2328 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
2329 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
2330 }
2331
2332 // Administrative methods:
2333
2334 size_t size() const { return m_buffer.size(); }
2335 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2336 void* data() const { return m_buffer.data(); }
2337 void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
2338
2339 private:
2340 AssemblerBuffer m_buffer;
2341 } m_formatter;
2342
2343 Vector<LinkRecord> m_jumpsToLink;
2344 Vector<int32_t> m_offsets;
2345 };
2346
2347 } // namespace JSC
2348
2349 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2350
2351 #endif // ARMAssembler_h