]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/ARMv7Assembler.h
JavaScriptCore-903.5.tar.gz
[apple/javascriptcore.git] / assembler / ARMv7Assembler.h
1 /*
2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
29
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
31
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
35 #include <stdint.h>
36
37 namespace JSC {
38
39 namespace ARMRegisters {
40 typedef enum {
41 r0,
42 r1,
43 r2,
44 r3,
45 r4,
46 r5,
47 r6,
48 r7, wr = r7, // thumb work register
49 r8,
50 r9, sb = r9, // static base
51 r10, sl = r10, // stack limit
52 r11, fp = r11, // frame pointer
53 r12, ip = r12,
54 r13, sp = r13,
55 r14, lr = r14,
56 r15, pc = r15,
57 } RegisterID;
58
59 typedef enum {
60 s0,
61 s1,
62 s2,
63 s3,
64 s4,
65 s5,
66 s6,
67 s7,
68 s8,
69 s9,
70 s10,
71 s11,
72 s12,
73 s13,
74 s14,
75 s15,
76 s16,
77 s17,
78 s18,
79 s19,
80 s20,
81 s21,
82 s22,
83 s23,
84 s24,
85 s25,
86 s26,
87 s27,
88 s28,
89 s29,
90 s30,
91 s31,
92 } FPSingleRegisterID;
93
94 typedef enum {
95 d0,
96 d1,
97 d2,
98 d3,
99 d4,
100 d5,
101 d6,
102 d7,
103 d8,
104 d9,
105 d10,
106 d11,
107 d12,
108 d13,
109 d14,
110 d15,
111 d16,
112 d17,
113 d18,
114 d19,
115 d20,
116 d21,
117 d22,
118 d23,
119 d24,
120 d25,
121 d26,
122 d27,
123 d28,
124 d29,
125 d30,
126 d31,
127 } FPDoubleRegisterID;
128
129 typedef enum {
130 q0,
131 q1,
132 q2,
133 q3,
134 q4,
135 q5,
136 q6,
137 q7,
138 q8,
139 q9,
140 q10,
141 q11,
142 q12,
143 q13,
144 q14,
145 q15,
146 q16,
147 q17,
148 q18,
149 q19,
150 q20,
151 q21,
152 q22,
153 q23,
154 q24,
155 q25,
156 q26,
157 q27,
158 q28,
159 q29,
160 q30,
161 q31,
162 } FPQuadRegisterID;
163
164 inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
165 {
166 ASSERT(reg < d16);
167 return (FPSingleRegisterID)(reg << 1);
168 }
169
170 inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
171 {
172 ASSERT(!(reg & 1));
173 return (FPDoubleRegisterID)(reg >> 1);
174 }
175 }
176
177 class ARMv7Assembler;
178 class ARMThumbImmediate {
179 friend class ARMv7Assembler;
180
181 typedef uint8_t ThumbImmediateType;
182 static const ThumbImmediateType TypeInvalid = 0;
183 static const ThumbImmediateType TypeEncoded = 1;
184 static const ThumbImmediateType TypeUInt16 = 2;
185
186 typedef union {
187 int16_t asInt;
188 struct {
189 unsigned imm8 : 8;
190 unsigned imm3 : 3;
191 unsigned i : 1;
192 unsigned imm4 : 4;
193 };
194 // If this is an encoded immediate, then it may describe a shift, or a pattern.
195 struct {
196 unsigned shiftValue7 : 7;
197 unsigned shiftAmount : 5;
198 };
199 struct {
200 unsigned immediate : 8;
201 unsigned pattern : 4;
202 };
203 } ThumbImmediateValue;
204
205 // byte0 contains least significant bit; not using an array to make client code endian agnostic.
206 typedef union {
207 int32_t asInt;
208 struct {
209 uint8_t byte0;
210 uint8_t byte1;
211 uint8_t byte2;
212 uint8_t byte3;
213 };
214 } PatternBytes;
215
216 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
217 {
218 if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
219 value >>= N; /* if any were set, lose the bottom N */
220 else /* if none of the top N bits are set, */
221 zeros += N; /* then we have identified N leading zeros */
222 }
223
224 static int32_t countLeadingZeros(uint32_t value)
225 {
226 if (!value)
227 return 32;
228
229 int32_t zeros = 0;
230 countLeadingZerosPartial(value, zeros, 16);
231 countLeadingZerosPartial(value, zeros, 8);
232 countLeadingZerosPartial(value, zeros, 4);
233 countLeadingZerosPartial(value, zeros, 2);
234 countLeadingZerosPartial(value, zeros, 1);
235 return zeros;
236 }
237
238 ARMThumbImmediate()
239 : m_type(TypeInvalid)
240 {
241 m_value.asInt = 0;
242 }
243
244 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
245 : m_type(type)
246 , m_value(value)
247 {
248 }
249
250 ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
251 : m_type(TypeUInt16)
252 {
253 // Make sure this constructor is only reached with type TypeUInt16;
254 // this extra parameter makes the code a little clearer by making it
255 // explicit at call sites which type is being constructed
256 ASSERT_UNUSED(type, type == TypeUInt16);
257
258 m_value.asInt = value;
259 }
260
261 public:
262 static ARMThumbImmediate makeEncodedImm(uint32_t value)
263 {
264 ThumbImmediateValue encoding;
265 encoding.asInt = 0;
266
267 // okay, these are easy.
268 if (value < 256) {
269 encoding.immediate = value;
270 encoding.pattern = 0;
271 return ARMThumbImmediate(TypeEncoded, encoding);
272 }
273
274 int32_t leadingZeros = countLeadingZeros(value);
275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
276 ASSERT(leadingZeros < 24);
277
278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
281 int32_t rightShiftAmount = 24 - leadingZeros;
282 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
283 // Shift the value down to the low byte position. The assign to
284 // shiftValue7 drops the implicit top bit.
285 encoding.shiftValue7 = value >> rightShiftAmount;
286 // The endoded shift amount is the magnitude of a right rotate.
287 encoding.shiftAmount = 8 + leadingZeros;
288 return ARMThumbImmediate(TypeEncoded, encoding);
289 }
290
291 PatternBytes bytes;
292 bytes.asInt = value;
293
294 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
295 encoding.immediate = bytes.byte0;
296 encoding.pattern = 3;
297 return ARMThumbImmediate(TypeEncoded, encoding);
298 }
299
300 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
301 encoding.immediate = bytes.byte0;
302 encoding.pattern = 1;
303 return ARMThumbImmediate(TypeEncoded, encoding);
304 }
305
306 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
307 encoding.immediate = bytes.byte1;
308 encoding.pattern = 2;
309 return ARMThumbImmediate(TypeEncoded, encoding);
310 }
311
312 return ARMThumbImmediate();
313 }
314
315 static ARMThumbImmediate makeUInt12(int32_t value)
316 {
317 return (!(value & 0xfffff000))
318 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
319 : ARMThumbImmediate();
320 }
321
322 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
323 {
324 // If this is not a 12-bit unsigned it, try making an encoded immediate.
325 return (!(value & 0xfffff000))
326 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
327 : makeEncodedImm(value);
328 }
329
330 // The 'make' methods, above, return a !isValid() value if the argument
331 // cannot be represented as the requested type. This methods is called
332 // 'get' since the argument can always be represented.
333 static ARMThumbImmediate makeUInt16(uint16_t value)
334 {
335 return ARMThumbImmediate(TypeUInt16, value);
336 }
337
338 bool isValid()
339 {
340 return m_type != TypeInvalid;
341 }
342
343 uint16_t asUInt16() const { return m_value.asInt; }
344
345 // These methods rely on the format of encoded byte values.
346 bool isUInt3() { return !(m_value.asInt & 0xfff8); }
347 bool isUInt4() { return !(m_value.asInt & 0xfff0); }
348 bool isUInt5() { return !(m_value.asInt & 0xffe0); }
349 bool isUInt6() { return !(m_value.asInt & 0xffc0); }
350 bool isUInt7() { return !(m_value.asInt & 0xff80); }
351 bool isUInt8() { return !(m_value.asInt & 0xff00); }
352 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
353 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
354 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
355 bool isUInt16() { return m_type == TypeUInt16; }
356 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
357 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
358 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
359 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
360 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
361 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
362 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
363 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
364 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
365 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
366
367 bool isEncodedImm() { return m_type == TypeEncoded; }
368
369 private:
370 ThumbImmediateType m_type;
371 ThumbImmediateValue m_value;
372 };
373
374 typedef enum {
375 SRType_LSL,
376 SRType_LSR,
377 SRType_ASR,
378 SRType_ROR,
379
380 SRType_RRX = SRType_ROR
381 } ARMShiftType;
382
383 class ShiftTypeAndAmount {
384 friend class ARMv7Assembler;
385
386 public:
387 ShiftTypeAndAmount()
388 {
389 m_u.type = (ARMShiftType)0;
390 m_u.amount = 0;
391 }
392
393 ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
394 {
395 m_u.type = type;
396 m_u.amount = amount & 31;
397 }
398
399 unsigned lo4() { return m_u.lo4; }
400 unsigned hi4() { return m_u.hi4; }
401
402 private:
403 union {
404 struct {
405 unsigned lo4 : 4;
406 unsigned hi4 : 4;
407 };
408 struct {
409 unsigned type : 2;
410 unsigned amount : 6;
411 };
412 } m_u;
413 };
414
415 class ARMv7Assembler {
416 public:
417 ~ARMv7Assembler()
418 {
419 ASSERT(m_jumpsToLink.isEmpty());
420 }
421
422 typedef ARMRegisters::RegisterID RegisterID;
423 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
424 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
425 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
426
427 // (HS, LO, HI, LS) -> (AE, B, A, BE)
428 // (VS, VC) -> (O, NO)
429 typedef enum {
430 ConditionEQ,
431 ConditionNE,
432 ConditionHS, ConditionCS = ConditionHS,
433 ConditionLO, ConditionCC = ConditionLO,
434 ConditionMI,
435 ConditionPL,
436 ConditionVS,
437 ConditionVC,
438 ConditionHI,
439 ConditionLS,
440 ConditionGE,
441 ConditionLT,
442 ConditionGT,
443 ConditionLE,
444 ConditionAL,
445 ConditionInvalid
446 } Condition;
447
448 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
449 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
450 enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
451 JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
452 JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
453 JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
454 JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
455 };
456 enum JumpLinkType {
457 LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
458 LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
459 LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
460 LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
461 LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
462 LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
463 LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
464 LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
465 };
466
467 class LinkRecord {
468 public:
469 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
470 : m_from(from)
471 , m_to(to)
472 , m_type(type)
473 , m_linkType(LinkInvalid)
474 , m_condition(condition)
475 {
476 }
477 intptr_t from() const { return m_from; }
478 void setFrom(intptr_t from) { m_from = from; }
479 intptr_t to() const { return m_to; }
480 JumpType type() const { return m_type; }
481 JumpLinkType linkType() const { return m_linkType; }
482 void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; }
483 Condition condition() const { return m_condition; }
484 private:
485 intptr_t m_from : 31;
486 intptr_t m_to : 31;
487 JumpType m_type : 8;
488 JumpLinkType m_linkType : 8;
489 Condition m_condition : 16;
490 };
491
492 private:
493
494 // ARMv7, Appx-A.6.3
495 bool BadReg(RegisterID reg)
496 {
497 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
498 }
499
500 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
501 {
502 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
503 if (rdNum & 1)
504 rdMask |= 1 << lowBitShift;
505 return rdMask;
506 }
507
508 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
509 {
510 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
511 if (rdNum & 16)
512 rdMask |= 1 << highBitShift;
513 return rdMask;
514 }
515
516 typedef enum {
517 OP_ADD_reg_T1 = 0x1800,
518 OP_SUB_reg_T1 = 0x1A00,
519 OP_ADD_imm_T1 = 0x1C00,
520 OP_SUB_imm_T1 = 0x1E00,
521 OP_MOV_imm_T1 = 0x2000,
522 OP_CMP_imm_T1 = 0x2800,
523 OP_ADD_imm_T2 = 0x3000,
524 OP_SUB_imm_T2 = 0x3800,
525 OP_AND_reg_T1 = 0x4000,
526 OP_EOR_reg_T1 = 0x4040,
527 OP_TST_reg_T1 = 0x4200,
528 OP_RSB_imm_T1 = 0x4240,
529 OP_CMP_reg_T1 = 0x4280,
530 OP_ORR_reg_T1 = 0x4300,
531 OP_MVN_reg_T1 = 0x43C0,
532 OP_ADD_reg_T2 = 0x4400,
533 OP_MOV_reg_T1 = 0x4600,
534 OP_BLX = 0x4700,
535 OP_BX = 0x4700,
536 OP_STR_reg_T1 = 0x5000,
537 OP_LDR_reg_T1 = 0x5800,
538 OP_LDRH_reg_T1 = 0x5A00,
539 OP_LDRB_reg_T1 = 0x5C00,
540 OP_STR_imm_T1 = 0x6000,
541 OP_LDR_imm_T1 = 0x6800,
542 OP_LDRB_imm_T1 = 0x7800,
543 OP_LDRH_imm_T1 = 0x8800,
544 OP_STR_imm_T2 = 0x9000,
545 OP_LDR_imm_T2 = 0x9800,
546 OP_ADD_SP_imm_T1 = 0xA800,
547 OP_ADD_SP_imm_T2 = 0xB000,
548 OP_SUB_SP_imm_T1 = 0xB080,
549 OP_BKPT = 0xBE00,
550 OP_IT = 0xBF00,
551 OP_NOP_T1 = 0xBF00,
552 } OpcodeID;
553
554 typedef enum {
555 OP_B_T1 = 0xD000,
556 OP_B_T2 = 0xE000,
557 OP_AND_reg_T2 = 0xEA00,
558 OP_TST_reg_T2 = 0xEA10,
559 OP_ORR_reg_T2 = 0xEA40,
560 OP_ORR_S_reg_T2 = 0xEA50,
561 OP_ASR_imm_T1 = 0xEA4F,
562 OP_LSL_imm_T1 = 0xEA4F,
563 OP_LSR_imm_T1 = 0xEA4F,
564 OP_ROR_imm_T1 = 0xEA4F,
565 OP_MVN_reg_T2 = 0xEA6F,
566 OP_EOR_reg_T2 = 0xEA80,
567 OP_ADD_reg_T3 = 0xEB00,
568 OP_ADD_S_reg_T3 = 0xEB10,
569 OP_SUB_reg_T2 = 0xEBA0,
570 OP_SUB_S_reg_T2 = 0xEBB0,
571 OP_CMP_reg_T2 = 0xEBB0,
572 OP_VSTR = 0xED00,
573 OP_VLDR = 0xED10,
574 OP_VMOV_StoC = 0xEE00,
575 OP_VMOV_CtoS = 0xEE10,
576 OP_VMUL_T2 = 0xEE20,
577 OP_VADD_T2 = 0xEE30,
578 OP_VSUB_T2 = 0xEE30,
579 OP_VDIV = 0xEE80,
580 OP_VCMP = 0xEEB0,
581 OP_VCVT_FPIVFP = 0xEEB0,
582 OP_VMOV_IMM_T2 = 0xEEB0,
583 OP_VMRS = 0xEEB0,
584 OP_B_T3a = 0xF000,
585 OP_B_T4a = 0xF000,
586 OP_AND_imm_T1 = 0xF000,
587 OP_TST_imm = 0xF010,
588 OP_ORR_imm_T1 = 0xF040,
589 OP_MOV_imm_T2 = 0xF040,
590 OP_MVN_imm = 0xF060,
591 OP_EOR_imm_T1 = 0xF080,
592 OP_ADD_imm_T3 = 0xF100,
593 OP_ADD_S_imm_T3 = 0xF110,
594 OP_CMN_imm = 0xF110,
595 OP_SUB_imm_T3 = 0xF1A0,
596 OP_SUB_S_imm_T3 = 0xF1B0,
597 OP_CMP_imm_T2 = 0xF1B0,
598 OP_RSB_imm_T2 = 0xF1C0,
599 OP_ADD_imm_T4 = 0xF200,
600 OP_MOV_imm_T3 = 0xF240,
601 OP_SUB_imm_T4 = 0xF2A0,
602 OP_MOVT = 0xF2C0,
603 OP_NOP_T2a = 0xF3AF,
604 OP_LDRB_imm_T3 = 0xF810,
605 OP_LDRB_reg_T2 = 0xF810,
606 OP_LDRH_reg_T2 = 0xF830,
607 OP_LDRH_imm_T3 = 0xF830,
608 OP_STR_imm_T4 = 0xF840,
609 OP_STR_reg_T2 = 0xF840,
610 OP_LDR_imm_T4 = 0xF850,
611 OP_LDR_reg_T2 = 0xF850,
612 OP_LDRB_imm_T2 = 0xF890,
613 OP_LDRH_imm_T2 = 0xF8B0,
614 OP_STR_imm_T3 = 0xF8C0,
615 OP_LDR_imm_T3 = 0xF8D0,
616 OP_LSL_reg_T2 = 0xFA00,
617 OP_LSR_reg_T2 = 0xFA20,
618 OP_ASR_reg_T2 = 0xFA40,
619 OP_ROR_reg_T2 = 0xFA60,
620 OP_CLZ = 0xFAB0,
621 OP_SMULL_T1 = 0xFB80,
622 } OpcodeID1;
623
624 typedef enum {
625 OP_VADD_T2b = 0x0A00,
626 OP_VDIVb = 0x0A00,
627 OP_VLDRb = 0x0A00,
628 OP_VMOV_IMM_T2b = 0x0A00,
629 OP_VMUL_T2b = 0x0A00,
630 OP_VSTRb = 0x0A00,
631 OP_VMOV_CtoSb = 0x0A10,
632 OP_VMOV_StoCb = 0x0A10,
633 OP_VMRSb = 0x0A10,
634 OP_VCMPb = 0x0A40,
635 OP_VCVT_FPIVFPb = 0x0A40,
636 OP_VSUB_T2b = 0x0A40,
637 OP_NOP_T2b = 0x8000,
638 OP_B_T3b = 0x8000,
639 OP_B_T4b = 0x9000,
640 } OpcodeID2;
641
642 struct FourFours {
643 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
644 {
645 m_u.f0 = f0;
646 m_u.f1 = f1;
647 m_u.f2 = f2;
648 m_u.f3 = f3;
649 }
650
651 union {
652 unsigned value;
653 struct {
654 unsigned f0 : 4;
655 unsigned f1 : 4;
656 unsigned f2 : 4;
657 unsigned f3 : 4;
658 };
659 } m_u;
660 };
661
662 class ARMInstructionFormatter;
663
664 // false means else!
665 bool ifThenElseConditionBit(Condition condition, bool isIf)
666 {
667 return isIf ? (condition & 1) : !(condition & 1);
668 }
669 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
670 {
671 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
672 | (ifThenElseConditionBit(condition, inst3if) << 2)
673 | (ifThenElseConditionBit(condition, inst4if) << 1)
674 | 1;
675 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
676 return (condition << 4) | mask;
677 }
678 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
679 {
680 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
681 | (ifThenElseConditionBit(condition, inst3if) << 2)
682 | 2;
683 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
684 return (condition << 4) | mask;
685 }
686 uint8_t ifThenElse(Condition condition, bool inst2if)
687 {
688 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
689 | 4;
690 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
691 return (condition << 4) | mask;
692 }
693
694 uint8_t ifThenElse(Condition condition)
695 {
696 int mask = 8;
697 return (condition << 4) | mask;
698 }
699
700 public:
701
702 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
703 {
704 // Rd can only be SP if Rn is also SP.
705 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
706 ASSERT(rd != ARMRegisters::pc);
707 ASSERT(rn != ARMRegisters::pc);
708 ASSERT(imm.isValid());
709
710 if (rn == ARMRegisters::sp) {
711 if (!(rd & 8) && imm.isUInt10()) {
712 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
713 return;
714 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
715 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
716 return;
717 }
718 } else if (!((rd | rn) & 8)) {
719 if (imm.isUInt3()) {
720 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
721 return;
722 } else if ((rd == rn) && imm.isUInt8()) {
723 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
724 return;
725 }
726 }
727
728 if (imm.isEncodedImm())
729 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
730 else {
731 ASSERT(imm.isUInt12());
732 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
733 }
734 }
735
736 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
737 {
738 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
739 ASSERT(rd != ARMRegisters::pc);
740 ASSERT(rn != ARMRegisters::pc);
741 ASSERT(!BadReg(rm));
742 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
743 }
744
745 // NOTE: In an IT block, add doesn't modify the flags register.
746 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
747 {
748 if (rd == rn)
749 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
750 else if (rd == rm)
751 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
752 else if (!((rd | rn | rm) & 8))
753 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
754 else
755 add(rd, rn, rm, ShiftTypeAndAmount());
756 }
757
758 // Not allowed in an IT (if then) block.
759 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
760 {
761 // Rd can only be SP if Rn is also SP.
762 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
763 ASSERT(rd != ARMRegisters::pc);
764 ASSERT(rn != ARMRegisters::pc);
765 ASSERT(imm.isEncodedImm());
766
767 if (!((rd | rn) & 8)) {
768 if (imm.isUInt3()) {
769 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
770 return;
771 } else if ((rd == rn) && imm.isUInt8()) {
772 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
773 return;
774 }
775 }
776
777 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
778 }
779
780 // Not allowed in an IT (if then) block?
781 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
782 {
783 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
784 ASSERT(rd != ARMRegisters::pc);
785 ASSERT(rn != ARMRegisters::pc);
786 ASSERT(!BadReg(rm));
787 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
788 }
789
790 // Not allowed in an IT (if then) block.
791 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
792 {
793 if (!((rd | rn | rm) & 8))
794 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
795 else
796 add_S(rd, rn, rm, ShiftTypeAndAmount());
797 }
798
799 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
800 {
801 ASSERT(!BadReg(rd));
802 ASSERT(!BadReg(rn));
803 ASSERT(imm.isEncodedImm());
804 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
805 }
806
807 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
808 {
809 ASSERT(!BadReg(rd));
810 ASSERT(!BadReg(rn));
811 ASSERT(!BadReg(rm));
812 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
813 }
814
815 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
816 {
817 if ((rd == rn) && !((rd | rm) & 8))
818 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
819 else if ((rd == rm) && !((rd | rn) & 8))
820 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
821 else
822 ARM_and(rd, rn, rm, ShiftTypeAndAmount());
823 }
824
825 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
826 {
827 ASSERT(!BadReg(rd));
828 ASSERT(!BadReg(rm));
829 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
830 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
831 }
832
833 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
834 {
835 ASSERT(!BadReg(rd));
836 ASSERT(!BadReg(rn));
837 ASSERT(!BadReg(rm));
838 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
839 }
840
841 // Only allowed in IT (if then) block if last instruction.
842 ALWAYS_INLINE AssemblerLabel b()
843 {
844 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
845 return m_formatter.label();
846 }
847
848 // Only allowed in IT (if then) block if last instruction.
849 ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
850 {
851 ASSERT(rm != ARMRegisters::pc);
852 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
853 return m_formatter.label();
854 }
855
856 // Only allowed in IT (if then) block if last instruction.
857 ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
858 {
859 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
860 return m_formatter.label();
861 }
862
863 void bkpt(uint8_t imm=0)
864 {
865 m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
866 }
867
868 ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
869 {
870 ASSERT(!BadReg(rd));
871 ASSERT(!BadReg(rm));
872 m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
873 }
874
875 ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
876 {
877 ASSERT(rn != ARMRegisters::pc);
878 ASSERT(imm.isEncodedImm());
879
880 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
881 }
882
883 ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
884 {
885 ASSERT(rn != ARMRegisters::pc);
886 ASSERT(imm.isEncodedImm());
887
888 if (!(rn & 8) && imm.isUInt8())
889 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
890 else
891 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
892 }
893
894 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
895 {
896 ASSERT(rn != ARMRegisters::pc);
897 ASSERT(!BadReg(rm));
898 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
899 }
900
901 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
902 {
903 if ((rn | rm) & 8)
904 cmp(rn, rm, ShiftTypeAndAmount());
905 else
906 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
907 }
908
909 // xor is not spelled with an 'e'. :-(
910 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
911 {
912 ASSERT(!BadReg(rd));
913 ASSERT(!BadReg(rn));
914 ASSERT(imm.isEncodedImm());
915 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
916 }
917
918 // xor is not spelled with an 'e'. :-(
919 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
920 {
921 ASSERT(!BadReg(rd));
922 ASSERT(!BadReg(rn));
923 ASSERT(!BadReg(rm));
924 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
925 }
926
927 // xor is not spelled with an 'e'. :-(
928 void eor(RegisterID rd, RegisterID rn, RegisterID rm)
929 {
930 if ((rd == rn) && !((rd | rm) & 8))
931 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
932 else if ((rd == rm) && !((rd | rn) & 8))
933 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
934 else
935 eor(rd, rn, rm, ShiftTypeAndAmount());
936 }
937
938 ALWAYS_INLINE void it(Condition cond)
939 {
940 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
941 }
942
943 ALWAYS_INLINE void it(Condition cond, bool inst2if)
944 {
945 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
946 }
947
948 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
949 {
950 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
951 }
952
953 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
954 {
955 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
956 }
957
958 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
959 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
960 {
961 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
962 ASSERT(imm.isUInt12());
963
964 if (!((rt | rn) & 8) && imm.isUInt7())
965 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
966 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
967 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
968 else
969 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
970 }
971
972 ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
973 {
974 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
975 ASSERT(imm.isUInt7());
976 ASSERT(!((rt | rn) & 8));
977 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
978 }
979
980 // If index is set, this is a regular offset or a pre-indexed load;
981 // if index is not set then is is a post-index load.
982 //
983 // If wback is set rn is updated - this is a pre or post index load,
984 // if wback is not set this is a regular offset memory access.
985 //
986 // (-255 <= offset <= 255)
987 // _reg = REG[rn]
988 // _tmp = _reg + offset
989 // MEM[index ? _tmp : _reg] = REG[rt]
990 // if (wback) REG[rn] = _tmp
991 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
992 {
993 ASSERT(rt != ARMRegisters::pc);
994 ASSERT(rn != ARMRegisters::pc);
995 ASSERT(index || wback);
996 ASSERT(!wback | (rt != rn));
997
998 bool add = true;
999 if (offset < 0) {
1000 add = false;
1001 offset = -offset;
1002 }
1003 ASSERT((offset & ~0xff) == 0);
1004
1005 offset |= (wback << 8);
1006 offset |= (add << 9);
1007 offset |= (index << 10);
1008 offset |= (1 << 11);
1009
1010 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1011 }
1012
1013 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1014 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1015 {
1016 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1017 ASSERT(!BadReg(rm));
1018 ASSERT(shift <= 3);
1019
1020 if (!shift && !((rt | rn | rm) & 8))
1021 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
1022 else
1023 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1024 }
1025
1026 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1027 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1028 {
1029 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1030 ASSERT(imm.isUInt12());
1031
1032 if (!((rt | rn) & 8) && imm.isUInt6())
1033 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
1034 else
1035 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1036 }
1037
1038 // If index is set, this is a regular offset or a pre-indexed load;
1039 // if index is not set then is is a post-index load.
1040 //
1041 // If wback is set rn is updated - this is a pre or post index load,
1042 // if wback is not set this is a regular offset memory access.
1043 //
1044 // (-255 <= offset <= 255)
1045 // _reg = REG[rn]
1046 // _tmp = _reg + offset
1047 // MEM[index ? _tmp : _reg] = REG[rt]
1048 // if (wback) REG[rn] = _tmp
1049 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1050 {
1051 ASSERT(rt != ARMRegisters::pc);
1052 ASSERT(rn != ARMRegisters::pc);
1053 ASSERT(index || wback);
1054 ASSERT(!wback | (rt != rn));
1055
1056 bool add = true;
1057 if (offset < 0) {
1058 add = false;
1059 offset = -offset;
1060 }
1061 ASSERT((offset & ~0xff) == 0);
1062
1063 offset |= (wback << 8);
1064 offset |= (add << 9);
1065 offset |= (index << 10);
1066 offset |= (1 << 11);
1067
1068 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1069 }
1070
1071 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1072 {
1073 ASSERT(!BadReg(rt)); // Memory hint
1074 ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
1075 ASSERT(!BadReg(rm));
1076 ASSERT(shift <= 3);
1077
1078 if (!shift && !((rt | rn | rm) & 8))
1079 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
1080 else
1081 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1082 }
1083
1084 void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1085 {
1086 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1087 ASSERT(imm.isUInt12());
1088
1089 if (!((rt | rn) & 8) && imm.isUInt5())
1090 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
1091 else
1092 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
1093 }
1094
1095 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1096 {
1097 ASSERT(rt != ARMRegisters::pc);
1098 ASSERT(rn != ARMRegisters::pc);
1099 ASSERT(index || wback);
1100 ASSERT(!wback | (rt != rn));
1101
1102 bool add = true;
1103 if (offset < 0) {
1104 add = false;
1105 offset = -offset;
1106 }
1107
1108 ASSERT(!(offset & ~0xff));
1109
1110 offset |= (wback << 8);
1111 offset |= (add << 9);
1112 offset |= (index << 10);
1113 offset |= (1 << 11);
1114
1115 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1116 }
1117
1118 ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1119 {
1120 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1121 ASSERT(!BadReg(rm));
1122 ASSERT(shift <= 3);
1123
1124 if (!shift && !((rt | rn | rm) & 8))
1125 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
1126 else
1127 m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1128 }
1129
1130 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1131 {
1132 ASSERT(!BadReg(rd));
1133 ASSERT(!BadReg(rm));
1134 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
1135 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1136 }
1137
1138 ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
1139 {
1140 ASSERT(!BadReg(rd));
1141 ASSERT(!BadReg(rn));
1142 ASSERT(!BadReg(rm));
1143 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1144 }
1145
1146 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1147 {
1148 ASSERT(!BadReg(rd));
1149 ASSERT(!BadReg(rm));
1150 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
1151 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1152 }
1153
1154 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
1155 {
1156 ASSERT(!BadReg(rd));
1157 ASSERT(!BadReg(rn));
1158 ASSERT(!BadReg(rm));
1159 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1160 }
1161
1162 ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
1163 {
1164 ASSERT(imm.isValid());
1165 ASSERT(!imm.isEncodedImm());
1166 ASSERT(!BadReg(rd));
1167
1168 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
1169 }
1170
1171 ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
1172 {
1173 ASSERT(imm.isValid());
1174 ASSERT(!BadReg(rd));
1175
1176 if ((rd < 8) && imm.isUInt8())
1177 m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
1178 else if (imm.isEncodedImm())
1179 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
1180 else
1181 movT3(rd, imm);
1182 }
1183
1184 ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
1185 {
1186 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
1187 }
1188
1189 ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
1190 {
1191 ASSERT(imm.isUInt16());
1192 ASSERT(!BadReg(rd));
1193 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
1194 }
1195
1196 ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
1197 {
1198 ASSERT(imm.isEncodedImm());
1199 ASSERT(!BadReg(rd));
1200
1201 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
1202 }
1203
1204 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
1205 {
1206 ASSERT(!BadReg(rd));
1207 ASSERT(!BadReg(rm));
1208 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1209 }
1210
1211 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
1212 {
1213 if (!((rd | rm) & 8))
1214 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1215 else
1216 mvn(rd, rm, ShiftTypeAndAmount());
1217 }
1218
1219 ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
1220 {
1221 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1222 sub(rd, zero, rm);
1223 }
1224
1225 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1226 {
1227 ASSERT(!BadReg(rd));
1228 ASSERT(!BadReg(rn));
1229 ASSERT(imm.isEncodedImm());
1230 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
1231 }
1232
1233 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1234 {
1235 ASSERT(!BadReg(rd));
1236 ASSERT(!BadReg(rn));
1237 ASSERT(!BadReg(rm));
1238 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1239 }
1240
1241 void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1242 {
1243 if ((rd == rn) && !((rd | rm) & 8))
1244 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1245 else if ((rd == rm) && !((rd | rn) & 8))
1246 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1247 else
1248 orr(rd, rn, rm, ShiftTypeAndAmount());
1249 }
1250
1251 ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1252 {
1253 ASSERT(!BadReg(rd));
1254 ASSERT(!BadReg(rn));
1255 ASSERT(!BadReg(rm));
1256 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1257 }
1258
1259 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
1260 {
1261 if ((rd == rn) && !((rd | rm) & 8))
1262 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1263 else if ((rd == rm) && !((rd | rn) & 8))
1264 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1265 else
1266 orr_S(rd, rn, rm, ShiftTypeAndAmount());
1267 }
1268
1269 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1270 {
1271 ASSERT(!BadReg(rd));
1272 ASSERT(!BadReg(rm));
1273 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
1274 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1275 }
1276
1277 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
1278 {
1279 ASSERT(!BadReg(rd));
1280 ASSERT(!BadReg(rn));
1281 ASSERT(!BadReg(rm));
1282 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1283 }
1284
1285 ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
1286 {
1287 ASSERT(!BadReg(rdLo));
1288 ASSERT(!BadReg(rdHi));
1289 ASSERT(!BadReg(rn));
1290 ASSERT(!BadReg(rm));
1291 ASSERT(rdLo != rdHi);
1292 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
1293 }
1294
1295 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1296 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1297 {
1298 ASSERT(rt != ARMRegisters::pc);
1299 ASSERT(rn != ARMRegisters::pc);
1300 ASSERT(imm.isUInt12());
1301
1302 if (!((rt | rn) & 8) && imm.isUInt7())
1303 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1304 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1305 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1306 else
1307 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
1308 }
1309
1310 // If index is set, this is a regular offset or a pre-indexed store;
1311 // if index is not set then is is a post-index store.
1312 //
1313 // If wback is set rn is updated - this is a pre or post index store,
1314 // if wback is not set this is a regular offset memory access.
1315 //
1316 // (-255 <= offset <= 255)
1317 // _reg = REG[rn]
1318 // _tmp = _reg + offset
1319 // MEM[index ? _tmp : _reg] = REG[rt]
1320 // if (wback) REG[rn] = _tmp
1321 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1322 {
1323 ASSERT(rt != ARMRegisters::pc);
1324 ASSERT(rn != ARMRegisters::pc);
1325 ASSERT(index || wback);
1326 ASSERT(!wback | (rt != rn));
1327
1328 bool add = true;
1329 if (offset < 0) {
1330 add = false;
1331 offset = -offset;
1332 }
1333 ASSERT((offset & ~0xff) == 0);
1334
1335 offset |= (wback << 8);
1336 offset |= (add << 9);
1337 offset |= (index << 10);
1338 offset |= (1 << 11);
1339
1340 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1341 }
1342
1343 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1344 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1345 {
1346 ASSERT(rn != ARMRegisters::pc);
1347 ASSERT(!BadReg(rm));
1348 ASSERT(shift <= 3);
1349
1350 if (!shift && !((rt | rn | rm) & 8))
1351 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
1352 else
1353 m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1354 }
1355
1356 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1357 {
1358 // Rd can only be SP if Rn is also SP.
1359 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1360 ASSERT(rd != ARMRegisters::pc);
1361 ASSERT(rn != ARMRegisters::pc);
1362 ASSERT(imm.isValid());
1363
1364 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1365 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1366 return;
1367 } else if (!((rd | rn) & 8)) {
1368 if (imm.isUInt3()) {
1369 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1370 return;
1371 } else if ((rd == rn) && imm.isUInt8()) {
1372 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1373 return;
1374 }
1375 }
1376
1377 if (imm.isEncodedImm())
1378 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
1379 else {
1380 ASSERT(imm.isUInt12());
1381 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
1382 }
1383 }
1384
1385 ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1386 {
1387 ASSERT(rd != ARMRegisters::pc);
1388 ASSERT(rn != ARMRegisters::pc);
1389 ASSERT(imm.isValid());
1390 ASSERT(imm.isUInt12());
1391
1392 if (!((rd | rn) & 8) && !imm.getUInt12())
1393 m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
1394 else
1395 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
1396 }
1397
1398 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1399 {
1400 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1401 ASSERT(rd != ARMRegisters::pc);
1402 ASSERT(rn != ARMRegisters::pc);
1403 ASSERT(!BadReg(rm));
1404 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1405 }
1406
1407 // NOTE: In an IT block, add doesn't modify the flags register.
1408 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1409 {
1410 if (!((rd | rn | rm) & 8))
1411 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1412 else
1413 sub(rd, rn, rm, ShiftTypeAndAmount());
1414 }
1415
1416 // Not allowed in an IT (if then) block.
1417 void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1418 {
1419 // Rd can only be SP if Rn is also SP.
1420 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1421 ASSERT(rd != ARMRegisters::pc);
1422 ASSERT(rn != ARMRegisters::pc);
1423 ASSERT(imm.isValid());
1424
1425 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1426 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1427 return;
1428 } else if (!((rd | rn) & 8)) {
1429 if (imm.isUInt3()) {
1430 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1431 return;
1432 } else if ((rd == rn) && imm.isUInt8()) {
1433 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1434 return;
1435 }
1436 }
1437
1438 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
1439 }
1440
1441 // Not allowed in an IT (if then) block?
1442 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1443 {
1444 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1445 ASSERT(rd != ARMRegisters::pc);
1446 ASSERT(rn != ARMRegisters::pc);
1447 ASSERT(!BadReg(rm));
1448 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1449 }
1450
1451 // Not allowed in an IT (if then) block.
1452 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1453 {
1454 if (!((rd | rn | rm) & 8))
1455 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1456 else
1457 sub_S(rd, rn, rm, ShiftTypeAndAmount());
1458 }
1459
1460 ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
1461 {
1462 ASSERT(!BadReg(rn));
1463 ASSERT(imm.isEncodedImm());
1464
1465 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
1466 }
1467
1468 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1469 {
1470 ASSERT(!BadReg(rn));
1471 ASSERT(!BadReg(rm));
1472 m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
1473 }
1474
1475 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1476 {
1477 if ((rn | rm) & 8)
1478 tst(rn, rm, ShiftTypeAndAmount());
1479 else
1480 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
1481 }
1482
1483 void vadd_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1484 {
1485 m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
1486 }
1487
1488 void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1489 {
1490 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
1491 }
1492
1493 void vcmpz_F64(FPDoubleRegisterID rd)
1494 {
1495 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
1496 }
1497
1498 void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1499 {
1500 // boolean values are 64bit (toInt, unsigned, roundZero)
1501 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
1502 }
1503
1504 void vcvtr_S32_F64(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1505 {
1506 // boolean values are 64bit (toInt, unsigned, roundZero)
1507 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
1508 }
1509
1510 void vdiv_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1511 {
1512 m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
1513 }
1514
1515 void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1516 {
1517 m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
1518 }
1519
1520 void vmov(RegisterID rd, FPSingleRegisterID rn)
1521 {
1522 ASSERT(!BadReg(rd));
1523 m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rn, rd, VFPOperand(0));
1524 }
1525
1526 void vmov(FPSingleRegisterID rd, RegisterID rn)
1527 {
1528 ASSERT(!BadReg(rn));
1529 m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rd, rn, VFPOperand(0));
1530 }
1531
1532 void vmrs(RegisterID reg = ARMRegisters::pc)
1533 {
1534 ASSERT(reg != ARMRegisters::sp);
1535 m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
1536 }
1537
1538 void vmul_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1539 {
1540 m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
1541 }
1542
1543 void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1544 {
1545 m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
1546 }
1547
1548 void vsub_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1549 {
1550 m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
1551 }
1552
1553 void nop()
1554 {
1555 m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
1556 }
1557
1558 AssemblerLabel label()
1559 {
1560 return m_formatter.label();
1561 }
1562
1563 AssemblerLabel align(int alignment)
1564 {
1565 while (!m_formatter.isAligned(alignment))
1566 bkpt();
1567
1568 return label();
1569 }
1570
1571 static void* getRelocatedAddress(void* code, AssemblerLabel label)
1572 {
1573 ASSERT(label.isSet());
1574 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
1575 }
1576
1577 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
1578 {
1579 return b.m_offset - a.m_offset;
1580 }
1581
1582 int executableOffsetFor(int location)
1583 {
1584 if (!location)
1585 return 0;
1586 return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
1587 }
1588
1589 int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
1590
1591 // Assembler admin methods:
1592
1593 static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
1594 {
1595 return a.from() < b.from();
1596 }
1597
1598 bool canCompact(JumpType jumpType)
1599 {
1600 // The following cannot be compacted:
1601 // JumpFixed: represents custom jump sequence
1602 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
1603 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
1604 return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
1605 }
1606
1607 JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
1608 {
1609 if (jumpType == JumpFixed)
1610 return LinkInvalid;
1611
1612 // for patchable jump we must leave space for the longest code sequence
1613 if (jumpType == JumpNoConditionFixedSize)
1614 return LinkBX;
1615 if (jumpType == JumpConditionFixedSize)
1616 return LinkConditionalBX;
1617
1618 const int paddingSize = JUMP_ENUM_SIZE(jumpType);
1619 bool mayTriggerErrata = false;
1620
1621 if (jumpType == JumpCondition) {
1622 // 2-byte conditional T1
1623 const uint16_t* jumpT1Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
1624 if (canBeJumpT1(jumpT1Location, to))
1625 return LinkJumpT1;
1626 // 4-byte conditional T3
1627 const uint16_t* jumpT3Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
1628 if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
1629 if (!mayTriggerErrata)
1630 return LinkJumpT3;
1631 }
1632 // 4-byte conditional T4 with IT
1633 const uint16_t* conditionalJumpT4Location =
1634 reinterpret_cast<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
1635 if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
1636 if (!mayTriggerErrata)
1637 return LinkConditionalJumpT4;
1638 }
1639 } else {
1640 // 2-byte unconditional T2
1641 const uint16_t* jumpT2Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
1642 if (canBeJumpT2(jumpT2Location, to))
1643 return LinkJumpT2;
1644 // 4-byte unconditional T4
1645 const uint16_t* jumpT4Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
1646 if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
1647 if (!mayTriggerErrata)
1648 return LinkJumpT4;
1649 }
1650 // use long jump sequence
1651 return LinkBX;
1652 }
1653
1654 ASSERT(jumpType == JumpCondition);
1655 return LinkConditionalBX;
1656 }
1657
1658 JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
1659 {
1660 JumpLinkType linkType = computeJumpType(record.type(), from, to);
1661 record.setLinkType(linkType);
1662 return linkType;
1663 }
1664
1665 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
1666 {
1667 int32_t ptr = regionStart / sizeof(int32_t);
1668 const int32_t end = regionEnd / sizeof(int32_t);
1669 int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
1670 while (ptr < end)
1671 offsets[ptr++] = offset;
1672 }
1673
1674 Vector<LinkRecord>& jumpsToLink()
1675 {
1676 std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
1677 return m_jumpsToLink;
1678 }
1679
1680 void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
1681 {
1682 switch (record.linkType()) {
1683 case LinkJumpT1:
1684 linkJumpT1(record.condition(), reinterpret_cast<uint16_t*>(from), to);
1685 break;
1686 case LinkJumpT2:
1687 linkJumpT2(reinterpret_cast<uint16_t*>(from), to);
1688 break;
1689 case LinkJumpT3:
1690 linkJumpT3(record.condition(), reinterpret_cast<uint16_t*>(from), to);
1691 break;
1692 case LinkJumpT4:
1693 linkJumpT4(reinterpret_cast<uint16_t*>(from), to);
1694 break;
1695 case LinkConditionalJumpT4:
1696 linkConditionalJumpT4(record.condition(), reinterpret_cast<uint16_t*>(from), to);
1697 break;
1698 case LinkConditionalBX:
1699 linkConditionalBX(record.condition(), reinterpret_cast<uint16_t*>(from), to);
1700 break;
1701 case LinkBX:
1702 linkBX(reinterpret_cast<uint16_t*>(from), to);
1703 break;
1704 default:
1705 ASSERT_NOT_REACHED();
1706 break;
1707 }
1708 }
1709
1710 void* unlinkedCode() { return m_formatter.data(); }
1711 size_t codeSize() const { return m_formatter.codeSize(); }
1712
1713 static unsigned getCallReturnOffset(AssemblerLabel call)
1714 {
1715 ASSERT(call.isSet());
1716 return call.m_offset;
1717 }
1718
1719 // Linking & patching:
1720 //
1721 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1722 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1723 // code has been finalized it is (platform support permitting) within a non-
1724 // writable region of memory; to modify the code in an execute-only execuable
1725 // pool the 'repatch' and 'relink' methods should be used.
1726
1727 void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
1728 {
1729 ASSERT(to.isSet());
1730 ASSERT(from.isSet());
1731 m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
1732 }
1733
1734 static void linkJump(void* code, AssemblerLabel from, void* to)
1735 {
1736 ASSERT(from.isSet());
1737
1738 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
1739 linkJumpAbsolute(location, to);
1740 }
1741
1742 static void linkCall(void* code, AssemblerLabel from, void* to)
1743 {
1744 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
1745 ASSERT(from.isSet());
1746 ASSERT(reinterpret_cast<intptr_t>(to) & 1);
1747
1748 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
1749 }
1750
1751 static void linkPointer(void* code, AssemblerLabel where, void* value)
1752 {
1753 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
1754 }
1755
1756 static void relinkJump(void* from, void* to)
1757 {
1758 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
1759 ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
1760
1761 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
1762
1763 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
1764 }
1765
1766 static void relinkCall(void* from, void* to)
1767 {
1768 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
1769 ASSERT(reinterpret_cast<intptr_t>(to) & 1);
1770
1771 setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
1772 }
1773
1774 static void repatchInt32(void* where, int32_t value)
1775 {
1776 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
1777
1778 setInt32(where, value);
1779 }
1780
1781 static void repatchCompact(void* where, int32_t value)
1782 {
1783 ASSERT(value >= 0);
1784 ASSERT(ARMThumbImmediate::makeUInt12(value).isUInt7());
1785 setUInt7ForLoad(where, ARMThumbImmediate::makeUInt12(value));
1786 }
1787
1788 static void repatchPointer(void* where, void* value)
1789 {
1790 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
1791
1792 setPointer(where, value);
1793 }
1794
1795 static void* readPointer(void* where)
1796 {
1797 return reinterpret_cast<void*>(readInt32(where));
1798 }
1799
1800 private:
1801 // VFP operations commonly take one or more 5-bit operands, typically representing a
1802 // floating point register number. This will commonly be encoded in the instruction
1803 // in two parts, with one single bit field, and one 4-bit field. In the case of
1804 // double precision operands the high bit of the register number will be encoded
1805 // separately, and for single precision operands the high bit of the register number
1806 // will be encoded individually.
1807 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
1808 // field to be encoded together in the instruction (the low 4-bits of a double
1809 // register number, or the high 4-bits of a single register number), and bit 4
1810 // contains the bit value to be encoded individually.
1811 struct VFPOperand {
1812 explicit VFPOperand(uint32_t value)
1813 : m_value(value)
1814 {
1815 ASSERT(!(m_value & ~0x1f));
1816 }
1817
1818 VFPOperand(FPDoubleRegisterID reg)
1819 : m_value(reg)
1820 {
1821 }
1822
1823 VFPOperand(RegisterID reg)
1824 : m_value(reg)
1825 {
1826 }
1827
1828 VFPOperand(FPSingleRegisterID reg)
1829 : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
1830 {
1831 }
1832
1833 uint32_t bits1()
1834 {
1835 return m_value >> 4;
1836 }
1837
1838 uint32_t bits4()
1839 {
1840 return m_value & 0xf;
1841 }
1842
1843 uint32_t m_value;
1844 };
1845
1846 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
1847 {
1848 // Cannot specify rounding when converting to float.
1849 ASSERT(toInteger || !isRoundZero);
1850
1851 uint32_t op = 0x8;
1852 if (toInteger) {
1853 // opc2 indicates both toInteger & isUnsigned.
1854 op |= isUnsigned ? 0x4 : 0x5;
1855 // 'op' field in instruction is isRoundZero
1856 if (isRoundZero)
1857 op |= 0x10;
1858 } else {
1859 // 'op' field in instruction is isUnsigned
1860 if (!isUnsigned)
1861 op |= 0x10;
1862 }
1863 return VFPOperand(op);
1864 }
1865
1866 static void setInt32(void* code, uint32_t value)
1867 {
1868 uint16_t* location = reinterpret_cast<uint16_t*>(code);
1869 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
1870
1871 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
1872 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
1873 location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
1874 location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
1875 location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
1876 location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
1877
1878 ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
1879 }
1880
1881 static int32_t readInt32(void* code)
1882 {
1883 uint16_t* location = reinterpret_cast<uint16_t*>(code);
1884 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
1885
1886 ARMThumbImmediate lo16;
1887 ARMThumbImmediate hi16;
1888 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
1889 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
1890 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
1891 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
1892 uint32_t result = hi16.asUInt16();
1893 result <<= 16;
1894 result |= lo16.asUInt16();
1895 return static_cast<int32_t>(result);
1896 }
1897
1898 static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
1899 {
1900 // Requires us to have planted a LDR_imm_T1
1901 ASSERT(imm.isValid());
1902 ASSERT(imm.isUInt7());
1903 uint16_t* location = reinterpret_cast<uint16_t*>(code);
1904 location[0] |= (imm.getUInt7() >> 2) << 6;
1905 ExecutableAllocator::cacheFlush(location, sizeof(uint16_t));
1906 }
1907
1908 static void setPointer(void* code, void* value)
1909 {
1910 setInt32(code, reinterpret_cast<uint32_t>(value));
1911 }
1912
1913 static bool isB(void* address)
1914 {
1915 uint16_t* instruction = static_cast<uint16_t*>(address);
1916 return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
1917 }
1918
1919 static bool isBX(void* address)
1920 {
1921 uint16_t* instruction = static_cast<uint16_t*>(address);
1922 return (instruction[0] & 0xff87) == OP_BX;
1923 }
1924
1925 static bool isMOV_imm_T3(void* address)
1926 {
1927 uint16_t* instruction = static_cast<uint16_t*>(address);
1928 return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
1929 }
1930
1931 static bool isMOVT(void* address)
1932 {
1933 uint16_t* instruction = static_cast<uint16_t*>(address);
1934 return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
1935 }
1936
1937 static bool isNOP_T1(void* address)
1938 {
1939 uint16_t* instruction = static_cast<uint16_t*>(address);
1940 return instruction[0] == OP_NOP_T1;
1941 }
1942
1943 static bool isNOP_T2(void* address)
1944 {
1945 uint16_t* instruction = static_cast<uint16_t*>(address);
1946 return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
1947 }
1948
1949 static bool canBeJumpT1(const uint16_t* instruction, const void* target)
1950 {
1951 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
1952 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
1953
1954 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
1955 // It does not appear to be documented in the ARM ARM (big surprise), but
1956 // for OP_B_T1 the branch displacement encoded in the instruction is 2
1957 // less than the actual displacement.
1958 relative -= 2;
1959 return ((relative << 23) >> 23) == relative;
1960 }
1961
1962 static bool canBeJumpT2(const uint16_t* instruction, const void* target)
1963 {
1964 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
1965 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
1966
1967 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
1968 // It does not appear to be documented in the ARM ARM (big surprise), but
1969 // for OP_B_T2 the branch displacement encoded in the instruction is 2
1970 // less than the actual displacement.
1971 relative -= 2;
1972 return ((relative << 20) >> 20) == relative;
1973 }
1974
1975 static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
1976 {
1977 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
1978 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
1979
1980 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
1981 // From Cortex-A8 errata:
1982 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
1983 // the target of the branch falls within the first region it is
1984 // possible for the processor to incorrectly determine the branch
1985 // instruction, and it is also possible in some cases for the processor
1986 // to enter a deadlock state.
1987 // The instruction is spanning two pages if it ends at an address ending 0x002
1988 bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
1989 mayTriggerErrata = spansTwo4K;
1990 // The target is in the first page if the jump branch back by [3..0x1002] bytes
1991 bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
1992 bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
1993 return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
1994 }
1995
1996 static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
1997 {
1998 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
1999 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2000
2001 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2002 // From Cortex-A8 errata:
2003 // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
2004 // the target of the branch falls within the first region it is
2005 // possible for the processor to incorrectly determine the branch
2006 // instruction, and it is also possible in some cases for the processor
2007 // to enter a deadlock state.
2008 // The instruction is spanning two pages if it ends at an address ending 0x002
2009 bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
2010 mayTriggerErrata = spansTwo4K;
2011 // The target is in the first page if the jump branch back by [3..0x1002] bytes
2012 bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
2013 bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
2014 return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
2015 }
2016
2017 void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
2018 {
2019 // FIMXE: this should be up in the MacroAssembler layer. :-(
2020 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2021 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2022 ASSERT(canBeJumpT1(instruction, target));
2023
2024 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2025 // It does not appear to be documented in the ARM ARM (big surprise), but
2026 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2027 // less than the actual displacement.
2028 relative -= 2;
2029
2030 // All branch offsets should be an even distance.
2031 ASSERT(!(relative & 1));
2032 instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
2033 }
2034
2035 static void linkJumpT2(uint16_t* instruction, void* target)
2036 {
2037 // FIMXE: this should be up in the MacroAssembler layer. :-(
2038 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2039 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2040 ASSERT(canBeJumpT2(instruction, target));
2041
2042 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2043 // It does not appear to be documented in the ARM ARM (big surprise), but
2044 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2045 // less than the actual displacement.
2046 relative -= 2;
2047
2048 // All branch offsets should be an even distance.
2049 ASSERT(!(relative & 1));
2050 instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
2051 }
2052
2053 void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
2054 {
2055 // FIMXE: this should be up in the MacroAssembler layer. :-(
2056 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2057 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2058 bool scratch;
2059 UNUSED_PARAM(scratch);
2060 ASSERT(canBeJumpT3(instruction, target, scratch));
2061
2062 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2063
2064 // All branch offsets should be an even distance.
2065 ASSERT(!(relative & 1));
2066 instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2067 instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2068 }
2069
2070 static void linkJumpT4(uint16_t* instruction, void* target)
2071 {
2072 // FIMXE: this should be up in the MacroAssembler layer. :-(
2073 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2074 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2075 bool scratch;
2076 UNUSED_PARAM(scratch);
2077 ASSERT(canBeJumpT4(instruction, target, scratch));
2078
2079 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2080 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2081 if (relative >= 0)
2082 relative ^= 0xC00000;
2083
2084 // All branch offsets should be an even distance.
2085 ASSERT(!(relative & 1));
2086 instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2087 instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2088 }
2089
2090 void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
2091 {
2092 // FIMXE: this should be up in the MacroAssembler layer. :-(
2093 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2094 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2095
2096 instruction[-3] = ifThenElse(cond) | OP_IT;
2097 linkJumpT4(instruction, target);
2098 }
2099
2100 static void linkBX(uint16_t* instruction, void* target)
2101 {
2102 // FIMXE: this should be up in the MacroAssembler layer. :-(
2103 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2104 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2105
2106 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2107 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2108 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2109 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2110 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2111 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2112 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2113 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2114 }
2115
2116 void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
2117 {
2118 // FIMXE: this should be up in the MacroAssembler layer. :-(
2119 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2120 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2121
2122 linkBX(instruction, target);
2123 instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
2124 }
2125
2126 static void linkJumpAbsolute(uint16_t* instruction, void* target)
2127 {
2128 // FIMXE: this should be up in the MacroAssembler layer. :-(
2129 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2130 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2131
2132 ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2133 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2134
2135 bool scratch;
2136 if (canBeJumpT4(instruction, target, scratch)) {
2137 // There may be a better way to fix this, but right now put the NOPs first, since in the
2138 // case of an conditional branch this will be coming after an ITTT predicating *three*
2139 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2140 // variable wdith encoding - the previous instruction might *look* like an ITTT but
2141 // actually be the second half of a 2-word op.
2142 instruction[-5] = OP_NOP_T1;
2143 instruction[-4] = OP_NOP_T2a;
2144 instruction[-3] = OP_NOP_T2b;
2145 linkJumpT4(instruction, target);
2146 } else {
2147 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2148 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2149 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2150 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2151 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2152 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2153 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2154 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2155 }
2156 }
2157
2158 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2159 {
2160 return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2161 }
2162
2163 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
2164 {
2165 result.m_value.i = (value >> 10) & 1;
2166 result.m_value.imm4 = value & 15;
2167 }
2168
2169 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2170 {
2171 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2172 }
2173
2174 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
2175 {
2176 result.m_value.imm3 = (value >> 12) & 7;
2177 result.m_value.imm8 = value & 255;
2178 }
2179
2180 class ARMInstructionFormatter {
2181 public:
2182 ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
2183 {
2184 m_buffer.putShort(op | (rd << 8) | imm);
2185 }
2186
2187 ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
2188 {
2189 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
2190 }
2191
2192 ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
2193 {
2194 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
2195 }
2196
2197 ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
2198 {
2199 m_buffer.putShort(op | imm);
2200 }
2201
2202 ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
2203 {
2204 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
2205 }
2206
2207 ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
2208 {
2209 m_buffer.putShort(op | imm);
2210 }
2211
2212 ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
2213 {
2214 m_buffer.putShort(op | (reg1 << 3) | reg2);
2215 }
2216
2217 ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
2218 {
2219 m_buffer.putShort(op | reg);
2220 m_buffer.putShort(ff.m_u.value);
2221 }
2222
2223 ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
2224 {
2225 m_buffer.putShort(op);
2226 m_buffer.putShort(ff.m_u.value);
2227 }
2228
2229 ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
2230 {
2231 m_buffer.putShort(op1);
2232 m_buffer.putShort(op2);
2233 }
2234
2235 ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
2236 {
2237 ARMThumbImmediate newImm = imm;
2238 newImm.m_value.imm4 = imm4;
2239
2240 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
2241 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
2242 }
2243
2244 ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
2245 {
2246 m_buffer.putShort(op | reg1);
2247 m_buffer.putShort((reg2 << 12) | imm);
2248 }
2249
2250 // Formats up instructions of the pattern:
2251 // 111111111B11aaaa:bbbb222SA2C2cccc
2252 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2253 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2254 ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
2255 {
2256 ASSERT(!(op1 & 0x004f));
2257 ASSERT(!(op2 & 0xf1af));
2258 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
2259 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
2260 }
2261
2262 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
2263 // (i.e. +/-(0..255) 32-bit words)
2264 ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
2265 {
2266 bool up = true;
2267 if (imm < 0) {
2268 imm = -imm;
2269 up = false;
2270 }
2271
2272 uint32_t offset = imm;
2273 ASSERT(!(offset & ~0x3fc));
2274 offset >>= 2;
2275
2276 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
2277 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
2278 }
2279
2280 // Administrative methods:
2281
2282 size_t codeSize() const { return m_buffer.codeSize(); }
2283 AssemblerLabel label() const { return m_buffer.label(); }
2284 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2285 void* data() const { return m_buffer.data(); }
2286
2287 #ifndef NDEBUG
2288 unsigned debugOffset() { return m_buffer.debugOffset(); }
2289 #endif
2290
2291 private:
2292 AssemblerBuffer m_buffer;
2293 } m_formatter;
2294
2295 Vector<LinkRecord> m_jumpsToLink;
2296 Vector<int32_t> m_offsets;
2297 };
2298
2299 } // namespace JSC
2300
2301 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2302
2303 #endif // ARMAssembler_h