]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/ARMv7Assembler.h
JavaScriptCore-1097.3.3.tar.gz
[apple/javascriptcore.git] / assembler / ARMv7Assembler.h
1 /*
2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
29
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
31
32 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
35 #include <stdint.h>
36
37 namespace JSC {
38
namespace ARMRegisters {
    // Core (integer) registers, numbered to match the architectural
    // encoding r0-r15. Aliases name the conventional Thumb/ABI roles.
    typedef enum {
        r0,
        r1,
        r2,
        r3,
        r4,
        r5,
        r6,
        r7, wr = r7,   // thumb work register
        r8,            // r8-r15 are "high" registers, unreachable by most 16-bit encodings
        r9, sb = r9,   // static base
        r10, sl = r10, // stack limit
        r11, fp = r11, // frame pointer
        r12, ip = r12,
        r13, sp = r13,
        r14, lr = r14,
        r15, pc = r15,
    } RegisterID;

    // VFP single-precision registers s0-s31.
    typedef enum {
        s0,
        s1,
        s2,
        s3,
        s4,
        s5,
        s6,
        s7,
        s8,
        s9,
        s10,
        s11,
        s12,
        s13,
        s14,
        s15,
        s16,
        s17,
        s18,
        s19,
        s20,
        s21,
        s22,
        s23,
        s24,
        s25,
        s26,
        s27,
        s28,
        s29,
        s30,
        s31,
    } FPSingleRegisterID;

    // VFP double-precision registers d0-d31. Only d0-d15 overlap the
    // single-precision bank (see asSingle/asDouble below).
    typedef enum {
        d0,
        d1,
        d2,
        d3,
        d4,
        d5,
        d6,
        d7,
        d8,
        d9,
        d10,
        d11,
        d12,
        d13,
        d14,
        d15,
        d16,
        d17,
        d18,
        d19,
        d20,
        d21,
        d22,
        d23,
        d24,
        d25,
        d26,
        d27,
        d28,
        d29,
        d30,
        d31,
    } FPDoubleRegisterID;

    // NEON quad registers. NOTE(review): values above q15 do not correspond
    // to architectural quad registers — presumably unused; verify callers.
    typedef enum {
        q0,
        q1,
        q2,
        q3,
        q4,
        q5,
        q6,
        q7,
        q8,
        q9,
        q10,
        q11,
        q12,
        q13,
        q14,
        q15,
        q16,
        q17,
        q18,
        q19,
        q20,
        q21,
        q22,
        q23,
        q24,
        q25,
        q26,
        q27,
        q28,
        q29,
        q30,
        q31,
    } FPQuadRegisterID;

    // Single-precision view of the low half of a double register; only
    // d0-d15 have single-precision aliases, hence the assert.
    inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
    {
        ASSERT(reg < d16);
        return (FPSingleRegisterID)(reg << 1);
    }

    // Double register overlapping an even-numbered single register.
    inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
    {
        ASSERT(!(reg & 1));
        return (FPDoubleRegisterID)(reg >> 1);
    }
}
176
177 class ARMv7Assembler;
class ARMThumbImmediate {
    friend class ARMv7Assembler;

    // An ARMThumbImmediate is either invalid, a Thumb-2 "modified immediate"
    // (TypeEncoded: 8-bit value rotated/replicated per a 4-bit pattern), or a
    // plain 16-bit value (TypeUInt16) for encodings taking raw immediates.
    typedef uint8_t ThumbImmediateType;
    static const ThumbImmediateType TypeInvalid = 0;
    static const ThumbImmediateType TypeEncoded = 1;
    static const ThumbImmediateType TypeUInt16 = 2;

    // Three overlapping views of the 16 stored bits:
    //  - imm4:i:imm3:imm8 — the raw fields as split across a 32-bit encoding;
    //  - shiftAmount:shiftValue7 — a shifted-byte modified immediate;
    //  - pattern:immediate — a replicated-byte modified immediate.
    typedef union {
        int16_t asInt;
        struct {
            unsigned imm8 : 8;
            unsigned imm3 : 3;
            unsigned i : 1;
            unsigned imm4 : 4;
        };
        // If this is an encoded immediate, then it may describe a shift, or a pattern.
        struct {
            unsigned shiftValue7 : 7;
            unsigned shiftAmount : 5;
        };
        struct {
            unsigned immediate : 8;
            unsigned pattern : 4;
        };
    } ThumbImmediateValue;

    // byte0 contains least significant bit; not using an array to make client code endian agnostic.
    typedef union {
        int32_t asInt;
        struct {
            uint8_t byte0;
            uint8_t byte1;
            uint8_t byte2;
            uint8_t byte3;
        };
    } PatternBytes;

    // One binary-search step of a count-leading-zeros: if any of the top N
    // bits are set, discard the bottom N; otherwise record N zeros.
    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
    {
        if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
            value >>= N;             /* if any were set, lose the bottom N */
        else                         /* if none of the top N bits are set, */
            zeros += N;              /* then we have identified N leading zeros */
    }

    // Branch-free-ish clz via successive halving (16, 8, 4, 2, 1).
    static int32_t countLeadingZeros(uint32_t value)
    {
        if (!value)
            return 32;

        int32_t zeros = 0;
        countLeadingZerosPartial(value, zeros, 16);
        countLeadingZerosPartial(value, zeros, 8);
        countLeadingZerosPartial(value, zeros, 4);
        countLeadingZerosPartial(value, zeros, 2);
        countLeadingZerosPartial(value, zeros, 1);
        return zeros;
    }

    // Default-constructed immediates are invalid (see isValid()).
    ARMThumbImmediate()
        : m_type(TypeInvalid)
    {
        m_value.asInt = 0;
    }

    ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
        : m_type(type)
        , m_value(value)
    {
    }

    ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
        : m_type(TypeUInt16)
    {
        // Make sure this constructor is only reached with type TypeUInt16;
        // this extra parameter makes the code a little clearer by making it
        // explicit at call sites which type is being constructed
        ASSERT_UNUSED(type, type == TypeUInt16);

        m_value.asInt = value;
    }

public:
    // Try to express 'value' as a Thumb-2 modified immediate. Returns an
    // invalid immediate if none of the four forms below applies.
    static ARMThumbImmediate makeEncodedImm(uint32_t value)
    {
        ThumbImmediateValue encoding;
        encoding.asInt = 0;

        // okay, these are easy.
        if (value < 256) {
            encoding.immediate = value;
            encoding.pattern = 0;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        int32_t leadingZeros = countLeadingZeros(value);
        // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
        ASSERT(leadingZeros < 24);

        // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
        // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
        // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
        int32_t rightShiftAmount = 24 - leadingZeros;
        if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
            // Shift the value down to the low byte position. The assign to
            // shiftValue7 drops the implicit top bit.
            encoding.shiftValue7 = value >> rightShiftAmount;
            // The encoded shift amount is the magnitude of a right rotate.
            encoding.shiftAmount = 8 + leadingZeros;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        PatternBytes bytes;
        bytes.asInt = value;

        // Replicated-byte patterns: 0xXYXYXYXY (pattern 3), 0x00XY00XY
        // (pattern 1), 0xXY00XY00 (pattern 2).
        if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
            encoding.immediate = bytes.byte0;
            encoding.pattern = 3;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
            encoding.immediate = bytes.byte0;
            encoding.pattern = 1;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
            encoding.immediate = bytes.byte1;
            encoding.pattern = 2;
            return ARMThumbImmediate(TypeEncoded, encoding);
        }

        return ARMThumbImmediate();
    }

    // 12-bit unsigned immediate, or invalid if out of range.
    static ARMThumbImmediate makeUInt12(int32_t value)
    {
        return (!(value & 0xfffff000))
            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
            : ARMThumbImmediate();
    }

    static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
    {
        // If this is not a 12-bit unsigned int, try making an encoded immediate.
        return (!(value & 0xfffff000))
            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
            : makeEncodedImm(value);
    }

    // The 'make' methods, above, return a !isValid() value if the argument
    // cannot be represented as the requested type. This method is called
    // 'get' since the argument can always be represented.
    static ARMThumbImmediate makeUInt16(uint16_t value)
    {
        return ARMThumbImmediate(TypeUInt16, value);
    }

    bool isValid()
    {
        return m_type != TypeInvalid;
    }

    uint16_t asUInt16() const { return m_value.asInt; }

    // These methods rely on the format of encoded byte values.
    // isUIntN: the stored value fits in N bits (the 9/10/12/16-bit variants
    // additionally require the plain TypeUInt16 representation).
    bool isUInt3() { return !(m_value.asInt & 0xfff8); }
    bool isUInt4() { return !(m_value.asInt & 0xfff0); }
    bool isUInt5() { return !(m_value.asInt & 0xffe0); }
    bool isUInt6() { return !(m_value.asInt & 0xffc0); }
    bool isUInt7() { return !(m_value.asInt & 0xff80); }
    bool isUInt8() { return !(m_value.asInt & 0xff00); }
    bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
    bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
    bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
    bool isUInt16() { return m_type == TypeUInt16; }
    uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
    uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
    uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
    uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
    uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
    uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
    uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
    uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
    uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
    uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }

    bool isEncodedImm() { return m_type == TypeEncoded; }

private:
    ThumbImmediateType m_type;
    ThumbImmediateValue m_value;
};
373
// Shift-operand types, in the 2-bit architectural encoding order.
// RRX shares ROR's encoding (distinguished by a zero shift amount).
typedef enum {
    SRType_LSL,
    SRType_LSR,
    SRType_ASR,
    SRType_ROR,

    SRType_RRX = SRType_ROR
} ARMShiftType;
382
// Packs a shift type and amount into one byte, and exposes it as the two
// nibbles Thumb-2 shifted-operand encodings expect.
class ShiftTypeAndAmount {
    friend class ARMv7Assembler;

public:
    // Default: type 0 (LSL), amount 0 — i.e. no shift.
    ShiftTypeAndAmount()
    {
        m_u.type = (ARMShiftType)0;
        m_u.amount = 0;
    }

    ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
    {
        m_u.type = type;
        m_u.amount = amount & 31; // shift amounts are taken modulo 32
    }

    // Low/high nibble of the packed byte, as split across the encoding.
    unsigned lo4() { return m_u.lo4; }
    unsigned hi4() { return m_u.hi4; }

private:
    // NOTE(review): reading lo4/hi4 after writing type/amount relies on
    // low-to-high bit-field allocation — implementation-defined, but
    // consistent on the ABIs this assembler targets.
    union {
        struct {
            unsigned lo4 : 4;
            unsigned hi4 : 4;
        };
        struct {
            unsigned type : 2;
            unsigned amount : 6;
        };
    } m_u;
};
414
415 class ARMv7Assembler {
416 public:
417 typedef ARMRegisters::RegisterID RegisterID;
418 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
419 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
420 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
421
422 // (HS, LO, HI, LS) -> (AE, B, A, BE)
423 // (VS, VC) -> (O, NO)
    // ARM condition codes in architectural order; the enum value is the
    // 4-bit cond field used by IT and conditional-branch encodings.
    typedef enum {
        ConditionEQ,
        ConditionNE,
        ConditionHS, ConditionCS = ConditionHS,
        ConditionLO, ConditionCC = ConditionLO,
        ConditionMI,
        ConditionPL,
        ConditionVS,
        ConditionVC,
        ConditionHI,
        ConditionLS,
        ConditionGE,
        ConditionLT,
        ConditionGT,
        ConditionLE,
        ConditionAL,
        ConditionInvalid
    } Condition;
442
// Pack an enumerator index (low 3 bits) together with a size in bytes;
// JUMP_ENUM_SIZE recovers the size.
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
    // Jump categories recorded at emit time; the packed size is the room
    // reserved for the jump (presumably the worst-case linked size — verify
    // against the link-time shortening logic).
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
                    JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
                    JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
                    JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
                    JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
    };
    // Concrete branch encodings chosen at link time, packed the same way
    // with the exact size of the chosen form.
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
        LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
        LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
        LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
        LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
        LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
        LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
    };
461
    // Records one unresolved jump: source/destination offsets plus the jump
    // kind, chosen encoding, and condition, bit-packed into three words.
    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }
        // Copy via the raw-word view; the COMPILE_ASSERT below keeps the two
        // union views the same size so no state is lost.
        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }
        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        // The link type may be set exactly once, after the encoding is chosen.
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
    private:
        union {
            // Offsets are squeezed into 31-bit fields to fit the packing.
            struct RealTypes {
                intptr_t m_from : 31;
                intptr_t m_to : 31;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 16;
            } realTypes;
            struct CopyTypes {
                uint32_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };
500
501 private:
502
503 // ARMv7, Appx-A.6.3
504 bool BadReg(RegisterID reg)
505 {
506 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
507 }
508
509 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
510 {
511 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
512 if (rdNum & 1)
513 rdMask |= 1 << lowBitShift;
514 return rdMask;
515 }
516
517 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
518 {
519 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
520 if (rdNum & 16)
521 rdMask |= 1 << highBitShift;
522 return rdMask;
523 }
524
    // Fixed bits of 16-bit (single halfword) Thumb opcodes; register and
    // immediate fields are OR'd in by the formatter helpers.
    typedef enum {
        OP_ADD_reg_T1       = 0x1800,
        OP_SUB_reg_T1       = 0x1A00,
        OP_ADD_imm_T1       = 0x1C00,
        OP_SUB_imm_T1       = 0x1E00,
        OP_MOV_imm_T1       = 0x2000,
        OP_CMP_imm_T1       = 0x2800,
        OP_ADD_imm_T2       = 0x3000,
        OP_SUB_imm_T2       = 0x3800,
        OP_AND_reg_T1       = 0x4000,
        OP_EOR_reg_T1       = 0x4040,
        OP_TST_reg_T1       = 0x4200,
        OP_RSB_imm_T1       = 0x4240,
        OP_CMP_reg_T1       = 0x4280,
        OP_ORR_reg_T1       = 0x4300,
        OP_MVN_reg_T1       = 0x43C0,
        OP_ADD_reg_T2       = 0x4400,
        OP_MOV_reg_T1       = 0x4600,
        // BLX and BX share a base value; blx() sets bit 7 through its extra
        // register operand to select BLX (see blx()).
        OP_BLX              = 0x4700,
        OP_BX               = 0x4700,
        OP_STR_reg_T1       = 0x5000,
        OP_STRH_reg_T1      = 0x5200,
        OP_STRB_reg_T1      = 0x5400,
        OP_LDRSB_reg_T1     = 0x5600,
        OP_LDR_reg_T1       = 0x5800,
        OP_LDRH_reg_T1      = 0x5A00,
        OP_LDRB_reg_T1      = 0x5C00,
        OP_LDRSH_reg_T1     = 0x5E00,
        OP_STR_imm_T1       = 0x6000,
        OP_LDR_imm_T1       = 0x6800,
        OP_STRB_imm_T1      = 0x7000,
        OP_LDRB_imm_T1      = 0x7800,
        OP_STRH_imm_T1      = 0x8000,
        OP_LDRH_imm_T1      = 0x8800,
        OP_STR_imm_T2       = 0x9000,
        OP_LDR_imm_T2       = 0x9800,
        OP_ADD_SP_imm_T1    = 0xA800,
        OP_ADD_SP_imm_T2    = 0xB000,
        OP_SUB_SP_imm_T1    = 0xB080,
        OP_BKPT             = 0xBE00,
        OP_IT               = 0xBF00,
        OP_NOP_T1           = 0xBF00,
    } OpcodeID;
568
    // First halfword of two-halfword (32-bit) Thumb-2 opcodes, plus the
    // 16-bit branch forms OP_B_T1/OP_B_T2. Many VFP operations share a first
    // halfword and are disambiguated by the OpcodeID2 second halfword.
    typedef enum {
        OP_B_T1         = 0xD000,
        OP_B_T2         = 0xE000,
        OP_AND_reg_T2   = 0xEA00,
        OP_TST_reg_T2   = 0xEA10,
        OP_ORR_reg_T2   = 0xEA40,
        OP_ORR_S_reg_T2 = 0xEA50,
        // Immediate shifts are MOV (register, shifted) forms: same base
        // opcode, distinguished by the shift type in the second halfword.
        OP_ASR_imm_T1   = 0xEA4F,
        OP_LSL_imm_T1   = 0xEA4F,
        OP_LSR_imm_T1   = 0xEA4F,
        OP_ROR_imm_T1   = 0xEA4F,
        OP_MVN_reg_T2   = 0xEA6F,
        OP_EOR_reg_T2   = 0xEA80,
        OP_ADD_reg_T3   = 0xEB00,
        OP_ADD_S_reg_T3 = 0xEB10,
        OP_SUB_reg_T2   = 0xEBA0,
        OP_SUB_S_reg_T2 = 0xEBB0,
        OP_CMP_reg_T2   = 0xEBB0,
        OP_VMOV_CtoD    = 0xEC00,
        OP_VMOV_DtoC    = 0xEC10,
        OP_FSTS         = 0xED00,
        OP_VSTR         = 0xED00,
        OP_FLDS         = 0xED10,
        OP_VLDR         = 0xED10,
        OP_VMOV_CtoS    = 0xEE00,
        OP_VMOV_StoC    = 0xEE10,
        OP_VMUL_T2      = 0xEE20,
        OP_VADD_T2      = 0xEE30,
        OP_VSUB_T2      = 0xEE30,
        OP_VDIV         = 0xEE80,
        OP_VABS_T2      = 0xEEB0,
        OP_VCMP         = 0xEEB0,
        OP_VCVT_FPIVFP  = 0xEEB0,
        OP_VMOV_T2      = 0xEEB0,
        OP_VMOV_IMM_T2  = 0xEEB0,
        OP_VMRS         = 0xEEB0,
        OP_VNEG_T2      = 0xEEB0,
        OP_VSQRT_T1     = 0xEEB0,
        OP_VCVTSD_T1    = 0xEEB0,
        OP_VCVTDS_T1    = 0xEEB0,
        OP_B_T3a        = 0xF000,
        OP_B_T4a        = 0xF000,
        OP_AND_imm_T1   = 0xF000,
        OP_TST_imm      = 0xF010,
        OP_ORR_imm_T1   = 0xF040,
        OP_MOV_imm_T2   = 0xF040,
        OP_MVN_imm      = 0xF060,
        OP_EOR_imm_T1   = 0xF080,
        OP_ADD_imm_T3   = 0xF100,
        OP_ADD_S_imm_T3 = 0xF110,
        OP_CMN_imm      = 0xF110,
        OP_ADC_imm      = 0xF140,
        OP_SUB_imm_T3   = 0xF1A0,
        OP_SUB_S_imm_T3 = 0xF1B0,
        OP_CMP_imm_T2   = 0xF1B0,
        OP_RSB_imm_T2   = 0xF1C0,
        OP_RSB_S_imm_T2 = 0xF1D0,
        OP_ADD_imm_T4   = 0xF200,
        OP_MOV_imm_T3   = 0xF240,
        OP_SUB_imm_T4   = 0xF2A0,
        OP_MOVT         = 0xF2C0,
        OP_UBFX_T1      = 0xF3C0,
        OP_NOP_T2a      = 0xF3AF,
        OP_STRB_imm_T3  = 0xF800,
        OP_STRB_reg_T2  = 0xF800,
        OP_LDRB_imm_T3  = 0xF810,
        OP_LDRB_reg_T2  = 0xF810,
        OP_STRH_imm_T3  = 0xF820,
        OP_STRH_reg_T2  = 0xF820,
        OP_LDRH_reg_T2  = 0xF830,
        OP_LDRH_imm_T3  = 0xF830,
        OP_STR_imm_T4   = 0xF840,
        OP_STR_reg_T2   = 0xF840,
        OP_LDR_imm_T4   = 0xF850,
        OP_LDR_reg_T2   = 0xF850,
        OP_STRB_imm_T2  = 0xF880,
        OP_LDRB_imm_T2  = 0xF890,
        OP_STRH_imm_T2  = 0xF8A0,
        OP_LDRH_imm_T2  = 0xF8B0,
        OP_STR_imm_T3   = 0xF8C0,
        OP_LDR_imm_T3   = 0xF8D0,
        OP_LDRSB_reg_T2 = 0xF910,
        OP_LDRSH_reg_T2 = 0xF930,
        OP_LSL_reg_T2   = 0xFA00,
        OP_LSR_reg_T2   = 0xFA20,
        OP_ASR_reg_T2   = 0xFA40,
        OP_ROR_reg_T2   = 0xFA60,
        OP_CLZ          = 0xFAB0,
        OP_SMULL_T1     = 0xFB80,
    } OpcodeID1;
659
    // Fixed bits of the second halfword for two-halfword opcodes; the 'b'
    // suffix pairs each entry with its like-named OpcodeID1 counterpart.
    typedef enum {
        OP_VADD_T2b      = 0x0A00,
        OP_VDIVb         = 0x0A00,
        OP_FLDSb         = 0x0A00,
        OP_VLDRb         = 0x0A00,
        OP_VMOV_IMM_T2b  = 0x0A00,
        OP_VMOV_T2b      = 0x0A40,
        OP_VMUL_T2b      = 0x0A00,
        OP_FSTSb         = 0x0A00,
        OP_VSTRb         = 0x0A00,
        OP_VMOV_StoCb    = 0x0A10,
        OP_VMOV_CtoSb    = 0x0A10,
        OP_VMOV_DtoCb    = 0x0A10,
        OP_VMOV_CtoDb    = 0x0A10,
        OP_VMRSb         = 0x0A10,
        OP_VABS_T2b      = 0x0A40,
        OP_VCMPb         = 0x0A40,
        OP_VCVT_FPIVFPb  = 0x0A40,
        OP_VNEG_T2b      = 0x0A40,
        OP_VSUB_T2b      = 0x0A40,
        OP_VSQRT_T1b     = 0x0A40,
        OP_VCVTSD_T1b    = 0x0A40,
        OP_VCVTDS_T1b    = 0x0A40,
        OP_NOP_T2b       = 0x8000,
        OP_B_T3b         = 0x8000,
        OP_B_T4b         = 0x9000,
    } OpcodeID2;
687
    // Packs four 4-bit fields into one value for 16-bit halfword emission.
    struct FourFours {
        FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
        {
            m_u.f0 = f0;
            m_u.f1 = f1;
            m_u.f2 = f2;
            m_u.f3 = f3;
        }

        union {
            unsigned value;
            // NOTE(review): assumes low-to-high bit-field allocation so f0 is
            // the low nibble of 'value' — implementation-defined in general,
            // but holds on the ABIs this assembler targets.
            struct {
                unsigned f0 : 4;
                unsigned f1 : 4;
                unsigned f2 : 4;
                unsigned f3 : 4;
            };
        } m_u;
    };
707
708 class ARMInstructionFormatter;
709
710 // false means else!
711 bool ifThenElseConditionBit(Condition condition, bool isIf)
712 {
713 return isIf ? (condition & 1) : !(condition & 1);
714 }
715 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
716 {
717 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
718 | (ifThenElseConditionBit(condition, inst3if) << 2)
719 | (ifThenElseConditionBit(condition, inst4if) << 1)
720 | 1;
721 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
722 return (condition << 4) | mask;
723 }
724 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
725 {
726 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
727 | (ifThenElseConditionBit(condition, inst3if) << 2)
728 | 2;
729 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
730 return (condition << 4) | mask;
731 }
732 uint8_t ifThenElse(Condition condition, bool inst2if)
733 {
734 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
735 | 4;
736 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
737 return (condition << 4) | mask;
738 }
739
740 uint8_t ifThenElse(Condition condition)
741 {
742 int mask = 8;
743 return (condition << 4) | mask;
744 }
745
746 public:
747
    // ADC (immediate): rd = rn + imm + carry, via the 32-bit T1 encoding.
    void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
    }
758
759 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
760 {
761 // Rd can only be SP if Rn is also SP.
762 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
763 ASSERT(rd != ARMRegisters::pc);
764 ASSERT(rn != ARMRegisters::pc);
765 ASSERT(imm.isValid());
766
767 if (rn == ARMRegisters::sp) {
768 ASSERT(!(imm.getUInt16() & 3));
769 if (!(rd & 8) && imm.isUInt10()) {
770 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
771 return;
772 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
773 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
774 return;
775 }
776 } else if (!((rd | rn) & 8)) {
777 if (imm.isUInt3()) {
778 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
779 return;
780 } else if ((rd == rn) && imm.isUInt8()) {
781 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
782 return;
783 }
784 }
785
786 if (imm.isEncodedImm())
787 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
788 else {
789 ASSERT(imm.isUInt12());
790 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
791 }
792 }
793
    // ADD (register, shifted): rd = rn + (rm shifted), 32-bit T3 encoding.
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
802
803 // NOTE: In an IT block, add doesn't modify the flags register.
804 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
805 {
806 if (rd == rn)
807 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
808 else if (rd == rm)
809 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
810 else if (!((rd | rn | rm) & 8))
811 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
812 else
813 add(rd, rn, rm, ShiftTypeAndAmount());
814 }
815
816 // Not allowed in an IT (if then) block.
817 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
818 {
819 // Rd can only be SP if Rn is also SP.
820 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
821 ASSERT(rd != ARMRegisters::pc);
822 ASSERT(rn != ARMRegisters::pc);
823 ASSERT(imm.isEncodedImm());
824
825 if (!((rd | rn) & 8)) {
826 if (imm.isUInt3()) {
827 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
828 return;
829 } else if ((rd == rn) && imm.isUInt8()) {
830 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
831 return;
832 }
833 }
834
835 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
836 }
837
    // Not allowed in an IT (if then) block?
    // ADDS (register, shifted): flag-setting add, 32-bit T3 encoding.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
847
848 // Not allowed in an IT (if then) block.
849 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
850 {
851 if (!((rd | rn | rm) & 8))
852 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
853 else
854 add_S(rd, rn, rm, ShiftTypeAndAmount());
855 }
856
    // AND (immediate): rd = rn & imm ('ARM_' prefix avoids the C operator keyword).
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
    }
864
    // AND (register, shifted): rd = rn & (rm shifted), 32-bit T2 encoding.
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
872
873 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
874 {
875 if ((rd == rn) && !((rd | rm) & 8))
876 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
877 else if ((rd == rm) && !((rd | rn) & 8))
878 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
879 else
880 ARM_and(rd, rn, rm, ShiftTypeAndAmount());
881 }
882
883 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
884 {
885 ASSERT(!BadReg(rd));
886 ASSERT(!BadReg(rm));
887 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
888 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
889 }
890
    // ASR (register): rd = rn >> rm (arithmetic), 32-bit T2 encoding;
    // 0xf and 0 are fixed bits of the encoding.
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
898
    // Only allowed in IT (if then) block if last instruction.
    // Emits an unconditional T4 branch with a zero offset; the target is
    // patched later through the returned label.
    ALWAYS_INLINE AssemblerLabel b()
    {
        m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
        return m_formatter.label();
    }
905
    // Only allowed in IT (if then) block if last instruction.
    ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
    {
        ASSERT(rm != ARMRegisters::pc);
        // The (RegisterID)8 operand sets bit 7, selecting BLX over BX on the
        // shared 0x4700 base opcode.
        m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
        return m_formatter.label();
    }
913
    // Only allowed in IT (if then) block if last instruction.
    ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
    {
        // Zero in the high operand keeps the shared base opcode as BX.
        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
        return m_formatter.label();
    }
920
    // BKPT: software breakpoint with an 8-bit immediate comment field.
    void bkpt(uint8_t imm = 0)
    {
        m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
    }
925
    // CLZ: rd = count of leading zeros in rm. The encoding carries rm in
    // both halfwords; 0xf and 8 are fixed bits.
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
    }
932
    // CMN (immediate): set flags from rn + imm; the result is discarded,
    // which the encoding expresses with Rd == 0xf.
    ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
    }
940
941 ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
942 {
943 ASSERT(rn != ARMRegisters::pc);
944 ASSERT(imm.isEncodedImm());
945
946 if (!(rn & 8) && imm.isUInt8())
947 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
948 else
949 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
950 }
951
    // CMP (register, shifted): set flags from rn - (rm shifted); the
    // discarded result register is fixed to 0xf.
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }
958
959 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
960 {
961 if ((rn | rm) & 8)
962 cmp(rn, rm, ShiftTypeAndAmount());
963 else
964 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
965 }
966
    // xor is not spelled with an 'e'. :-(
    // EOR (immediate): rd = rn ^ imm.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
    }
975
    // xor is not spelled with an 'e'. :-(
    // EOR (register, shifted): rd = rn ^ (rm shifted), 32-bit T2 encoding.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
984
985 // xor is not spelled with an 'e'. :-(
986 void eor(RegisterID rd, RegisterID rn, RegisterID rm)
987 {
988 if ((rd == rn) && !((rd | rm) & 8))
989 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
990 else if ((rd == rm) && !((rd | rn) & 8))
991 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
992 else
993 eor(rd, rn, rm, ShiftTypeAndAmount());
994 }
995
    // IT (If-Then): makes up to the next four instructions conditional.
    // The overloads take the condition for the first instruction plus one
    // bool per additional slot: true = same condition, false = inverted
    // (see ifThenElseConditionBit).
    ALWAYS_INLINE void it(Condition cond)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
    }

    ALWAYS_INLINE void it(Condition cond, bool inst2if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
    }

    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
    }

    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
    }
1015
1016 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1017 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1018 {
1019 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1020 ASSERT(imm.isUInt12());
1021
1022 if (!((rt | rn) & 8) && imm.isUInt7())
1023 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1024 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1025 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1026 else
1027 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
1028 }
1029
    // Emits the 16-bit T1 LDR unconditionally; the caller guarantees low
    // registers and a 7-bit offset. Note the low two offset bits are dropped
    // by the >> 2, so the offset must be word-aligned.
    ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt7());
        ASSERT(!((rt | rn) & 8));
        m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
    }
1037
1038 // If index is set, this is a regular offset or a pre-indexed load;
1039 // if index is not set then is is a post-index load.
1040 //
1041 // If wback is set rn is updated - this is a pre or post index load,
1042 // if wback is not set this is a regular offset memory access.
1043 //
1044 // (-255 <= offset <= 255)
1045 // _reg = REG[rn]
1046 // _tmp = _reg + offset
1047 // MEM[index ? _tmp : _reg] = REG[rt]
1048 // if (wback) REG[rn] = _tmp
1049 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1050 {
1051 ASSERT(rt != ARMRegisters::pc);
1052 ASSERT(rn != ARMRegisters::pc);
1053 ASSERT(index || wback);
1054 ASSERT(!wback | (rt != rn));
1055
1056 bool add = true;
1057 if (offset < 0) {
1058 add = false;
1059 offset = -offset;
1060 }
1061 ASSERT((offset & ~0xff) == 0);
1062
1063 offset |= (wback << 8);
1064 offset |= (add << 9);
1065 offset |= (index << 10);
1066 offset |= (1 << 11);
1067
1068 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1069 }
1070
    // LDR (register): load word from [rn + (rm << shift)].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // Use the 16-bit T1 encoding when there is no shift and all registers are low.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1083
    // LDRH (immediate): load halfword from [rn + imm].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        // 16-bit T1 encoding needs low registers and a halfword-scaled 5-bit offset.
        if (!((rt | rn) & 8) && imm.isUInt6())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
    }
1095
1096 // If index is set, this is a regular offset or a pre-indexed load;
1097 // if index is not set then is is a post-index load.
1098 //
1099 // If wback is set rn is updated - this is a pre or post index load,
1100 // if wback is not set this is a regular offset memory access.
1101 //
1102 // (-255 <= offset <= 255)
1103 // _reg = REG[rn]
1104 // _tmp = _reg + offset
1105 // MEM[index ? _tmp : _reg] = REG[rt]
1106 // if (wback) REG[rn] = _tmp
1107 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1108 {
1109 ASSERT(rt != ARMRegisters::pc);
1110 ASSERT(rn != ARMRegisters::pc);
1111 ASSERT(index || wback);
1112 ASSERT(!wback | (rt != rn));
1113
1114 bool add = true;
1115 if (offset < 0) {
1116 add = false;
1117 offset = -offset;
1118 }
1119 ASSERT((offset & ~0xff) == 0);
1120
1121 offset |= (wback << 8);
1122 offset |= (add << 9);
1123 offset |= (index << 10);
1124 offset |= (1 << 11);
1125
1126 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1127 }
1128
    // LDRH (register): load halfword from [rn + (rm << shift)].
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(!BadReg(rt));   // Memory hint
        ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // Use the 16-bit T1 encoding when there is no shift and all registers are low.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // LDRB (immediate): load byte from [rn + imm].
    void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        // Byte loads are unscaled, so the T1 encoding takes the raw 5-bit offset.
        if (!((rt | rn) & 8) && imm.isUInt5())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
    }
1152
1153 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1154 {
1155 ASSERT(rt != ARMRegisters::pc);
1156 ASSERT(rn != ARMRegisters::pc);
1157 ASSERT(index || wback);
1158 ASSERT(!wback | (rt != rn));
1159
1160 bool add = true;
1161 if (offset < 0) {
1162 add = false;
1163 offset = -offset;
1164 }
1165
1166 ASSERT(!(offset & ~0xff));
1167
1168 offset |= (wback << 8);
1169 offset |= (add << 9);
1170 offset |= (index << 10);
1171 offset |= (1 << 11);
1172
1173 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1174 }
1175
    // LDRB (register): load byte from [rn + (rm << shift)].
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // Use the 16-bit T1 encoding when there is no shift and all registers are low.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // LDRSB (register): load sign-extended byte from [rn + (rm << shift)].
    void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // LDRSH (register): load sign-extended halfword from [rn + (rm << shift)].
    void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1211
    // LSL (immediate): rd = rm << shiftAmount.
    void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // LSL (register): rd = rn << rm.
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }

    // LSR (immediate): rd = rm >> shiftAmount (logical, zero-filling).
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // LSR (register): rd = rn >> rm (logical, zero-filling).
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1243
    // MOV (immediate, T3): 16-bit immediate move; used when the value is not
    // expressible as a Thumb-2 modified (encoded) immediate.
    ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm()); // Encoded immediates should use mov() which picks T2.
        ASSERT(!BadReg(rd));

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
    }

    // MOV (immediate): picks the shortest of the T1/T2/T3 encodings.
    ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!BadReg(rd));

        if ((rd < 8) && imm.isUInt8())
            m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
        else if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
        else
            movT3(rd, imm);
    }

    // MOV (register), 16-bit encoding.
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
    }

    // MOVT: write imm16 into the top half of rd, preserving the bottom half.
    ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isUInt16());
        ASSERT(!BadReg(rd));
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
    }
1277
    // MVN (immediate): rd = ~imm.
    ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
    }

    // MVN (register, shifted): rd = ~(rm shifted).
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // MVN (register): rd = ~rm; uses the 16-bit encoding for low registers.
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
    {
        if (!((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
        else
            mvn(rd, rm, ShiftTypeAndAmount());
    }

    // NEG: rd = -rm, implemented as rd = 0 - rm via the reverse-subtract path.
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
        sub(rd, zero, rm);
    }
1306
    // ORR (immediate): rd = rn | imm.
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
    }

    // ORR (register, shifted): rd = rn | (rm shifted).
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // ORR (register): rd = rn | rm. ORR is commutative, so the 16-bit T1
    // encoding (which requires rd to match one source) can be used either way.
    void orr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
        else
            orr(rd, rn, rm, ShiftTypeAndAmount());
    }

    // ORRS (register, shifted): as orr() but also sets the condition flags.
    ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // ORRS (register). NOTE: the 16-bit T1 encoding sets flags when used
    // outside an IT block, so reusing OP_ORR_reg_T1 here is intentional.
    void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
        else
            orr_S(rd, rn, rm, ShiftTypeAndAmount());
    }
1350
    // ROR (immediate): rd = rm rotated right by shiftAmount.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // ROR (register): rd = rn rotated right by rm.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }

    // SMULL: signed 32x32 -> 64-bit multiply; result split across rdLo/rdHi.
    ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rdLo));
        ASSERT(!BadReg(rdHi));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        ASSERT(rdLo != rdHi); // Same destination for both halves is unpredictable.
        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
    }
1376
    // STR (immediate): store word to [rn + imm].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        // Prefer 16-bit encodings: T1 for low registers, T2 for SP-relative stores.
        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
    }
1391
1392 // If index is set, this is a regular offset or a pre-indexed store;
1393 // if index is not set then is is a post-index store.
1394 //
1395 // If wback is set rn is updated - this is a pre or post index store,
1396 // if wback is not set this is a regular offset memory access.
1397 //
1398 // (-255 <= offset <= 255)
1399 // _reg = REG[rn]
1400 // _tmp = _reg + offset
1401 // MEM[index ? _tmp : _reg] = REG[rt]
1402 // if (wback) REG[rn] = _tmp
1403 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1404 {
1405 ASSERT(rt != ARMRegisters::pc);
1406 ASSERT(rn != ARMRegisters::pc);
1407 ASSERT(index || wback);
1408 ASSERT(!wback | (rt != rn));
1409
1410 bool add = true;
1411 if (offset < 0) {
1412 add = false;
1413 offset = -offset;
1414 }
1415 ASSERT((offset & ~0xff) == 0);
1416
1417 offset |= (wback << 8);
1418 offset |= (add << 9);
1419 offset |= (index << 10);
1420 offset |= (1 << 11);
1421
1422 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1423 }
1424
    // STR (register): store word to [rn + (rm << shift)].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // Use the 16-bit T1 encoding when there is no shift and all registers are low.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // STRB (immediate): store byte to [rn + imm].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
    }
1450
1451 // If index is set, this is a regular offset or a pre-indexed store;
1452 // if index is not set then is is a post-index store.
1453 //
1454 // If wback is set rn is updated - this is a pre or post index store,
1455 // if wback is not set this is a regular offset memory access.
1456 //
1457 // (-255 <= offset <= 255)
1458 // _reg = REG[rn]
1459 // _tmp = _reg + offset
1460 // MEM[index ? _tmp : _reg] = REG[rt]
1461 // if (wback) REG[rn] = _tmp
1462 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1463 {
1464 ASSERT(rt != ARMRegisters::pc);
1465 ASSERT(rn != ARMRegisters::pc);
1466 ASSERT(index || wback);
1467 ASSERT(!wback | (rt != rn));
1468
1469 bool add = true;
1470 if (offset < 0) {
1471 add = false;
1472 offset = -offset;
1473 }
1474 ASSERT((offset & ~0xff) == 0);
1475
1476 offset |= (wback << 8);
1477 offset |= (add << 9);
1478 offset |= (index << 10);
1479 offset |= (1 << 11);
1480
1481 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1482 }
1483
    // STRB (register): store byte to [rn + (rm << shift)].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // Use the 16-bit T1 encoding when there is no shift and all registers are low.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // STRH (immediate): store halfword to [rn + imm].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
    }
1509
1510 // If index is set, this is a regular offset or a pre-indexed store;
1511 // if index is not set then is is a post-index store.
1512 //
1513 // If wback is set rn is updated - this is a pre or post index store,
1514 // if wback is not set this is a regular offset memory access.
1515 //
1516 // (-255 <= offset <= 255)
1517 // _reg = REG[rn]
1518 // _tmp = _reg + offset
1519 // MEM[index ? _tmp : _reg] = REG[rt]
1520 // if (wback) REG[rn] = _tmp
1521 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1522 {
1523 ASSERT(rt != ARMRegisters::pc);
1524 ASSERT(rn != ARMRegisters::pc);
1525 ASSERT(index || wback);
1526 ASSERT(!wback | (rt != rn));
1527
1528 bool add = true;
1529 if (offset < 0) {
1530 add = false;
1531 offset = -offset;
1532 }
1533 ASSERT(!(offset & ~0xff));
1534
1535 offset |= (wback << 8);
1536 offset |= (add << 9);
1537 offset |= (index << 10);
1538 offset |= (1 << 11);
1539
1540 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1541 }
1542
    // STRH (register): store halfword to [rn + (rm << shift)].
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // Use the 16-bit T1 encoding when there is no shift and all registers are low.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1555
    // SUB (immediate): rd = rn - imm. Picks the shortest available encoding:
    // SP-relative T1, low-register T1/T2, then 32-bit T3 (encoded imm) or T4 (raw 12-bit).
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            ASSERT(!(imm.getUInt16() & 3)); // SP adjustment must be word-aligned.
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
        }
    }

    // RSB (reverse subtract): rd = imm - rn. Note the argument order (imm
    // before rn) distinguishes this overload from sub(rd, rn, imm).
    ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        // RSB with zero (i.e. negate) has a compact 16-bit encoding for low registers.
        if (!((rd | rn) & 8) && !imm.getUInt12())
            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
    }

    // SUB (register, shifted): rd = rn - (rm shifted).
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // SUB (register): rd = rn - rm.
    // NOTE: In an IT block, the 16-bit T1 encoding doesn't modify the flags register.
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub(rd, rn, rm, ShiftTypeAndAmount());
    }
1616
    // SUBS (immediate): rd = rn - imm, setting condition flags.
    // Not allowed in an IT (if then) block.
    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            ASSERT(!(imm.getUInt16() & 3)); // SP adjustment must be word-aligned.
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            // The 16-bit T1/T2 encodings set flags when used outside an IT block.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
    }

    // RSBS (reverse subtract): rd = imm - rn, setting condition flags.
    ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
    }

    // SUBS (register, shifted): rd = rn - (rm shifted), setting condition flags.
    // Not allowed in an IT (if then) block?
    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // SUBS (register): rd = rn - rm, setting condition flags.
    // Not allowed in an IT (if then) block.
    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        // The 16-bit T1 encoding sets flags when used outside an IT block.
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub_S(rd, rn, rm, ShiftTypeAndAmount());
    }
1671
    // TST (immediate): set flags from rn & imm; result is discarded.
    ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
    }

    // TST (register, shifted): set flags from rn & (rm shifted).
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }

    // TST (register): 16-bit encoding for low registers, otherwise T2.
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
    {
        if ((rn | rm) & 8)
            tst(rn, rm, ShiftTypeAndAmount());
        else
            m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
    }

    // UBFX: unsigned bit-field extract; rd = (rn >> lsb) & ((1 << width) - 1).
    // The lsb is split across the imm3 (bits 4:2) and imm2 (bits 1:0) fields,
    // and the width is encoded as width - 1.
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
    {
        ASSERT(lsb < 32);
        ASSERT((width >= 1) && (width <= 32));
        ASSERT((lsb + width) <= 32);
        m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
    }
1702
    // VADD (double precision): rd = rn + rm.
    void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
    }

    // VCMP (double precision): compare rd with rm, setting the FPSCR flags.
    void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        // VFPOperand(4)/(5) select the compare variant in the opcode's operand field.
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
    }

    // VCMP with zero: compare rd against +0.0.
    void vcmpz(FPDoubleRegisterID rd)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
    }

    // VCVT: signed 32-bit integer (in a single register) -> double.
    void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
    }

    // VCVT: double -> signed 32-bit integer, rounding toward zero.
    void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
    }

    // VCVT: double -> unsigned 32-bit integer, rounding toward zero.
    void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
    }

    // VDIV (double precision): rd = rn / rm.
    void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
    }

    // VLDR: load a double from [rn + imm].
    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
    }

    // FLDS: load a single-precision value from [rn + imm].
    void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
    }

    // VMOV: single-precision FP register -> core register.
    void vmov(RegisterID rd, FPSingleRegisterID rn)
    {
        ASSERT(!BadReg(rd));
        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
    }

    // VMOV: core register -> single-precision FP register.
    void vmov(FPSingleRegisterID rd, RegisterID rn)
    {
        ASSERT(!BadReg(rn));
        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
    }
1762
    // VMOV: double FP register -> two core registers (rd1 = low word, rd2 = high word).
    void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
    {
        ASSERT(!BadReg(rd1));
        ASSERT(!BadReg(rd2));
        // rd1 | 16 marks the operand as a core register in the VFPOperand encoding.
        m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
    }

    // VMOV: two core registers -> double FP register.
    void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
    {
        ASSERT(!BadReg(rn1));
        ASSERT(!BadReg(rn2));
        m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
    }

    // VMOV (register): copy one double FP register to another.
    void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
    {
        m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
    }

    // VMRS: transfer FPSCR to a core register. The default reg == pc selects
    // the special form that copies the FP status flags into the APSR.
    void vmrs(RegisterID reg = ARMRegisters::pc)
    {
        ASSERT(reg != ARMRegisters::sp);
        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
    }

    // VMUL (double precision): rd = rn * rm.
    void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
    }

    // VSTR: store a double to [rn + imm].
    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
    }

    // FSTS: store a single-precision value to [rn + imm].
    void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
    }

    // VSUB (double precision): rd = rn - rm.
    void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
    }

    // VABS (double precision): rd = |rm|.
    void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
    }

    // VNEG (double precision): rd = -rm.
    void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
    }

    // VSQRT (double precision): rd = sqrt(rm).
    void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
    }

    // VCVT: single precision -> double precision.
    void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
    }

    // VCVT: double precision -> single precision.
    void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
    }
1832
    // Emit a 16-bit NOP.
    void nop()
    {
        m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
    }

    // Return a label for the current position in the instruction stream.
    AssemblerLabel label()
    {
        return m_formatter.label();
    }

    // Pad with breakpoint instructions until the buffer is aligned, then
    // return a label at the aligned position.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            bkpt();

        return label();
    }

    // Translate a label offset into an absolute address within relocated code.
    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    // Byte distance from label a to label b (positive if b is later).
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }
1861
    // Return the compaction offset recorded (by recordLinkOffsets) for the
    // instruction word just before 'location'; 0 for the start of the buffer.
    int executableOffsetFor(int location)
    {
        if (!location)
            return 0;
        return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
    }

    // Bytes saved by emitting 'jumpLinkType' instead of the padded 'jumpType' slot.
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }

    // Assembler admin methods:

    // Orders link records by source offset, for sorting in jumpsToLink().
    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    bool canCompact(JumpType jumpType)
    {
        // The following cannot be compacted:
        // JumpFixed: represents custom jump sequence
        // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
        // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
    }
1886
    // Choose the shortest branch encoding that can span from 'from' to 'to'.
    // Candidates are tried smallest-first; an encoding that might trigger the
    // Cortex-A8 branch erratum is skipped in favor of a larger one.
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        if (jumpType == JumpFixed)
            return LinkInvalid;

        // for patchable jump we must leave space for the longest code sequence
        if (jumpType == JumpNoConditionFixedSize)
            return LinkBX;
        if (jumpType == JumpConditionFixedSize)
            return LinkConditionalBX;

        // 'from' points past the padded slot; each candidate's location is
        // computed so the chosen instruction ends exactly at 'from'.
        const int paddingSize = JUMP_ENUM_SIZE(jumpType);
        bool mayTriggerErrata = false;

        if (jumpType == JumpCondition) {
            // 2-byte conditional T1
            const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
            if (canBeJumpT1(jumpT1Location, to))
                return LinkJumpT1;
            // 4-byte conditional T3
            const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
            if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT3;
            }
            // 4-byte conditional T4 with IT
            const uint16_t* conditionalJumpT4Location =
            reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
            if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkConditionalJumpT4;
            }
        } else {
            // 2-byte unconditional T2
            const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
            if (canBeJumpT2(jumpT2Location, to))
                return LinkJumpT2;
            // 4-byte unconditional T4
            const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
            if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT4;
            }
            // use long jump sequence
            return LinkBX;
        }

        // No short conditional form fits; fall back to the long conditional sequence.
        ASSERT(jumpType == JumpCondition);
        return LinkConditionalBX;
    }

    // Convenience overload: compute the link type and record it on the LinkRecord.
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }
1944
    // Fill the per-word offset table (read back by executableOffsetFor) with
    // 'offset' for every 32-bit word in [regionStart, regionEnd).
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        int32_t ptr = regionStart / sizeof(int32_t);
        const int32_t end = regionEnd / sizeof(int32_t);
        int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
        while (ptr < end)
            offsets[ptr++] = offset;
    }

    // Return the pending jump records, sorted by source offset.
    Vector<LinkRecord>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
1959
    // Patch a single jump at 'from' to target 'to', dispatching on the
    // encoding previously chosen by computeJumpType().
    void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpT1:
            linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT2:
            linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT3:
            linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT4:
            linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkConditionalJumpT4:
            linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkConditionalBX:
            linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkBX:
            linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }

    // Raw pointer to the in-progress code buffer.
    void* unlinkedCode() { return m_formatter.data(); }
    // Number of bytes emitted so far.
    size_t codeSize() const { return m_formatter.codeSize(); }

    // The call label is recorded after the call sequence, so its offset is
    // already the return address offset.
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }
1998
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
2006
2007 void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
2008 {
2009 ASSERT(to.isSet());
2010 ASSERT(from.isSet());
2011 m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
2012 }
2013
2014 static void linkJump(void* code, AssemblerLabel from, void* to)
2015 {
2016 ASSERT(from.isSet());
2017
2018 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
2019 linkJumpAbsolute(location, to);
2020 }
2021
2022 static void linkCall(void* code, AssemblerLabel from, void* to)
2023 {
2024 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
2025 ASSERT(from.isSet());
2026 ASSERT(reinterpret_cast<intptr_t>(to) & 1);
2027
2028 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
2029 }
2030
    // Store 'value' into the MOVW/MOVT pair ending at offset 'where' within 'code'.
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }
2035
    // Re-point an already-linked jump at 'from' to a new target, then flush
    // the instruction cache. Flushes 5 halfwords because linkJumpAbsolute may
    // rewrite up to instruction[-5..-1].
    static void relinkJump(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));

        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);

        cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
    }
2045
    // Re-point an already-linked call at 'from' to a new target. 'to' must
    // carry the Thumb bit. setPointer performs the cache flush itself.
    static void relinkCall(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(reinterpret_cast<intptr_t>(to) & 1);

        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
    }
2053
    // Read back the target of a call planted ending at 'from' (decodes the
    // preceding MOVW/MOVT pair).
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
    }
2058
    // Rewrite the 32-bit immediate held in the MOVW/MOVT pair ending at 'where'.
    static void repatchInt32(void* where, int32_t value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setInt32(where, value);
    }
2065
2066 static void repatchCompact(void* where, int32_t value)
2067 {
2068 ASSERT(value >= 0);
2069 ASSERT(ARMThumbImmediate::makeUInt12(value).isUInt7());
2070 setUInt7ForLoad(where, ARMThumbImmediate::makeUInt12(value));
2071 }
2072
    // Rewrite the pointer held in the MOVW/MOVT pair ending at 'where'.
    static void repatchPointer(void* where, void* value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setPointer(where, value);
    }
2079
    // Read back the pointer held in the MOVW/MOVT pair ending at 'where'.
    static void* readPointer(void* where)
    {
        return reinterpret_cast<void*>(readInt32(where));
    }

    // Debug-only: current offset in the underlying buffer.
    unsigned debugOffset() { return m_formatter.debugOffset(); }
2086
    // Synchronize the instruction cache with freshly written code in
    // [code, code + size). Must be called after patching before execution.
    static void cacheFlush(void* code, size_t size)
    {
#if OS(IOS)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
        // Invoke the ARM Linux cache-flush syscall directly. r7 holds the
        // syscall number (0xf0002 — presumably __ARM_NR_cacheflush; verify
        // against kernel headers) and is also the Thumb frame pointer, so it
        // is saved/restored around the call. r0/r1 bound the range, r2 = 0.
        asm volatile(
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "movw r7, #0x2\n"
            "movt r7, #0xf\n"
            "movs r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
#elif OS(WINCE)
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#elif OS(QNX)
#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
        msync(code, size, MS_INVALIDATE_ICACHE);
#else
        // With W^X exclusive pools the flush is handled elsewhere; nothing to do.
        UNUSED_PARAM(code);
        UNUSED_PARAM(size);
#endif
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }
2117
2118 private:
2119 // VFP operations commonly take one or more 5-bit operands, typically representing a
2120 // floating point register number. This will commonly be encoded in the instruction
2121 // in two parts, with one single bit field, and one 4-bit field. In the case of
2122 // double precision operands the high bit of the register number will be encoded
2123 // separately, and for single precision operands the high bit of the register number
2124 // will be encoded individually.
2125 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2126 // field to be encoded together in the instruction (the low 4-bits of a double
2127 // register number, or the high 4-bits of a single register number), and bit 4
2128 // contains the bit value to be encoded individually.
    struct VFPOperand {
        // Construct from a raw, already-encoded 5-bit operand value.
        explicit VFPOperand(uint32_t value)
            : m_value(value)
        {
            ASSERT(!(m_value & ~0x1f));
        }

        // Double-precision registers: register number used as-is (bit 4 is
        // the individually-encoded high bit).
        VFPOperand(FPDoubleRegisterID reg)
            : m_value(reg)
        {
        }

        // Core (integer) registers: register number used as-is.
        VFPOperand(RegisterID reg)
            : m_value(reg)
        {
        }

        // Single-precision registers: the low bit of the register number is
        // the individually-encoded bit, so move it to bit 4.
        VFPOperand(FPSingleRegisterID reg)
            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        {
        }

        // The single individually-encoded bit (bit 4).
        uint32_t bits1()
        {
            return m_value >> 4;
        }

        // The 4-bit field encoded together in the instruction (bits 0..3).
        uint32_t bits4()
        {
            return m_value & 0xf;
        }

        uint32_t m_value;
    };
2163
2164 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
2165 {
2166 // Cannot specify rounding when converting to float.
2167 ASSERT(toInteger || !isRoundZero);
2168
2169 uint32_t op = 0x8;
2170 if (toInteger) {
2171 // opc2 indicates both toInteger & isUnsigned.
2172 op |= isUnsigned ? 0x4 : 0x5;
2173 // 'op' field in instruction is isRoundZero
2174 if (isRoundZero)
2175 op |= 0x10;
2176 } else {
2177 ASSERT(!isRoundZero);
2178 // 'op' field in instruction is isUnsigned
2179 if (!isUnsigned)
2180 op |= 0x10;
2181 }
2182 return VFPOperand(op);
2183 }
2184
    // Rewrite the 32-bit immediate of the MOVW/MOVT pair that ends at 'code'
    // (location[-4..-1]), preserving the destination register already encoded
    // in the second halfword of each instruction, then flush the icache.
    static void setInt32(void* code, uint32_t value)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
        // (location[-3] >> 8) & 0xf / (location[-1] >> 8) & 0xf pick out the
        // existing destination register fields.
        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);

        cacheFlush(location - 4, 4 * sizeof(uint16_t));
    }
2199
    // Inverse of setInt32: decode the 32-bit immediate out of the MOVW/MOVT
    // pair ending at 'code'.
    static int32_t readInt32(void* code)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16;
        ARMThumbImmediate hi16;
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
        uint32_t result = hi16.asUInt16();
        result <<= 16;
        result |= lo16.asUInt16();
        return static_cast<int32_t>(result);
    }
2216
    // Patch the imm5 field (bits 6..10) of an LDR (immediate, encoding T1)
    // at 'code'. The byte offset is stored scaled down by 4 (word offset),
    // hence the >> 2 before insertion. Flushes the patched halfword.
    static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
    {
        // Requires us to have planted a LDR_imm_T1
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt7());
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
        location[0] |= (imm.getUInt7() >> 2) << 6;
        cacheFlush(location, sizeof(uint16_t));
    }
2227
    // Store a pointer via setInt32 (32-bit pointers on ARMv7).
    static void setPointer(void* code, void* value)
    {
        setInt32(code, reinterpret_cast<uint32_t>(value));
    }
2232
    // True if 'address' holds a B (encoding T4) 32-bit unconditional branch.
    static bool isB(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
    }

    // True if 'address' holds a BX <reg> instruction.
    static bool isBX(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] & 0xff87) == OP_BX;
    }

    // True if 'address' holds the first halfword of a MOVW (MOV imm, T3).
    static bool isMOV_imm_T3(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
    }

    // True if 'address' holds the first halfword of a MOVT.
    static bool isMOVT(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
    }

    // True if 'address' holds a 16-bit NOP (encoding T1).
    static bool isNOP_T1(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return instruction[0] == OP_NOP_T1;
    }

    // True if 'address' holds a 32-bit NOP (encoding T2).
    static bool isNOP_T2(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
    }
2268
    // True if the displacement from 'instruction' to 'target' fits the
    // conditional B encoding T1 (signed 9-bit displacement).
    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        // Sign-extension round-trip checks 'relative' fits in 9 signed bits.
        // NOTE(review): left-shifting a negative signed value is formally UB;
        // relies on the compiler's two's-complement behavior — assumed safe on
        // the supported toolchains.
        return ((relative << 23) >> 23) == relative;
    }
2281
    // True if the displacement from 'instruction' to 'target' fits the
    // unconditional B encoding T2 (signed 12-bit displacement).
    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        // Sign-extension round-trip checks 'relative' fits in 12 signed bits.
        return ((relative << 20) >> 20) == relative;
    }
2294
    // True if the displacement fits the conditional B encoding T3 (signed
    // 21-bit) AND planting it here would not hit the Cortex-A8 branch erratum.
    // 'mayTriggerErrata' reports whether the instruction straddles a 4KiB boundary.
    static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branches back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        // Sign-extension round-trip checks 'relative' fits in 21 signed bits.
        return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
    }
2315
    // True if the displacement fits the unconditional B encoding T4 (signed
    // 25-bit) AND planting it here would not hit the Cortex-A8 branch erratum.
    // 'mayTriggerErrata' reports whether the instruction straddles a 4KiB boundary.
    static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branches back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        // Sign-extension round-trip checks 'relative' fits in 25 signed bits.
        return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
    }
2336
2337 void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
2338 {
2339 // FIMXE: this should be up in the MacroAssembler layer. :-(
2340 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2341 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2342 ASSERT(canBeJumpT1(instruction, target));
2343
2344 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2345 // It does not appear to be documented in the ARM ARM (big surprise), but
2346 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2347 // less than the actual displacement.
2348 relative -= 2;
2349
2350 // All branch offsets should be an even distance.
2351 ASSERT(!(relative & 1));
2352 instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
2353 }
2354
    // Plant an unconditional B (encoding T2) at instruction[-1] branching to 'target'.
    static void linkJumpT2(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT2(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
    }
2372
2373 void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
2374 {
2375 // FIMXE: this should be up in the MacroAssembler layer. :-(
2376 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2377 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2378 bool scratch;
2379 UNUSED_PARAM(scratch);
2380 ASSERT(canBeJumpT3(instruction, target, scratch));
2381
2382 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2383
2384 // All branch offsets should be an even distance.
2385 ASSERT(!(relative & 1));
2386 instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2387 instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2388 }
2389
    // Plant an unconditional B (encoding T4) at instruction[-2..-1] branching
    // to 'target'.
    static void linkJumpT4(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        bool scratch;
        UNUSED_PARAM(scratch);
        ASSERT(canBeJumpT4(instruction, target, scratch));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // ARM encoding for the top two bits below the sign bit is 'peculiar'.
        // (T4 stores them as J1/J2 = NOT(bit) XOR sign; flipping them for
        // non-negative displacements produces the required encoding.)
        if (relative >= 0)
            relative ^= 0xC00000;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
    }
2409
    // Plant an IT instruction at instruction[-3] predicating the T4 branch
    // planted at instruction[-2..-1], giving a conditional long branch.
    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        instruction[-3] = ifThenElse(cond) | OP_IT;
        linkJumpT4(instruction, target);
    }
2419
    // Plant a 5-halfword absolute jump ending at 'instruction':
    // MOVW/MOVT of (target | 1) into ip, followed by BX ip. The +1 sets the
    // Thumb bit so the BX stays in Thumb state.
    static void linkBX(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
    }
2435
    // Conditional absolute jump: plant the 5-halfword BX sequence, then an IT
    // at instruction[-6] predicating it (ifThenElse(cond, true, true) covers
    // the MOVT/BX halves following the MOVW — all three under the condition).
    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        linkBX(instruction, target);
        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
    }
2445
    // Re-target the 5-halfword jump slot ending at 'instruction': use a short
    // NOP-padded T4 branch when the displacement fits, otherwise a full
    // MOVW/MOVT + BX through ip.
    static void linkJumpAbsolute(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        // The slot must currently hold one of the two shapes we plant.
        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
            || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));

        bool scratch;
        if (canBeJumpT4(instruction, target, scratch)) {
            // There may be a better way to fix this, but right now put the NOPs first, since in the
            // case of a conditional branch this will be coming after an ITTT predicating *three*
            // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
            // variable width encoding - the previous instruction might *look* like an ITTT but
            // actually be the second half of a 2-word op.
            instruction[-5] = OP_NOP_T1;
            instruction[-4] = OP_NOP_T2a;
            instruction[-3] = OP_NOP_T2b;
            linkJumpT4(instruction, target);
        } else {
            // Long form: MOVW/MOVT (target | 1) into ip, then BX ip.
            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
            instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
            instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
            instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
            instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
            instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
        }
    }
2477
    // Assemble the first halfword of a MOVW/MOVT-style two-word op:
    // the immediate's 'i' bit goes to bit 10, imm4 to bits 0..3.
    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
    {
        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
    }

    // Inverse of twoWordOp5i6Imm4Reg4EncodedImmFirst: extract i and imm4.
    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
    {
        result.m_value.i = (value >> 10) & 1;
        result.m_value.imm4 = value & 15;
    }

    // Assemble the second halfword: imm3 in bits 12..14, destination
    // register in bits 8..11, imm8 in bits 0..7.
    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
    {
        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
    }

    // Inverse of twoWordOp5i6Imm4Reg4EncodedImmSecond: extract imm3 and imm8.
    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
    {
        result.m_value.imm3 = (value >> 12) & 7;
        result.m_value.imm8 = value & 255;
    }
2499
    // Low-level emitter: each method packs operands into one or two 16-bit
    // halfwords and appends them to the assembler buffer. Method names encode
    // the field layout (field kind + bit width, most-significant first).
    class ARMInstructionFormatter {
    public:
        // 5-bit opcode | 3-bit register (bits 8..10) | 8-bit immediate.
        ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
        {
            m_buffer.putShort(op | (rd << 8) | imm);
        }

        // 5-bit opcode | 5-bit immediate (bits 6..10) | two 3-bit registers.
        ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
        }

        // 7-bit opcode | three 3-bit registers.
        ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
        {
            m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
        }

        // 8-bit opcode | 8-bit immediate.
        ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // 8-bit opcode | reg2's high bit at bit 7, reg1 at bits 3..5,
        // reg2's low 3 bits at bits 0..2 (the "hi register" encoding).
        ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
        }

        // 9-bit opcode | 7-bit immediate.
        ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // 10-bit opcode | two 3-bit registers.
        ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (reg1 << 3) | reg2);
        }

        // Two halfwords: 12-bit opcode + 4-bit register, then four 4-bit fields.
        ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
        {
            m_buffer.putShort(op | reg);
            m_buffer.putShort(ff.m_u.value);
        }

        // Two halfwords: full 16-bit opcode, then four 4-bit fields.
        ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
        {
            m_buffer.putShort(op);
            m_buffer.putShort(ff.m_u.value);
        }

        // Two halfwords: two full 16-bit opcodes (no operand fields).
        ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
        {
            m_buffer.putShort(op1);
            m_buffer.putShort(op2);
        }

        // MOVW/MOVT-style two-word op: the caller-supplied imm4 overrides the
        // immediate's imm4 field before encoding.
        ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
        {
            ARMThumbImmediate newImm = imm;
            newImm.m_value.imm4 = imm4;

            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
        }

        // Two halfwords: 12-bit opcode + reg1, then reg2 (bits 12..15) + 12-bit immediate.
        ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((reg2 << 12) | imm);
        }

        // Two halfwords: 12-bit opcode + reg1, then imm1/reg2/imm2/imm3 packed
        // into the second halfword (shifted-register style encoding).
        ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
        }

        // Formats up instructions of the pattern:
        //    111111111B11aaaa:bbbb222SA2C2cccc
        // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
        // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
        ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
        {
            ASSERT(!(op1 & 0x004f));
            ASSERT(!(op2 & 0xf1af));
            m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
            m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
        }

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // (i.e. +/-(0..255) 32-bit words)
        ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
        {
            // Split the signed byte offset into a magnitude and an up/down bit.
            bool up = true;
            if (imm < 0) {
                imm = -imm;
                up = false;
            }

            uint32_t offset = imm;
            ASSERT(!(offset & ~0x3fc));
            offset >>= 2;

            m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
            m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
        }

        // Administrative methods:

        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        unsigned debugOffset() { return m_buffer.debugOffset(); }

    private:
        AssemblerBuffer m_buffer;
    } m_formatter;
2618
2619 Vector<LinkRecord> m_jumpsToLink;
2620 Vector<int32_t> m_offsets;
2621 };
2622
2623 } // namespace JSC
2624
2625 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2626
2627 #endif // ARMAssembler_h