1 /*
2 * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef ARMAssembler_h
28 #define ARMAssembler_h
29
30 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
31
32 #include "AssemblerBuffer.h"
33 #include <limits.h>
34 #include <wtf/Assertions.h>
35 #include <wtf/Vector.h>
36 #include <stdint.h>
37
38 namespace JSC {
39
40 namespace ARMRegisters {
41 typedef enum {
42 r0,
43 r1,
44 r2,
45 r3,
46 r4,
47 r5,
48 r6,
49 r7, fp = r7, // frame pointer
50 r8,
51 r9, sb = r9, // static base
52 r10, sl = r10, // stack limit
53 r11,
54 r12, ip = r12,
55 r13, sp = r13,
56 r14, lr = r14,
57 r15, pc = r15,
58 } RegisterID;
59
60 typedef enum {
61 s0,
62 s1,
63 s2,
64 s3,
65 s4,
66 s5,
67 s6,
68 s7,
69 s8,
70 s9,
71 s10,
72 s11,
73 s12,
74 s13,
75 s14,
76 s15,
77 s16,
78 s17,
79 s18,
80 s19,
81 s20,
82 s21,
83 s22,
84 s23,
85 s24,
86 s25,
87 s26,
88 s27,
89 s28,
90 s29,
91 s30,
92 s31,
93 } FPSingleRegisterID;
94
95 typedef enum {
96 d0,
97 d1,
98 d2,
99 d3,
100 d4,
101 d5,
102 d6,
103 d7,
104 d8,
105 d9,
106 d10,
107 d11,
108 d12,
109 d13,
110 d14,
111 d15,
112 d16,
113 d17,
114 d18,
115 d19,
116 d20,
117 d21,
118 d22,
119 d23,
120 d24,
121 d25,
122 d26,
123 d27,
124 d28,
125 d29,
126 d30,
127 d31,
128 } FPDoubleRegisterID;
129
130 typedef enum {
131 q0,
132 q1,
133 q2,
134 q3,
135 q4,
136 q5,
137 q6,
138 q7,
139 q8,
140 q9,
141 q10,
142 q11,
143 q12,
144 q13,
145 q14,
146 q15,
147 q16,
148 q17,
149 q18,
150 q19,
151 q20,
152 q21,
153 q22,
154 q23,
155 q24,
156 q25,
157 q26,
158 q27,
159 q28,
160 q29,
161 q30,
162 q31,
163 } FPQuadRegisterID;
164
165 inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
166 {
167 ASSERT(reg < d16);
168 return (FPSingleRegisterID)(reg << 1);
169 }
170
171 inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
172 {
173 ASSERT(!(reg & 1));
174 return (FPDoubleRegisterID)(reg >> 1);
175 }
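// Illustrative note (an addition, not in the original source): each of
// d0-d15 aliases a pair of single-precision registers, so for example
// asSingle(d3) yields s6 (d3 overlaps s6/s7) and asDouble(s6) yields d3.
// asDouble(s7) would trip the ASSERT above: an odd single register is the
// high half of a double and has no FPDoubleRegisterID of its own.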
176
177 #if USE(MASM_PROBE)
178 #define FOR_EACH_CPU_REGISTER(V) \
179 FOR_EACH_CPU_GPREGISTER(V) \
180 FOR_EACH_CPU_SPECIAL_REGISTER(V) \
181 FOR_EACH_CPU_FPREGISTER(V)
182
183 #define FOR_EACH_CPU_GPREGISTER(V) \
184 V(void*, r0) \
185 V(void*, r1) \
186 V(void*, r2) \
187 V(void*, r3) \
188 V(void*, r4) \
189 V(void*, r5) \
190 V(void*, r6) \
191 V(void*, r7) \
192 V(void*, r8) \
193 V(void*, r9) \
194 V(void*, r10) \
195 V(void*, r11) \
196 V(void*, ip) \
197 V(void*, sp) \
198 V(void*, lr) \
199 V(void*, pc)
200
201 #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
202 V(void*, apsr) \
203         V(void*, fpscr)
204
205 #define FOR_EACH_CPU_FPREGISTER(V) \
206 V(double, d0) \
207 V(double, d1) \
208 V(double, d2) \
209 V(double, d3) \
210 V(double, d4) \
211 V(double, d5) \
212 V(double, d6) \
213 V(double, d7) \
214 V(double, d8) \
215 V(double, d9) \
216 V(double, d10) \
217 V(double, d11) \
218 V(double, d12) \
219 V(double, d13) \
220 V(double, d14) \
221 V(double, d15) \
222 FOR_EACH_CPU_FPREGISTER_EXTENSION(V)
223
224 #if CPU(APPLE_ARMV7S)
225 #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \
226 V(double, d16) \
227 V(double, d17) \
228 V(double, d18) \
229 V(double, d19) \
230 V(double, d20) \
231 V(double, d21) \
232 V(double, d22) \
233 V(double, d23) \
234 V(double, d24) \
235 V(double, d25) \
236 V(double, d26) \
237 V(double, d27) \
238 V(double, d28) \
239 V(double, d29) \
240 V(double, d30) \
241 V(double, d31)
242 #else
243 #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add.
244 #endif // CPU(APPLE_ARMV7S)
245
246 #endif // USE(MASM_PROBE)
247 }
248
249 class ARMv7Assembler;
250 class ARMThumbImmediate {
251 friend class ARMv7Assembler;
252
253 typedef uint8_t ThumbImmediateType;
254 static const ThumbImmediateType TypeInvalid = 0;
255 static const ThumbImmediateType TypeEncoded = 1;
256 static const ThumbImmediateType TypeUInt16 = 2;
257
258 typedef union {
259 int16_t asInt;
260 struct {
261 unsigned imm8 : 8;
262 unsigned imm3 : 3;
263 unsigned i : 1;
264 unsigned imm4 : 4;
265 };
266 // If this is an encoded immediate, then it may describe a shift, or a pattern.
267 struct {
268 unsigned shiftValue7 : 7;
269 unsigned shiftAmount : 5;
270 };
271 struct {
272 unsigned immediate : 8;
273 unsigned pattern : 4;
274 };
275 } ThumbImmediateValue;
276
277     // byte0 contains the least significant byte; not using an array keeps client code endian agnostic.
278 typedef union {
279 int32_t asInt;
280 struct {
281 uint8_t byte0;
282 uint8_t byte1;
283 uint8_t byte2;
284 uint8_t byte3;
285 };
286 } PatternBytes;
287
288 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
289 {
290         if (value & ~((1 << N) - 1)) /* check whether any of the top N bits (of 2N bits) are set */
291 value >>= N; /* if any were set, lose the bottom N */
292 else /* if none of the top N bits are set, */
293 zeros += N; /* then we have identified N leading zeros */
294 }
295
296 static int32_t countLeadingZeros(uint32_t value)
297 {
298 if (!value)
299 return 32;
300
301 int32_t zeros = 0;
302 countLeadingZerosPartial(value, zeros, 16);
303 countLeadingZerosPartial(value, zeros, 8);
304 countLeadingZerosPartial(value, zeros, 4);
305 countLeadingZerosPartial(value, zeros, 2);
306 countLeadingZerosPartial(value, zeros, 1);
307 return zeros;
308 }
309
310 ARMThumbImmediate()
311 : m_type(TypeInvalid)
312 {
313 m_value.asInt = 0;
314 }
315
316 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
317 : m_type(type)
318 , m_value(value)
319 {
320 }
321
322 ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
323 : m_type(TypeUInt16)
324 {
325 // Make sure this constructor is only reached with type TypeUInt16;
326 // this extra parameter makes the code a little clearer by making it
327 // explicit at call sites which type is being constructed
328 ASSERT_UNUSED(type, type == TypeUInt16);
329
330 m_value.asInt = value;
331 }
332
333 public:
334 static ARMThumbImmediate makeEncodedImm(uint32_t value)
335 {
336 ThumbImmediateValue encoding;
337 encoding.asInt = 0;
338
339 // okay, these are easy.
340 if (value < 256) {
341 encoding.immediate = value;
342 encoding.pattern = 0;
343 return ARMThumbImmediate(TypeEncoded, encoding);
344 }
345
346 int32_t leadingZeros = countLeadingZeros(value);
347 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
348 ASSERT(leadingZeros < 24);
349
350 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
351 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
352 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
353 int32_t rightShiftAmount = 24 - leadingZeros;
354 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
355             // Shift the value down to the low byte position. The assignment to
356             // shiftValue7 drops the implicit top bit.
357 encoding.shiftValue7 = value >> rightShiftAmount;
358             // The encoded shift amount is the magnitude of a right rotate.
359 encoding.shiftAmount = 8 + leadingZeros;
360 return ARMThumbImmediate(TypeEncoded, encoding);
361 }
362
363 PatternBytes bytes;
364 bytes.asInt = value;
365
366 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
367 encoding.immediate = bytes.byte0;
368 encoding.pattern = 3;
369 return ARMThumbImmediate(TypeEncoded, encoding);
370 }
371
372 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
373 encoding.immediate = bytes.byte0;
374 encoding.pattern = 1;
375 return ARMThumbImmediate(TypeEncoded, encoding);
376 }
377
378 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
379 encoding.immediate = bytes.byte1;
380 encoding.pattern = 2;
381 return ARMThumbImmediate(TypeEncoded, encoding);
382 }
383
384 return ARMThumbImmediate();
385 }
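// Examples of the Thumb-2 modified-immediate forms handled above (an
// illustrative addition, not in the original source):
//   makeEncodedImm(0x000000ab) -> immediate 0xab, pattern 0 (0x000000XY)
//   makeEncodedImm(0x00ab00ab) -> immediate 0xab, pattern 1 (0x00XY00XY)
//   makeEncodedImm(0xab00ab00) -> immediate 0xab, pattern 2 (0xXY00XY00)
//   makeEncodedImm(0xabababab) -> immediate 0xab, pattern 3 (0xXYXYXYXY)
//   makeEncodedImm(0x0003fc00) -> shifted byte: 0xff rotated right by 22
//     (leadingZeros = 14, shiftValue7 = 0x7f, shiftAmount = 8 + 14 = 22)
//   makeEncodedImm(0x00ab00cd) -> fits no form; the result is !isValid().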
386
387 static ARMThumbImmediate makeUInt12(int32_t value)
388 {
389 return (!(value & 0xfffff000))
390 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
391 : ARMThumbImmediate();
392 }
393
394 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
395 {
396         // If this does not fit in 12 bits unsigned, try making an encoded immediate.
397 return (!(value & 0xfffff000))
398 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
399 : makeEncodedImm(value);
400 }
401
402     // The 'make' methods, above, return a !isValid() value if the argument
403     // cannot be represented as the requested type. This method, by contrast,
404     // always succeeds, since any uint16_t argument can be represented.
405 static ARMThumbImmediate makeUInt16(uint16_t value)
406 {
407 return ARMThumbImmediate(TypeUInt16, value);
408 }
409
410 bool isValid()
411 {
412 return m_type != TypeInvalid;
413 }
414
415 uint16_t asUInt16() const { return m_value.asInt; }
416
417 // These methods rely on the format of encoded byte values.
418 bool isUInt3() { return !(m_value.asInt & 0xfff8); }
419 bool isUInt4() { return !(m_value.asInt & 0xfff0); }
420 bool isUInt5() { return !(m_value.asInt & 0xffe0); }
421 bool isUInt6() { return !(m_value.asInt & 0xffc0); }
422 bool isUInt7() { return !(m_value.asInt & 0xff80); }
423 bool isUInt8() { return !(m_value.asInt & 0xff00); }
424 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
425 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
426 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
427 bool isUInt16() { return m_type == TypeUInt16; }
428 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
429 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
430 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
431 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
432 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
433 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
434 uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
435 uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
436 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
437 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
438
439 bool isEncodedImm() { return m_type == TypeEncoded; }
440
441 private:
442 ThumbImmediateType m_type;
443 ThumbImmediateValue m_value;
444 };
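// A minimal usage sketch (an illustrative addition): callers build
// immediates with the static make* helpers and test isValid() before
// emitting, falling back to materializing the constant otherwise, e.g.:
//
//     ARMThumbImmediate imm = ARMThumbImmediate::makeEncodedImm(value);
//     if (imm.isValid())
//         assembler.add(rd, rn, imm);   // single instruction
//     else
//         /* load 'value' into a scratch register (mov imm16 + movt),
//            then use the register-register form of the instruction */;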
445
446 typedef enum {
447 SRType_LSL,
448 SRType_LSR,
449 SRType_ASR,
450 SRType_ROR,
451
452 SRType_RRX = SRType_ROR
453 } ARMShiftType;
454
455 class ShiftTypeAndAmount {
456 friend class ARMv7Assembler;
457
458 public:
459 ShiftTypeAndAmount()
460 {
461 m_u.type = (ARMShiftType)0;
462 m_u.amount = 0;
463 }
464
465 ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
466 {
467 m_u.type = type;
468 m_u.amount = amount & 31;
469 }
470
471 unsigned lo4() { return m_u.lo4; }
472 unsigned hi4() { return m_u.hi4; }
473
474 private:
475 union {
476 struct {
477 unsigned lo4 : 4;
478 unsigned hi4 : 4;
479 };
480 struct {
481 unsigned type : 2;
482 unsigned amount : 6;
483 };
484 } m_u;
485 };
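// Bit-layout note (added for clarity): the two views of m_u overlap so
// that lo4/hi4 slice the {type:2, amount:6} encoding at bit 4. For
// ShiftTypeAndAmount(SRType_LSL, 5): type = 0, amount = 5, hence
// lo4 = ((amount & 3) << 2) | type = 0x4 and hi4 = amount >> 2 = 0x1,
// which land in the imm2:type and imm3 fields of a Thumb-2
// shifted-register encoding.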
486
487 class ARMv7Assembler {
488 public:
489 typedef ARMRegisters::RegisterID RegisterID;
490 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
491 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
492 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
493 typedef FPDoubleRegisterID FPRegisterID;
494
495 static RegisterID firstRegister() { return ARMRegisters::r0; }
496 static RegisterID lastRegister() { return ARMRegisters::r13; }
497
498 static FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
499 static FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
500
501 // (HS, LO, HI, LS) -> (AE, B, A, BE)
502 // (VS, VC) -> (O, NO)
503 typedef enum {
504 ConditionEQ, // Zero / Equal.
505 ConditionNE, // Non-zero / Not equal.
506 ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
507 ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
508 ConditionMI, // Negative.
509 ConditionPL, // Positive or zero.
510 ConditionVS, // Overflowed.
511 ConditionVC, // Not overflowed.
512 ConditionHI, // Unsigned higher.
513 ConditionLS, // Unsigned lower or same.
514 ConditionGE, // Signed greater than or equal.
515 ConditionLT, // Signed less than.
516 ConditionGT, // Signed greater than.
517 ConditionLE, // Signed less than or equal.
518 ConditionAL, // Unconditional / Always execute.
519 ConditionInvalid
520 } Condition;
521
522 #define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
523 #define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
524 enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
525 JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
526 JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
527 JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
528 JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
529 };
530 enum JumpLinkType {
531 LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
532 LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
533 LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
534 LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
535 LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
536 LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
537 LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
538 LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
539 };
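// Packing example (an illustrative addition): JUMP_ENUM_WITH_SIZE keeps a
// small index in the low 3 bits and the maximum code size, in bytes, in
// the bits above, which JUMP_ENUM_SIZE recovers. For instance,
// JumpCondition == (12 << 3) | 2: up to 12 bytes, enough for the
// worst-case LinkConditionalBX expansion (it + mov + movt + bx).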
540
541 class LinkRecord {
542 public:
543 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
544 {
545 data.realTypes.m_from = from;
546 data.realTypes.m_to = to;
547 data.realTypes.m_type = type;
548 data.realTypes.m_linkType = LinkInvalid;
549 data.realTypes.m_condition = condition;
550 }
551 void operator=(const LinkRecord& other)
552 {
553 data.copyTypes.content[0] = other.data.copyTypes.content[0];
554 data.copyTypes.content[1] = other.data.copyTypes.content[1];
555 data.copyTypes.content[2] = other.data.copyTypes.content[2];
556 }
557 intptr_t from() const { return data.realTypes.m_from; }
558 void setFrom(intptr_t from) { data.realTypes.m_from = from; }
559 intptr_t to() const { return data.realTypes.m_to; }
560 JumpType type() const { return data.realTypes.m_type; }
561 JumpLinkType linkType() const { return data.realTypes.m_linkType; }
562 void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
563 Condition condition() const { return data.realTypes.m_condition; }
564 private:
565 union {
566 struct RealTypes {
567 intptr_t m_from : 31;
568 intptr_t m_to : 31;
569 JumpType m_type : 8;
570 JumpLinkType m_linkType : 8;
571 Condition m_condition : 16;
572 } realTypes;
573 struct CopyTypes {
574 uint32_t content[3];
575 } copyTypes;
576 COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
577 } data;
578 };
579
580 ARMv7Assembler()
581 : m_indexOfLastWatchpoint(INT_MIN)
582 , m_indexOfTailOfLastWatchpoint(INT_MIN)
583 {
584 }
585
586 AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
587
588 private:
589
590 // ARMv7, Appx-A.6.3
591 static bool BadReg(RegisterID reg)
592 {
593 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
594 }
595
596 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
597 {
598 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
599 if (rdNum & 1)
600 rdMask |= 1 << lowBitShift;
601 return rdMask;
602 }
603
604 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
605 {
606 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
607 if (rdNum & 16)
608 rdMask |= 1 << highBitShift;
609 return rdMask;
610 }
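// Field-splitting note (added for clarity): VFP encodings spread a 5-bit
// register number across a 4-bit field plus one extra bit. For doubles the
// low four bits and the top (D) bit are placed separately, e.g.
// doubleRegisterMask(d17, 22, 12) puts 0x1 at bit 12 and the D bit at
// bit 22; singleRegisterMask is the converse, splitting out the low bit.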
611
612 typedef enum {
613 OP_ADD_reg_T1 = 0x1800,
614 OP_SUB_reg_T1 = 0x1A00,
615 OP_ADD_imm_T1 = 0x1C00,
616 OP_SUB_imm_T1 = 0x1E00,
617 OP_MOV_imm_T1 = 0x2000,
618 OP_CMP_imm_T1 = 0x2800,
619 OP_ADD_imm_T2 = 0x3000,
620 OP_SUB_imm_T2 = 0x3800,
621 OP_AND_reg_T1 = 0x4000,
622 OP_EOR_reg_T1 = 0x4040,
623 OP_TST_reg_T1 = 0x4200,
624 OP_RSB_imm_T1 = 0x4240,
625 OP_CMP_reg_T1 = 0x4280,
626 OP_ORR_reg_T1 = 0x4300,
627 OP_MVN_reg_T1 = 0x43C0,
628 OP_ADD_reg_T2 = 0x4400,
629 OP_MOV_reg_T1 = 0x4600,
630 OP_BLX = 0x4700,
631 OP_BX = 0x4700,
632 OP_STR_reg_T1 = 0x5000,
633 OP_STRH_reg_T1 = 0x5200,
634 OP_STRB_reg_T1 = 0x5400,
635 OP_LDRSB_reg_T1 = 0x5600,
636 OP_LDR_reg_T1 = 0x5800,
637 OP_LDRH_reg_T1 = 0x5A00,
638 OP_LDRB_reg_T1 = 0x5C00,
639 OP_LDRSH_reg_T1 = 0x5E00,
640 OP_STR_imm_T1 = 0x6000,
641 OP_LDR_imm_T1 = 0x6800,
642 OP_STRB_imm_T1 = 0x7000,
643 OP_LDRB_imm_T1 = 0x7800,
644 OP_STRH_imm_T1 = 0x8000,
645 OP_LDRH_imm_T1 = 0x8800,
646 OP_STR_imm_T2 = 0x9000,
647 OP_LDR_imm_T2 = 0x9800,
648 OP_ADD_SP_imm_T1 = 0xA800,
649 OP_ADD_SP_imm_T2 = 0xB000,
650 OP_SUB_SP_imm_T1 = 0xB080,
651 OP_PUSH_T1 = 0xB400,
652 OP_POP_T1 = 0xBC00,
653 OP_BKPT = 0xBE00,
654 OP_IT = 0xBF00,
655 OP_NOP_T1 = 0xBF00,
656 } OpcodeID;
657
658 typedef enum {
659 OP_B_T1 = 0xD000,
660 OP_B_T2 = 0xE000,
661 OP_POP_T2 = 0xE8BD,
662 OP_PUSH_T2 = 0xE92D,
663 OP_AND_reg_T2 = 0xEA00,
664 OP_TST_reg_T2 = 0xEA10,
665 OP_ORR_reg_T2 = 0xEA40,
666 OP_ORR_S_reg_T2 = 0xEA50,
667 OP_ASR_imm_T1 = 0xEA4F,
668 OP_LSL_imm_T1 = 0xEA4F,
669 OP_LSR_imm_T1 = 0xEA4F,
670 OP_ROR_imm_T1 = 0xEA4F,
671 OP_MVN_reg_T2 = 0xEA6F,
672 OP_EOR_reg_T2 = 0xEA80,
673 OP_ADD_reg_T3 = 0xEB00,
674 OP_ADD_S_reg_T3 = 0xEB10,
675 OP_SUB_reg_T2 = 0xEBA0,
676 OP_SUB_S_reg_T2 = 0xEBB0,
677 OP_CMP_reg_T2 = 0xEBB0,
678 OP_VMOV_CtoD = 0xEC00,
679 OP_VMOV_DtoC = 0xEC10,
680 OP_FSTS = 0xED00,
681 OP_VSTR = 0xED00,
682 OP_FLDS = 0xED10,
683 OP_VLDR = 0xED10,
684 OP_VMOV_CtoS = 0xEE00,
685 OP_VMOV_StoC = 0xEE10,
686 OP_VMUL_T2 = 0xEE20,
687 OP_VADD_T2 = 0xEE30,
688 OP_VSUB_T2 = 0xEE30,
689 OP_VDIV = 0xEE80,
690 OP_VABS_T2 = 0xEEB0,
691 OP_VCMP = 0xEEB0,
692 OP_VCVT_FPIVFP = 0xEEB0,
693 OP_VMOV_T2 = 0xEEB0,
694 OP_VMOV_IMM_T2 = 0xEEB0,
695 OP_VMRS = 0xEEB0,
696 OP_VNEG_T2 = 0xEEB0,
697 OP_VSQRT_T1 = 0xEEB0,
698 OP_VCVTSD_T1 = 0xEEB0,
699 OP_VCVTDS_T1 = 0xEEB0,
700 OP_B_T3a = 0xF000,
701 OP_B_T4a = 0xF000,
702 OP_AND_imm_T1 = 0xF000,
703 OP_TST_imm = 0xF010,
704 OP_ORR_imm_T1 = 0xF040,
705 OP_MOV_imm_T2 = 0xF040,
706 OP_MVN_imm = 0xF060,
707 OP_EOR_imm_T1 = 0xF080,
708 OP_ADD_imm_T3 = 0xF100,
709 OP_ADD_S_imm_T3 = 0xF110,
710 OP_CMN_imm = 0xF110,
711 OP_ADC_imm = 0xF140,
712 OP_SUB_imm_T3 = 0xF1A0,
713 OP_SUB_S_imm_T3 = 0xF1B0,
714 OP_CMP_imm_T2 = 0xF1B0,
715 OP_RSB_imm_T2 = 0xF1C0,
716 OP_RSB_S_imm_T2 = 0xF1D0,
717 OP_ADD_imm_T4 = 0xF200,
718 OP_MOV_imm_T3 = 0xF240,
719 OP_SUB_imm_T4 = 0xF2A0,
720 OP_MOVT = 0xF2C0,
721 OP_UBFX_T1 = 0xF3C0,
722 OP_NOP_T2a = 0xF3AF,
723 OP_DMB_SY_T2a = 0xF3BF,
724 OP_STRB_imm_T3 = 0xF800,
725 OP_STRB_reg_T2 = 0xF800,
726 OP_LDRB_imm_T3 = 0xF810,
727 OP_LDRB_reg_T2 = 0xF810,
728 OP_STRH_imm_T3 = 0xF820,
729 OP_STRH_reg_T2 = 0xF820,
730 OP_LDRH_reg_T2 = 0xF830,
731 OP_LDRH_imm_T3 = 0xF830,
732 OP_STR_imm_T4 = 0xF840,
733 OP_STR_reg_T2 = 0xF840,
734 OP_LDR_imm_T4 = 0xF850,
735 OP_LDR_reg_T2 = 0xF850,
736 OP_STRB_imm_T2 = 0xF880,
737 OP_LDRB_imm_T2 = 0xF890,
738 OP_STRH_imm_T2 = 0xF8A0,
739 OP_LDRH_imm_T2 = 0xF8B0,
740 OP_STR_imm_T3 = 0xF8C0,
741 OP_LDR_imm_T3 = 0xF8D0,
742 OP_LDRSB_reg_T2 = 0xF910,
743 OP_LDRSH_reg_T2 = 0xF930,
744 OP_LSL_reg_T2 = 0xFA00,
745 OP_LSR_reg_T2 = 0xFA20,
746 OP_ASR_reg_T2 = 0xFA40,
747 OP_ROR_reg_T2 = 0xFA60,
748 OP_CLZ = 0xFAB0,
749 OP_SMULL_T1 = 0xFB80,
750 #if CPU(APPLE_ARMV7S)
751 OP_SDIV_T1 = 0xFB90,
752 OP_UDIV_T1 = 0xFBB0,
753 #endif
754 } OpcodeID1;
755
756 typedef enum {
757 OP_VADD_T2b = 0x0A00,
758 OP_VDIVb = 0x0A00,
759 OP_FLDSb = 0x0A00,
760 OP_VLDRb = 0x0A00,
761 OP_VMOV_IMM_T2b = 0x0A00,
762 OP_VMOV_T2b = 0x0A40,
763 OP_VMUL_T2b = 0x0A00,
764 OP_FSTSb = 0x0A00,
765 OP_VSTRb = 0x0A00,
766 OP_VMOV_StoCb = 0x0A10,
767 OP_VMOV_CtoSb = 0x0A10,
768 OP_VMOV_DtoCb = 0x0A10,
769 OP_VMOV_CtoDb = 0x0A10,
770 OP_VMRSb = 0x0A10,
771 OP_VABS_T2b = 0x0A40,
772 OP_VCMPb = 0x0A40,
773 OP_VCVT_FPIVFPb = 0x0A40,
774 OP_VNEG_T2b = 0x0A40,
775 OP_VSUB_T2b = 0x0A40,
776 OP_VSQRT_T1b = 0x0A40,
777 OP_VCVTSD_T1b = 0x0A40,
778 OP_VCVTDS_T1b = 0x0A40,
779 OP_NOP_T2b = 0x8000,
780 OP_DMB_SY_T2b = 0x8F5F,
781 OP_B_T3b = 0x8000,
782 OP_B_T4b = 0x9000,
783 } OpcodeID2;
784
785 struct FourFours {
786 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
787 {
788 m_u.f0 = f0;
789 m_u.f1 = f1;
790 m_u.f2 = f2;
791 m_u.f3 = f3;
792 }
793
794 union {
795 unsigned value;
796 struct {
797 unsigned f0 : 4;
798 unsigned f1 : 4;
799 unsigned f2 : 4;
800 unsigned f3 : 4;
801 };
802 } m_u;
803 };
804
805 class ARMInstructionFormatter;
806
807 // false means else!
808 static bool ifThenElseConditionBit(Condition condition, bool isIf)
809 {
810 return isIf ? (condition & 1) : !(condition & 1);
811 }
812 static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
813 {
814 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
815 | (ifThenElseConditionBit(condition, inst3if) << 2)
816 | (ifThenElseConditionBit(condition, inst4if) << 1)
817 | 1;
818 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
819 return (condition << 4) | mask;
820 }
821 static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
822 {
823 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
824 | (ifThenElseConditionBit(condition, inst3if) << 2)
825 | 2;
826 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
827 return (condition << 4) | mask;
828 }
829 static uint8_t ifThenElse(Condition condition, bool inst2if)
830 {
831 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
832 | 4;
833 ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
834 return (condition << 4) | mask;
835 }
836
837 static uint8_t ifThenElse(Condition condition)
838 {
839 int mask = 8;
840 return (condition << 4) | mask;
841 }
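// Worked example (an illustrative addition): the IT mask encodes the block
// length by the position of its trailing 1, and each subsequent slot holds
// the condition's low bit for a 'T' and its complement for an 'E'. For
// ConditionEQ (0b0000):
//   ifThenElse(ConditionEQ)        -> mask 0b1000 (IT EQ)
//   ifThenElse(ConditionEQ, true)  -> mask 0b0100 (ITT EQ)
//   ifThenElse(ConditionEQ, false) -> mask 0b1100 (ITE EQ)
// The returned byte is firstcond:mask, the immediate for OP_IT.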
842
843 public:
844
845 void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
846 {
847 // Rd can only be SP if Rn is also SP.
848 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
849 ASSERT(rd != ARMRegisters::pc);
850 ASSERT(rn != ARMRegisters::pc);
851 ASSERT(imm.isEncodedImm());
852
853 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
854 }
855
856 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
857 {
858 // Rd can only be SP if Rn is also SP.
859 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
860 ASSERT(rd != ARMRegisters::pc);
861 ASSERT(rn != ARMRegisters::pc);
862 ASSERT(imm.isValid());
863
864 if (rn == ARMRegisters::sp && imm.isUInt16()) {
865 ASSERT(!(imm.getUInt16() & 3));
866 if (!(rd & 8) && imm.isUInt10()) {
867 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
868 return;
869 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
870 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
871 return;
872 }
873 } else if (!((rd | rn) & 8)) {
874 if (imm.isUInt3()) {
875 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
876 return;
877 } else if ((rd == rn) && imm.isUInt8()) {
878 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
879 return;
880 }
881 }
882
883 if (imm.isEncodedImm())
884 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
885 else {
886 ASSERT(imm.isUInt12());
887 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
888 }
889 }
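// Selection example (an illustrative addition, immediate arguments
// abbreviated to their values): the cascade above prefers a 16-bit Thumb
// encoding and only falls back to a 32-bit one, e.g.:
//   add(r1, r2, #4)   -> OP_ADD_imm_T1    (16-bit, low registers, imm3)
//   add(r1, r1, #200) -> OP_ADD_imm_T2    (16-bit, Rd == Rn, imm8)
//   add(sp, sp, #64)  -> OP_ADD_SP_imm_T2 (16-bit, SP-relative)
//   add(r8, r1, imm)  -> OP_ADD_imm_T3 or _T4 (32-bit), depending on
//                        whether imm carries an encoded immediate.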
890
891 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
892 {
893 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
894 ASSERT(rd != ARMRegisters::pc);
895 ASSERT(rn != ARMRegisters::pc);
896 ASSERT(!BadReg(rm));
897 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
898 }
899
900 // NOTE: In an IT block, add doesn't modify the flags register.
901 ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
902 {
903 if (rd == ARMRegisters::sp) {
904 mov(rd, rn);
905 rn = rd;
906 }
907
908 if (rd == rn)
909 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
910 else if (rd == rm)
911 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
912 else if (!((rd | rn | rm) & 8))
913 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
914 else
915 add(rd, rn, rm, ShiftTypeAndAmount());
916 }
917
918 // Not allowed in an IT (if then) block.
919 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
920 {
921 // Rd can only be SP if Rn is also SP.
922 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
923 ASSERT(rd != ARMRegisters::pc);
924 ASSERT(rn != ARMRegisters::pc);
925 ASSERT(imm.isEncodedImm());
926
927 if (!((rd | rn) & 8)) {
928 if (imm.isUInt3()) {
929 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
930 return;
931 } else if ((rd == rn) && imm.isUInt8()) {
932 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
933 return;
934 }
935 }
936
937 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
938 }
939
940 // Not allowed in an IT (if then) block?
941 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
942 {
943 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
944 ASSERT(rd != ARMRegisters::pc);
945 ASSERT(rn != ARMRegisters::pc);
946 ASSERT(!BadReg(rm));
947 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
948 }
949
950 // Not allowed in an IT (if then) block.
951 ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
952 {
953 if (!((rd | rn | rm) & 8))
954 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
955 else
956 add_S(rd, rn, rm, ShiftTypeAndAmount());
957 }
958
959 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
960 {
961 ASSERT(!BadReg(rd));
962 ASSERT(!BadReg(rn));
963 ASSERT(imm.isEncodedImm());
964 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
965 }
966
967 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
968 {
969 ASSERT(!BadReg(rd));
970 ASSERT(!BadReg(rn));
971 ASSERT(!BadReg(rm));
972 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
973 }
974
975 ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
976 {
977 if ((rd == rn) && !((rd | rm) & 8))
978 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
979 else if ((rd == rm) && !((rd | rn) & 8))
980 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
981 else
982 ARM_and(rd, rn, rm, ShiftTypeAndAmount());
983 }
984
985 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
986 {
987 ASSERT(!BadReg(rd));
988 ASSERT(!BadReg(rm));
989 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
990 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
991 }
992
993 ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
994 {
995 ASSERT(!BadReg(rd));
996 ASSERT(!BadReg(rn));
997 ASSERT(!BadReg(rm));
998 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
999 }
1000
1001 // Only allowed in IT (if then) block if last instruction.
1002 ALWAYS_INLINE AssemblerLabel b()
1003 {
1004 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
1005 return m_formatter.label();
1006 }
1007
1008 // Only allowed in IT (if then) block if last instruction.
1009 ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
1010 {
1011 ASSERT(rm != ARMRegisters::pc);
1012 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
1013 return m_formatter.label();
1014 }
1015
1016 // Only allowed in IT (if then) block if last instruction.
1017 ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
1018 {
1019 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
1020 return m_formatter.label();
1021 }
1022
1023 void bkpt(uint8_t imm = 0)
1024 {
1025 m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
1026 }
1027
1028 ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
1029 {
1030 ASSERT(!BadReg(rd));
1031 ASSERT(!BadReg(rm));
1032 m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
1033 }
1034
1035 ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
1036 {
1037 ASSERT(rn != ARMRegisters::pc);
1038 ASSERT(imm.isEncodedImm());
1039
1040 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
1041 }
1042
1043 ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
1044 {
1045 ASSERT(rn != ARMRegisters::pc);
1046 ASSERT(imm.isEncodedImm());
1047
1048 if (!(rn & 8) && imm.isUInt8())
1049 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
1050 else
1051 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
1052 }
1053
1054 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1055 {
1056 ASSERT(rn != ARMRegisters::pc);
1057 ASSERT(!BadReg(rm));
1058 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
1059 }
1060
1061 ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
1062 {
1063 if ((rn | rm) & 8)
1064 cmp(rn, rm, ShiftTypeAndAmount());
1065 else
1066 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
1067 }
1068
1069 // xor is not spelled with an 'e'. :-(
1070 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1071 {
1072 ASSERT(!BadReg(rd));
1073 ASSERT(!BadReg(rn));
1074 ASSERT(imm.isEncodedImm());
1075 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
1076 }
1077
1078 // xor is not spelled with an 'e'. :-(
1079 ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1080 {
1081 ASSERT(!BadReg(rd));
1082 ASSERT(!BadReg(rn));
1083 ASSERT(!BadReg(rm));
1084 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1085 }
1086
1087 // xor is not spelled with an 'e'. :-(
1088 void eor(RegisterID rd, RegisterID rn, RegisterID rm)
1089 {
1090 if ((rd == rn) && !((rd | rm) & 8))
1091 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
1092 else if ((rd == rm) && !((rd | rn) & 8))
1093 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
1094 else
1095 eor(rd, rn, rm, ShiftTypeAndAmount());
1096 }
1097
1098 ALWAYS_INLINE void it(Condition cond)
1099 {
1100 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
1101 }
1102
1103 ALWAYS_INLINE void it(Condition cond, bool inst2if)
1104 {
1105 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
1106 }
1107
1108 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
1109 {
1110 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
1111 }
1112
1113 ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
1114 {
1115 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
1116 }
1117
1118 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1119 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1120 {
1121 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1122 ASSERT(imm.isUInt12());
1123
1124 if (!((rt | rn) & 8) && imm.isUInt7())
1125 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1126 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1127 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1128 else
1129 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
1130 }
1131
1132 ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
1133 {
1134 ASSERT(rn != ARMRegisters::pc);
1135 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
1136 }
1137
1138 ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1139 {
1140 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1141 ASSERT(imm.isUInt7());
1142 ASSERT(!((rt | rn) & 8));
1143 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1144 }
1145
1146 // If index is set, this is a regular offset or a pre-indexed load;
1147     // if index is not set then this is a post-indexed load.
1148     //
1149     // If wback is set rn is updated - this is a pre- or post-indexed load;
1150     // if wback is not set this is a regular offset memory access.
1151     //
1152     // (-255 <= offset <= 255)
1153     // _reg = REG[rn]
1154     // _tmp = _reg + offset
1155     // REG[rt] = MEM[index ? _tmp : _reg]
1156 // if (wback) REG[rn] = _tmp
1157 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1158 {
1159 ASSERT(rt != ARMRegisters::pc);
1160 ASSERT(rn != ARMRegisters::pc);
1161 ASSERT(index || wback);
1162 ASSERT(!wback | (rt != rn));
1163
1164 bool add = true;
1165 if (offset < 0) {
1166 add = false;
1167 offset = -offset;
1168 }
1169 ASSERT((offset & ~0xff) == 0);
1170
1171 offset |= (wback << 8);
1172 offset |= (add << 9);
1173 offset |= (index << 10);
1174 offset |= (1 << 11);
1175
1176 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1177 }
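// Usage sketch for the index/wback forms (an illustrative addition):
//   ldr(r0, r1, 8, true, false)  // regular offset:  r0 = MEM[r1 + 8]
//   ldr(r0, r1, 8, true, true)   // pre-indexed:     r1 += 8; r0 = MEM[r1]
//   ldr(r0, r1, 8, false, true)  // post-indexed:    r0 = MEM[r1]; r1 += 8
// The same index/wback encoding is reused by ldrh/ldrb/str/strb/strh below.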
1178
1179 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1180 ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1181 {
1182 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1183 ASSERT(!BadReg(rm));
1184 ASSERT(shift <= 3);
1185
1186 if (!shift && !((rt | rn | rm) & 8))
1187 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
1188 else
1189 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1190 }
1191
1192 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1193 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1194 {
1195 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1196 ASSERT(imm.isUInt12());
1197 ASSERT(!(imm.getUInt12() & 1));
1198
1199 if (!((rt | rn) & 8) && imm.isUInt6())
1200 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
1201 else
1202 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1203 }
1204
1205 // If index is set, this is a regular offset or a pre-indexed load;
1206     // if index is not set then this is a post-indexed load.
1207     //
1208     // If wback is set rn is updated - this is a pre- or post-indexed load;
1209     // if wback is not set this is a regular offset memory access.
1210     //
1211     // (-255 <= offset <= 255)
1212     // _reg = REG[rn]
1213     // _tmp = _reg + offset
1214     // REG[rt] = MEM[index ? _tmp : _reg]
1215 // if (wback) REG[rn] = _tmp
1216 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1217 {
1218 ASSERT(rt != ARMRegisters::pc);
1219 ASSERT(rn != ARMRegisters::pc);
1220 ASSERT(index || wback);
1221 ASSERT(!wback | (rt != rn));
1222
1223 bool add = true;
1224 if (offset < 0) {
1225 add = false;
1226 offset = -offset;
1227 }
1228 ASSERT((offset & ~0xff) == 0);
1229
1230 offset |= (wback << 8);
1231 offset |= (add << 9);
1232 offset |= (index << 10);
1233 offset |= (1 << 11);
1234
1235 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1236 }
1237
1238 ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1239 {
1240 ASSERT(!BadReg(rt)); // Memory hint
1241 ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
1242 ASSERT(!BadReg(rm));
1243 ASSERT(shift <= 3);
1244
1245 if (!shift && !((rt | rn | rm) & 8))
1246 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
1247 else
1248 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1249 }
1250
1251 void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1252 {
1253 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1254 ASSERT(imm.isUInt12());
1255
1256 if (!((rt | rn) & 8) && imm.isUInt5())
1257 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
1258 else
1259 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
1260 }
1261
1262 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1263 {
1264 ASSERT(rt != ARMRegisters::pc);
1265 ASSERT(rn != ARMRegisters::pc);
1266 ASSERT(index || wback);
1267 ASSERT(!wback | (rt != rn));
1268
1269 bool add = true;
1270 if (offset < 0) {
1271 add = false;
1272 offset = -offset;
1273 }
1274
1275 ASSERT(!(offset & ~0xff));
1276
1277 offset |= (wback << 8);
1278 offset |= (add << 9);
1279 offset |= (index << 10);
1280 offset |= (1 << 11);
1281
1282 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1283 }
1284
1285 ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1286 {
1287 ASSERT(rn != ARMRegisters::pc); // LDR (literal)
1288 ASSERT(!BadReg(rm));
1289 ASSERT(shift <= 3);
1290
1291 if (!shift && !((rt | rn | rm) & 8))
1292 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
1293 else
1294 m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1295 }
1296
1297 void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1298 {
1299 ASSERT(rn != ARMRegisters::pc);
1300 ASSERT(!BadReg(rm));
1301 ASSERT(shift <= 3);
1302
1303 if (!shift && !((rt | rn | rm) & 8))
1304 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
1305 else
1306 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1307 }
1308
1309 void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1310 {
1311 ASSERT(rn != ARMRegisters::pc);
1312 ASSERT(!BadReg(rm));
1313 ASSERT(shift <= 3);
1314
1315 if (!shift && !((rt | rn | rm) & 8))
1316 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
1317 else
1318 m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1319 }
1320
1321 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1322 {
1323 ASSERT(!BadReg(rd));
1324 ASSERT(!BadReg(rm));
1325 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
1326 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1327 }
1328
1329 ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
1330 {
1331 ASSERT(!BadReg(rd));
1332 ASSERT(!BadReg(rn));
1333 ASSERT(!BadReg(rm));
1334 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1335 }
1336
1337 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1338 {
1339 ASSERT(!BadReg(rd));
1340 ASSERT(!BadReg(rm));
1341 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
1342 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1343 }
1344
1345 ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
1346 {
1347 ASSERT(!BadReg(rd));
1348 ASSERT(!BadReg(rn));
1349 ASSERT(!BadReg(rm));
1350 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1351 }
1352
1353 ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
1354 {
1355 ASSERT(imm.isValid());
1356 ASSERT(!imm.isEncodedImm());
1357 ASSERT(!BadReg(rd));
1358
1359 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
1360 }
1361
1362 #if OS(LINUX)
1363 static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
1364 {
1365 uint16_t* address = static_cast<uint16_t*>(instructionStart);
1366 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
1367 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
1368 address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
1369 address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
1370 address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
1371 address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
1372 address[4] = OP_CMP_reg_T2 | left;
1373 cacheFlush(address, sizeof(uint16_t) * 5);
1374 }
1375 #else
1376 static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
1377 {
1378 ASSERT(imm.isValid());
1379 ASSERT(!imm.isEncodedImm());
1380 ASSERT(!BadReg(rd));
1381
1382 uint16_t* address = static_cast<uint16_t*>(instructionStart);
1383 address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
1384 address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
1385 cacheFlush(address, sizeof(uint16_t) * 2);
1386 }
1387 #endif
1388
1389 ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
1390 {
1391 ASSERT(imm.isValid());
1392 ASSERT(!BadReg(rd));
1393
1394 if ((rd < 8) && imm.isUInt8())
1395 m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
1396 else if (imm.isEncodedImm())
1397 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
1398 else
1399 movT3(rd, imm);
1400 }
1401
1402 ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
1403 {
1404 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
1405 }
1406
1407 ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
1408 {
1409 ASSERT(imm.isUInt16());
1410 ASSERT(!BadReg(rd));
1411 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
1412 }
1413
1414 ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
1415 {
1416 ASSERT(imm.isEncodedImm());
1417 ASSERT(!BadReg(rd));
1418
1419 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
1420 }
1421
1422 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
1423 {
1424 ASSERT(!BadReg(rd));
1425 ASSERT(!BadReg(rm));
1426 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1427 }
1428
1429 ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
1430 {
1431 if (!((rd | rm) & 8))
1432 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1433 else
1434 mvn(rd, rm, ShiftTypeAndAmount());
1435 }
1436
1437 ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
1438 {
1439 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
1440 sub(rd, zero, rm);
1441 }
1442
1443 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1444 {
1445 ASSERT(!BadReg(rd));
1446 ASSERT(!BadReg(rn));
1447 ASSERT(imm.isEncodedImm());
1448 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
1449 }
1450
1451 ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1452 {
1453 ASSERT(!BadReg(rd));
1454 ASSERT(!BadReg(rn));
1455 ASSERT(!BadReg(rm));
1456 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1457 }
1458
1459 void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1460 {
1461 if ((rd == rn) && !((rd | rm) & 8))
1462 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1463 else if ((rd == rm) && !((rd | rn) & 8))
1464 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1465 else
1466 orr(rd, rn, rm, ShiftTypeAndAmount());
1467 }
1468
1469 ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1470 {
1471 ASSERT(!BadReg(rd));
1472 ASSERT(!BadReg(rn));
1473 ASSERT(!BadReg(rm));
1474 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1475 }
1476
1477 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
1478 {
1479 if ((rd == rn) && !((rd | rm) & 8))
1480 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1481 else if ((rd == rm) && !((rd | rn) & 8))
1482 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1483 else
1484 orr_S(rd, rn, rm, ShiftTypeAndAmount());
1485 }
1486
1487 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1488 {
1489 ASSERT(!BadReg(rd));
1490 ASSERT(!BadReg(rm));
1491 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
1492 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1493 }
1494
1495 ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
1496 {
1497 ASSERT(!BadReg(rd));
1498 ASSERT(!BadReg(rn));
1499 ASSERT(!BadReg(rm));
1500 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1501 }
1502
1503 ALWAYS_INLINE void pop(RegisterID dest)
1504 {
1505 if (dest < ARMRegisters::r8)
1506 m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
1507 else {
1508 // Load postindexed with writeback.
1509 ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
1510 }
1511 }
1512
1513 ALWAYS_INLINE void pop(uint32_t registerList)
1514 {
1515 ASSERT(WTF::bitCount(registerList) > 1);
1516 ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
1517 ASSERT(!((1 << ARMRegisters::sp) & registerList));
1518 m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
1519 }
1520
1521 ALWAYS_INLINE void push(RegisterID src)
1522 {
1523 if (src < ARMRegisters::r8)
1524 m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
1525 else if (src == ARMRegisters::lr)
1526 m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
1527 else {
1528 // Store preindexed with writeback.
1529 str(src, ARMRegisters::sp, -sizeof(void*), true, true);
1530 }
1531 }
1532
1533 ALWAYS_INLINE void push(uint32_t registerList)
1534 {
1535 ASSERT(WTF::bitCount(registerList) > 1);
1536 ASSERT(!((1 << ARMRegisters::pc) & registerList));
1537 ASSERT(!((1 << ARMRegisters::sp) & registerList));
1538 m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
1539 }
1540
1541 #if CPU(APPLE_ARMV7S)
1542 template<int datasize>
1543 ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
1544 {
1545 static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
1546 ASSERT(!BadReg(rd));
1547 ASSERT(!BadReg(rn));
1548 ASSERT(!BadReg(rm));
1549 m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
1550 }
1551 #endif
1552
1553 ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
1554 {
1555 ASSERT(!BadReg(rdLo));
1556 ASSERT(!BadReg(rdHi));
1557 ASSERT(!BadReg(rn));
1558 ASSERT(!BadReg(rm));
1559 ASSERT(rdLo != rdHi);
1560 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
1561 }
1562
1563 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1564 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1565 {
1566 ASSERT(rt != ARMRegisters::pc);
1567 ASSERT(rn != ARMRegisters::pc);
1568 ASSERT(imm.isUInt12());
1569
1570 if (!((rt | rn) & 8) && imm.isUInt7())
1571 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1572 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
1573 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
1574 else
1575 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
1576 }
1577
1578 // If index is set, this is a regular offset or a pre-indexed store;
1579     // if index is not set then this is a post-indexed store.
1580     //
1581     // If wback is set rn is updated - this is a pre- or post-indexed store;
1582 // if wback is not set this is a regular offset memory access.
1583 //
1584 // (-255 <= offset <= 255)
1585 // _reg = REG[rn]
1586 // _tmp = _reg + offset
1587 // MEM[index ? _tmp : _reg] = REG[rt]
1588 // if (wback) REG[rn] = _tmp
1589 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1590 {
1591 ASSERT(rt != ARMRegisters::pc);
1592 ASSERT(rn != ARMRegisters::pc);
1593 ASSERT(index || wback);
1594 ASSERT(!wback | (rt != rn));
1595
1596 bool add = true;
1597 if (offset < 0) {
1598 add = false;
1599 offset = -offset;
1600 }
1601 ASSERT((offset & ~0xff) == 0);
1602
1603 offset |= (wback << 8);
1604 offset |= (add << 9);
1605 offset |= (index << 10);
1606 offset |= (1 << 11);
1607
1608 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1609 }
1610
1611 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1612 ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1613 {
1614 ASSERT(rn != ARMRegisters::pc);
1615 ASSERT(!BadReg(rm));
1616 ASSERT(shift <= 3);
1617
1618 if (!shift && !((rt | rn | rm) & 8))
1619 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
1620 else
1621 m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1622 }
1623
1624 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1625 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1626 {
1627 ASSERT(rt != ARMRegisters::pc);
1628 ASSERT(rn != ARMRegisters::pc);
1629 ASSERT(imm.isUInt12());
1630
1631 if (!((rt | rn) & 8) && imm.isUInt7())
1632 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
1633 else
1634 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
1635 }
1636
1637 // If index is set, this is a regular offset or a pre-indexed store;
1638     // if index is not set then this is a post-indexed store.
1639     //
1640     // If wback is set rn is updated - this is a pre- or post-indexed store;
1641 // if wback is not set this is a regular offset memory access.
1642 //
1643 // (-255 <= offset <= 255)
1644 // _reg = REG[rn]
1645 // _tmp = _reg + offset
1646 // MEM[index ? _tmp : _reg] = REG[rt]
1647 // if (wback) REG[rn] = _tmp
1648 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1649 {
1650 ASSERT(rt != ARMRegisters::pc);
1651 ASSERT(rn != ARMRegisters::pc);
1652 ASSERT(index || wback);
1653 ASSERT(!wback | (rt != rn));
1654
1655 bool add = true;
1656 if (offset < 0) {
1657 add = false;
1658 offset = -offset;
1659 }
1660 ASSERT((offset & ~0xff) == 0);
1661
1662 offset |= (wback << 8);
1663 offset |= (add << 9);
1664 offset |= (index << 10);
1665 offset |= (1 << 11);
1666
1667 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1668 }
1669
1670 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1671 ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1672 {
1673 ASSERT(rn != ARMRegisters::pc);
1674 ASSERT(!BadReg(rm));
1675 ASSERT(shift <= 3);
1676
1677 if (!shift && !((rt | rn | rm) & 8))
1678 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
1679 else
1680 m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
1681 }
1682
1683 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1684 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1685 {
1686 ASSERT(rt != ARMRegisters::pc);
1687 ASSERT(rn != ARMRegisters::pc);
1688 ASSERT(imm.isUInt12());
1689
1690 if (!((rt | rn) & 8) && imm.isUInt7())
1691 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
1692 else
1693 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
1694 }
1695
1696 // If index is set, this is a regular offset or a pre-indexed store;
1697     // if index is not set then this is a post-indexed store.
1698     //
1699     // If wback is set rn is updated - this is a pre- or post-indexed store;
1700 // if wback is not set this is a regular offset memory access.
1701 //
1702 // (-255 <= offset <= 255)
1703 // _reg = REG[rn]
1704 // _tmp = _reg + offset
1705 // MEM[index ? _tmp : _reg] = REG[rt]
1706 // if (wback) REG[rn] = _tmp
1707 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1708 {
1709 ASSERT(rt != ARMRegisters::pc);
1710 ASSERT(rn != ARMRegisters::pc);
1711 ASSERT(index || wback);
1712 ASSERT(!wback | (rt != rn));
1713
1714 bool add = true;
1715 if (offset < 0) {
1716 add = false;
1717 offset = -offset;
1718 }
1719 ASSERT(!(offset & ~0xff));
1720
1721 offset |= (wback << 8);
1722 offset |= (add << 9);
1723 offset |= (index << 10);
1724 offset |= (1 << 11);
1725
1726 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1727 }
1728
1729 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
1730 ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
1731 {
1732 ASSERT(rn != ARMRegisters::pc);
1733 ASSERT(!BadReg(rm));
1734 ASSERT(shift <= 3);
1735
1736 if (!shift && !((rt | rn | rm) & 8))
1737 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
1738 else
1739 m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1740 }
1741
1742 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1743 {
1744 // Rd can only be SP if Rn is also SP.
1745 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1746 ASSERT(rd != ARMRegisters::pc);
1747 ASSERT(rn != ARMRegisters::pc);
1748 ASSERT(imm.isValid());
1749
1750 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1751 ASSERT(!(imm.getUInt16() & 3));
1752 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1753 return;
1754 } else if (!((rd | rn) & 8)) {
1755 if (imm.isUInt3()) {
1756 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1757 return;
1758 } else if ((rd == rn) && imm.isUInt8()) {
1759 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1760 return;
1761 }
1762 }
1763
1764 if (imm.isEncodedImm())
1765 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
1766 else {
1767 ASSERT(imm.isUInt12());
1768 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
1769 }
1770 }
1771
1772 ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1773 {
1774 ASSERT(rd != ARMRegisters::pc);
1775 ASSERT(rn != ARMRegisters::pc);
1776 ASSERT(imm.isValid());
1777 ASSERT(imm.isUInt12());
1778
1779 if (!((rd | rn) & 8) && !imm.getUInt12())
1780 m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
1781 else
1782 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
1783 }
1784
1785 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1786 {
1787 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1788 ASSERT(rd != ARMRegisters::pc);
1789 ASSERT(rn != ARMRegisters::pc);
1790 ASSERT(!BadReg(rm));
1791 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1792 }
1793
1794     // NOTE: In an IT block, sub doesn't modify the flags register.
1795 ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1796 {
1797 if (!((rd | rn | rm) & 8))
1798 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1799 else
1800 sub(rd, rn, rm, ShiftTypeAndAmount());
1801 }
1802
1803 // Not allowed in an IT (if then) block.
1804 void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1805 {
1806 // Rd can only be SP if Rn is also SP.
1807 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1808 ASSERT(rd != ARMRegisters::pc);
1809 ASSERT(rn != ARMRegisters::pc);
1810 ASSERT(imm.isValid());
1811
1812 if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
1813 ASSERT(!(imm.getUInt16() & 3));
1814 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
1815 return;
1816 } else if (!((rd | rn) & 8)) {
1817 if (imm.isUInt3()) {
1818 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1819 return;
1820 } else if ((rd == rn) && imm.isUInt8()) {
1821 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1822 return;
1823 }
1824 }
1825
1826 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
1827 }
1828
1829 ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
1830 {
1831 ASSERT(rd != ARMRegisters::pc);
1832 ASSERT(rn != ARMRegisters::pc);
1833 ASSERT(imm.isValid());
1834 ASSERT(imm.isUInt12());
1835
1836 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
1837 }
1838
1839 // Not allowed in an IT (if then) block?
1840 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1841 {
1842 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
1843 ASSERT(rd != ARMRegisters::pc);
1844 ASSERT(rn != ARMRegisters::pc);
1845 ASSERT(!BadReg(rm));
1846 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1847 }
1848
1849 // Not allowed in an IT (if then) block.
1850 ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1851 {
1852 if (!((rd | rn | rm) & 8))
1853 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1854 else
1855 sub_S(rd, rn, rm, ShiftTypeAndAmount());
1856 }
1857
1858 ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
1859 {
1860 ASSERT(!BadReg(rn));
1861 ASSERT(imm.isEncodedImm());
1862
1863 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
1864 }
1865
1866 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1867 {
1868 ASSERT(!BadReg(rn));
1869 ASSERT(!BadReg(rm));
1870 m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
1871 }
1872
1873 ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1874 {
1875 if ((rn | rm) & 8)
1876 tst(rn, rm, ShiftTypeAndAmount());
1877 else
1878 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
1879 }
1880
1881 ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
1882 {
1883 ASSERT(lsb < 32);
1884 ASSERT((width >= 1) && (width <= 32));
1885 ASSERT((lsb + width) <= 32);
1886 m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
1887 }
1888
1889 #if CPU(APPLE_ARMV7S)
1890 ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
1891 {
1892 ASSERT(!BadReg(rd));
1893 ASSERT(!BadReg(rn));
1894 ASSERT(!BadReg(rm));
1895 m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
1896 }
1897 #endif
1898
1899 void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1900 {
1901 m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
1902 }
1903
1904 void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
1905 {
1906 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
1907 }
1908
1909 void vcmpz(FPDoubleRegisterID rd)
1910 {
1911 m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
1912 }
1913
1914 void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
1915 {
1916 // boolean arguments are: 64-bit size, then vcvtOp(toInteger, isUnsigned, isRoundZero)
1917 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
1918 }
1919
1920 void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1921 {
1922 // boolean arguments are: 64-bit size, then vcvtOp(toInteger, isUnsigned, isRoundZero)
1923 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
1924 }
1925
1926 void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
1927 {
1928 // boolean arguments are: 64-bit size, then vcvtOp(toInteger, isUnsigned, isRoundZero)
1929 m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
1930 }
1931
1932 void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1933 {
1934 m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
1935 }
1936
1937 void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1938 {
1939 m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
1940 }
1941
1942 void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1943 {
1944 m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
1945 }
1946
1947 void vmov(RegisterID rd, FPSingleRegisterID rn)
1948 {
1949 ASSERT(!BadReg(rd));
1950 m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
1951 }
1952
1953 void vmov(FPSingleRegisterID rd, RegisterID rn)
1954 {
1955 ASSERT(!BadReg(rn));
1956 m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
1957 }
1958
1959 void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
1960 {
1961 ASSERT(!BadReg(rd1));
1962 ASSERT(!BadReg(rd2));
1963 m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
1964 }
1965
1966 void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
1967 {
1968 ASSERT(!BadReg(rn1));
1969 ASSERT(!BadReg(rn2));
1970 m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
1971 }
1972
1973 void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
1974 {
1975 m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
1976 }
1977
1978 void vmrs(RegisterID reg = ARMRegisters::pc)
1979 {
1980 ASSERT(reg != ARMRegisters::sp);
1981 m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
1982 }
1983
1984 void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
1985 {
1986 m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
1987 }
1988
1989 void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
1990 {
1991 m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
1992 }
1993
1994 void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
1995 {
1996 m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
1997 }
1998
1999 void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
2000 {
2001 m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
2002 }
2003
2004 void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
2005 {
2006 m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
2007 }
2008
2009 void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
2010 {
2011 m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
2012 }
2013
2014 void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
2015 {
2016 m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
2017 }
2018
2019 void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
2020 {
2021 m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
2022 }
2023
2024 void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
2025 {
2026 m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
2027 }
2028
2029 void nop()
2030 {
2031 m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
2032 }
2033
2034 void nopw()
2035 {
2036 m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
2037 }
2038
2039 void dmbSY()
2040 {
2041 m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b);
2042 }
2043
2044 AssemblerLabel labelIgnoringWatchpoints()
2045 {
2046 return m_formatter.label();
2047 }
2048
2049 AssemblerLabel labelForWatchpoint()
2050 {
2051 AssemblerLabel result = m_formatter.label();
2052 if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
2053 result = label();
2054 m_indexOfLastWatchpoint = result.m_offset;
2055 m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
2056 return result;
2057 }
2058
2059 AssemblerLabel label()
2060 {
2061 AssemblerLabel result = m_formatter.label();
2062 while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
2063 if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
2064 nopw();
2065 else
2066 nop();
2067 result = m_formatter.label();
2068 }
2069 return result;
2070 }
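// Illustrative scenario: if labelForWatchpoint() was taken at offset 100 and
// maxJumpReplacementSize() is 4, a label() requested at offset 102 emits one
// 2-byte nop and returns offset 104, so no other code can start inside the
// region a watchpoint jump might later overwrite.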
2071
2072 AssemblerLabel align(int alignment)
2073 {
2074 while (!m_formatter.isAligned(alignment))
2075 bkpt();
2076
2077 return label();
2078 }
2079
2080 static void* getRelocatedAddress(void* code, AssemblerLabel label)
2081 {
2082 ASSERT(label.isSet());
2083 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
2084 }
2085
2086 static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
2087 {
2088 return b.m_offset - a.m_offset;
2089 }
2090
2091 static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
2092
2093 // Assembler admin methods:
2094
2095 static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
2096 {
2097 return a.from() < b.from();
2098 }
2099
2100 static bool canCompact(JumpType jumpType)
2101 {
2102 // The following cannot be compacted:
2103 // JumpFixed: represents custom jump sequence
2104 // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
2105 // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
2106 return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
2107 }
2108
2109 static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
2110 {
2111 if (jumpType == JumpFixed)
2112 return LinkInvalid;
2113
2114 // For a patchable jump we must leave space for the longest code sequence
2115 if (jumpType == JumpNoConditionFixedSize)
2116 return LinkBX;
2117 if (jumpType == JumpConditionFixedSize)
2118 return LinkConditionalBX;
2119
2120 const int paddingSize = JUMP_ENUM_SIZE(jumpType);
2121
2122 if (jumpType == JumpCondition) {
2123 // 2-byte conditional T1
2124 const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
2125 if (canBeJumpT1(jumpT1Location, to))
2126 return LinkJumpT1;
2127 // 4-byte conditional T3
2128 const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
2129 if (canBeJumpT3(jumpT3Location, to))
2130 return LinkJumpT3;
2131 // 4-byte conditional T4 with IT
2132 const uint16_t* conditionalJumpT4Location =
2133 reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
2134 if (canBeJumpT4(conditionalJumpT4Location, to))
2135 return LinkConditionalJumpT4;
2136 } else {
2137 // 2-byte unconditional T2
2138 const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
2139 if (canBeJumpT2(jumpT2Location, to))
2140 return LinkJumpT2;
2141 // 4-byte unconditional T4
2142 const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
2143 if (canBeJumpT4(jumpT4Location, to))
2144 return LinkJumpT4;
2145 // use long jump sequence
2146 return LinkBX;
2147 }
2148
2149 ASSERT(jumpType == JumpCondition);
2150 return LinkConditionalBX;
2151 }
2152
2153 static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
2154 {
2155 JumpLinkType linkType = computeJumpType(record.type(), from, to);
2156 record.setLinkType(linkType);
2157 return linkType;
2158 }
2159
2160 Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
2161 {
2162 std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
2163 return m_jumpsToLink;
2164 }
2165
2166 static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
2167 {
2168 switch (record.linkType()) {
2169 case LinkJumpT1:
2170 linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2171 break;
2172 case LinkJumpT2:
2173 linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
2174 break;
2175 case LinkJumpT3:
2176 linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2177 break;
2178 case LinkJumpT4:
2179 linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
2180 break;
2181 case LinkConditionalJumpT4:
2182 linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2183 break;
2184 case LinkConditionalBX:
2185 linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
2186 break;
2187 case LinkBX:
2188 linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
2189 break;
2190 default:
2191 RELEASE_ASSERT_NOT_REACHED();
2192 break;
2193 }
2194 }
2195
2196 void* unlinkedCode() { return m_formatter.data(); }
2197 size_t codeSize() const { return m_formatter.codeSize(); }
2198
2199 static unsigned getCallReturnOffset(AssemblerLabel call)
2200 {
2201 ASSERT(call.isSet());
2202 return call.m_offset;
2203 }
2204
2205 // Linking & patching:
2206 //
2207 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2208 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2209 // code has been finalized it is (platform support permitting) within a non-
2210 // writable region of memory; to modify the code in an execute-only executable
2211 // pool the 'repatch' and 'relink' methods should be used.
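// A typical flow (sketch, not from the original source): jumps recorded during
// assembly via linkJump(AssemblerLabel, AssemblerLabel, ...) are resolved in
// bulk through link() while the buffer is still writable; retargeting a call
// in finalized code instead goes through relinkCall(), which rewrites the
// MOVW/MOVT pair at the call site and flushes the instruction cache.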
2212
2213 void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
2214 {
2215 ASSERT(to.isSet());
2216 ASSERT(from.isSet());
2217 m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
2218 }
2219
2220 static void linkJump(void* code, AssemblerLabel from, void* to)
2221 {
2222 ASSERT(from.isSet());
2223
2224 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
2225 linkJumpAbsolute(location, to);
2226 }
2227
2228 static void linkCall(void* code, AssemblerLabel from, void* to)
2229 {
2230 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
2231 ASSERT(from.isSet());
2232
2233 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
2234 }
2235
2236 static void linkPointer(void* code, AssemblerLabel where, void* value)
2237 {
2238 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
2239 }
2240
2241 static void relinkJump(void* from, void* to)
2242 {
2243 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2244 ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
2245
2246 linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
2247
2248 cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
2249 }
2250
2251 static void relinkCall(void* from, void* to)
2252 {
2253 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
2254
2255 setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
2256 }
2257
2258 static void* readCallTarget(void* from)
2259 {
2260 return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
2261 }
2262
2263 static void repatchInt32(void* where, int32_t value)
2264 {
2265 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2266
2267 setInt32(where, value, true);
2268 }
2269
2270 static void repatchCompact(void* where, int32_t offset)
2271 {
2272 ASSERT(offset >= -255 && offset <= 255);
2273
2274 bool add = true;
2275 if (offset < 0) {
2276 add = false;
2277 offset = -offset;
2278 }
2279
2280 offset |= (add << 9);
2281 offset |= (1 << 10);
2282 offset |= (1 << 11);
2283
2284 uint16_t* location = reinterpret_cast<uint16_t*>(where);
2285 location[1] &= ~((1 << 12) - 1);
2286 location[1] |= offset;
2287 cacheFlush(location, sizeof(uint16_t) * 2);
2288 }
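// Illustrative example: repatchCompact(where, -255) clears the add bit, keeps
// magnitude 0xff, and ORs in U (bit 9) = 0, bit 10 and bit 11, producing
// 0xcff; the low 12 bits of the load's second halfword are then replaced and
// the two halfwords flushed.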
2289
2290 static void repatchPointer(void* where, void* value)
2291 {
2292 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
2293
2294 setPointer(where, value, true);
2295 }
2296
2297 static void* readPointer(void* where)
2298 {
2299 return reinterpret_cast<void*>(readInt32(where));
2300 }
2301
2302 static void replaceWithJump(void* instructionStart, void* to)
2303 {
2304 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2305 ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
2306
2307 #if OS(LINUX)
2308 if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
2309 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2310 linkJumpT4(ptr, to);
2311 cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2312 } else {
2313 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
2314 linkBX(ptr, to);
2315 cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
2316 }
2317 #else
2318 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
2319 linkJumpT4(ptr, to);
2320 cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
2321 #endif
2322 }
2323
2324 static ptrdiff_t maxJumpReplacementSize()
2325 {
2326 #if OS(LINUX)
2327 return 10;
2328 #else
2329 return 4;
2330 #endif
2331 }
2332
2333 static void replaceWithLoad(void* instructionStart)
2334 {
2335 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2336 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2337 switch (ptr[0] & 0xFFF0) {
2338 case OP_LDR_imm_T3:
2339 break;
2340 case OP_ADD_imm_T3:
2341 ASSERT(!(ptr[1] & 0xF000));
2342 ptr[0] &= 0x000F;
2343 ptr[0] |= OP_LDR_imm_T3;
2344 ptr[1] |= (ptr[1] & 0x0F00) << 4;
2345 ptr[1] &= 0xF0FF;
2346 cacheFlush(ptr, sizeof(uint16_t) * 2);
2347 break;
2348 default:
2349 RELEASE_ASSERT_NOT_REACHED();
2350 }
2351 }
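// In the ADD -> LDR rewrite above, the second halfword changes layout from
// 0:imm3:Rd:imm8 (ADD imm T3) to Rt:imm12 (LDR imm T3): the assert confirms
// imm3 and bit 15 are zero, the destination register is copied from bits 8..11
// up to bits 12..15, and its old position is cleared so only the imm8 offset
// remains in imm12.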
2352
2353 static void replaceWithAddressComputation(void* instructionStart)
2354 {
2355 ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
2356 uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
2357 switch (ptr[0] & 0xFFF0) {
2358 case OP_LDR_imm_T3:
2359 ASSERT(!(ptr[1] & 0x0F00));
2360 ptr[0] &= 0x000F;
2361 ptr[0] |= OP_ADD_imm_T3;
2362 ptr[1] |= (ptr[1] & 0xF000) >> 4;
2363 ptr[1] &= 0x0FFF;
2364 cacheFlush(ptr, sizeof(uint16_t) * 2);
2365 break;
2366 case OP_ADD_imm_T3:
2367 break;
2368 default:
2369 RELEASE_ASSERT_NOT_REACHED();
2370 }
2371 }
2372
2373 unsigned debugOffset() { return m_formatter.debugOffset(); }
2374
2375 #if OS(LINUX)
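// The movw/movt pair below builds r7 = 0x000f0002, the ARM Linux cacheflush
// syscall number (__ARM_NR_cacheflush); r0/r1 carry the address range and
// r2 = 0 is the flags argument.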
2376 static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
2377 {
2378 asm volatile(
2379 "push {r7}\n"
2380 "mov r0, %0\n"
2381 "mov r1, %1\n"
2382 "movw r7, #0x2\n"
2383 "movt r7, #0xf\n"
2384 "movs r2, #0x0\n"
2385 "svc 0x0\n"
2386 "pop {r7}\n"
2387 :
2388 : "r" (begin), "r" (end)
2389 : "r0", "r1", "r2");
2390 }
2391 #endif
2392
2393 static void cacheFlush(void* code, size_t size)
2394 {
2395 #if OS(IOS)
2396 sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
2397 #elif OS(LINUX)
2398 size_t page = pageSize();
2399 uintptr_t current = reinterpret_cast<uintptr_t>(code);
2400 uintptr_t end = current + size;
2401 uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
2402
2403 if (end <= firstPageEnd) {
2404 linuxPageFlush(current, end);
2405 return;
2406 }
2407
2408 linuxPageFlush(current, firstPageEnd);
2409
2410 for (current = firstPageEnd; current + page < end; current += page)
2411 linuxPageFlush(current, current + page);
2412
2413 linuxPageFlush(current, end);
2414 #elif OS(WINCE)
2415 CacheRangeFlush(code, size, CACHE_SYNC_ALL);
2416 #else
2417 #error "The cacheFlush support is missing on this platform."
2418 #endif
2419 }
2420
2421 private:
2422 // VFP operations commonly take one or more 5-bit operands, typically representing a
2423 // floating point register number. This will commonly be encoded in the instruction
2424 // in two parts, with one single bit field, and one 4-bit field. In the case of
2425 // double precision operands the high bit of the register number will be encoded
2426 // separately, and for single precision operands it is the low bit of the register
2427 // number that is encoded separately.
2428 // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2429 // field to be encoded together in the instruction (the low 4-bits of a double
2430 // register number, or the high 4-bits of a single register number), and bit 4
2431 // contains the bit value to be encoded individually.
2432 struct VFPOperand {
2433 explicit VFPOperand(uint32_t value)
2434 : m_value(value)
2435 {
2436 ASSERT(!(m_value & ~0x1f));
2437 }
2438
2439 VFPOperand(FPDoubleRegisterID reg)
2440 : m_value(reg)
2441 {
2442 }
2443
2444 VFPOperand(RegisterID reg)
2445 : m_value(reg)
2446 {
2447 }
2448
2449 VFPOperand(FPSingleRegisterID reg)
2450 : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
2451 {
2452 }
2453
2454 uint32_t bits1()
2455 {
2456 return m_value >> 4;
2457 }
2458
2459 uint32_t bits4()
2460 {
2461 return m_value & 0xf;
2462 }
2463
2464 uint32_t m_value;
2465 };
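// Illustrative values: VFPOperand(d17) stores 0b10001, so bits1() = 1 and
// bits4() = 1 (the D and Vd fields of a double register); VFPOperand(s3)
// rotates to 0b10001 as well, since for a single register the low bit is the
// separately encoded one.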
2466
2467 VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
2468 {
2469 // Cannot specify rounding when converting to float.
2470 ASSERT(toInteger || !isRoundZero);
2471
2472 uint32_t op = 0x8;
2473 if (toInteger) {
2474 // opc2 indicates both toInteger & isUnsigned.
2475 op |= isUnsigned ? 0x4 : 0x5;
2476 // 'op' field in instruction is isRoundZero
2477 if (isRoundZero)
2478 op |= 0x10;
2479 } else {
2480 ASSERT(!isRoundZero);
2481 // 'op' field in instruction is isUnsigned
2482 if (!isUnsigned)
2483 op |= 0x10;
2484 }
2485 return VFPOperand(op);
2486 }
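// For instance, vcvtOp(true, false, true) (float to signed integer, round to
// zero) yields 0x8 | 0x5 | 0x10 = 0x1d, while vcvtOp(false, false, false)
// (signed integer to float) yields 0x8 | 0x10 = 0x18.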
2487
2488 static void setInt32(void* code, uint32_t value, bool flush)
2489 {
2490 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2491 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2492
2493 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
2494 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
2495 location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2496 location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
2497 location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2498 location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
2499
2500 if (flush)
2501 cacheFlush(location - 4, 4 * sizeof(uint16_t));
2502 }
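// Example (illustrative): setInt32(code, 0x12345678, true) rewrites the
// preceding pair so the MOVW loads 0x5678 and the MOVT loads 0x1234, keeping
// the destination register already encoded at the patch site, then flushes
// the four rewritten halfwords.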
2503
2504 static int32_t readInt32(void* code)
2505 {
2506 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2507 ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
2508
2509 ARMThumbImmediate lo16;
2510 ARMThumbImmediate hi16;
2511 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
2512 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
2513 decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
2514 decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
2515 uint32_t result = hi16.asUInt16();
2516 result <<= 16;
2517 result |= lo16.asUInt16();
2518 return static_cast<int32_t>(result);
2519 }
2520
2521 static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
2522 {
2523 // Requires us to have planted an LDR_imm_T1
2524 ASSERT(imm.isValid());
2525 ASSERT(imm.isUInt7());
2526 uint16_t* location = reinterpret_cast<uint16_t*>(code);
2527 location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
2528 location[0] |= (imm.getUInt7() >> 2) << 6;
2529 cacheFlush(location, sizeof(uint16_t));
2530 }
2531
2532 static void setPointer(void* code, void* value, bool flush)
2533 {
2534 setInt32(code, reinterpret_cast<uint32_t>(value), flush);
2535 }
2536
2537 static bool isB(void* address)
2538 {
2539 uint16_t* instruction = static_cast<uint16_t*>(address);
2540 return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
2541 }
2542
2543 static bool isBX(void* address)
2544 {
2545 uint16_t* instruction = static_cast<uint16_t*>(address);
2546 return (instruction[0] & 0xff87) == OP_BX;
2547 }
2548
2549 static bool isMOV_imm_T3(void* address)
2550 {
2551 uint16_t* instruction = static_cast<uint16_t*>(address);
2552 return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
2553 }
2554
2555 static bool isMOVT(void* address)
2556 {
2557 uint16_t* instruction = static_cast<uint16_t*>(address);
2558 return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
2559 }
2560
2561 static bool isNOP_T1(void* address)
2562 {
2563 uint16_t* instruction = static_cast<uint16_t*>(address);
2564 return instruction[0] == OP_NOP_T1;
2565 }
2566
2567 static bool isNOP_T2(void* address)
2568 {
2569 uint16_t* instruction = static_cast<uint16_t*>(address);
2570 return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
2571 }
2572
2573 static bool canBeJumpT1(const uint16_t* instruction, const void* target)
2574 {
2575 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2576 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2577
2578 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2579 // It does not appear to be documented in the ARM ARM (big surprise), but
2580 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2581 // less than the actual displacement.
2582 relative -= 2;
2583 return ((relative << 23) >> 23) == relative;
2584 }
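// The shift pair above is a signed range check (assuming a 32-bit intptr_t on
// ARMv7): (relative << 23) >> 23 sign-extends bit 8 through the upper bits, so
// the comparison holds exactly when the displacement fits the 9-bit signed
// field of B<cond> T1. The T2, T3 and T4 variants below do the same for 12-,
// 21- and 25-bit fields.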
2585
2586 static bool canBeJumpT2(const uint16_t* instruction, const void* target)
2587 {
2588 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2589 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2590
2591 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2592 // It does not appear to be documented in the ARM ARM (big surprise), but
2593 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2594 // less than the actual displacement.
2595 relative -= 2;
2596 return ((relative << 20) >> 20) == relative;
2597 }
2598
2599 static bool canBeJumpT3(const uint16_t* instruction, const void* target)
2600 {
2601 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2602 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2603
2604 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2605 return ((relative << 11) >> 11) == relative;
2606 }
2607
2608 static bool canBeJumpT4(const uint16_t* instruction, const void* target)
2609 {
2610 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2611 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2612
2613 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2614 return ((relative << 7) >> 7) == relative;
2615 }
2616
2617 static void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
2618 {
2619 // FIXME: this should be up in the MacroAssembler layer. :-(
2620 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2621 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2622 ASSERT(canBeJumpT1(instruction, target));
2623
2624 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2625 // It does not appear to be documented in the ARM ARM (big surprise), but
2626 // for OP_B_T1 the branch displacement encoded in the instruction is 2
2627 // less than the actual displacement.
2628 relative -= 2;
2629
2630 // All branch offsets should be an even distance.
2631 ASSERT(!(relative & 1));
2632 instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
2633 }
2634
2635 static void linkJumpT2(uint16_t* instruction, void* target)
2636 {
2637 // FIXME: this should be up in the MacroAssembler layer. :-(
2638 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2639 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2640 ASSERT(canBeJumpT2(instruction, target));
2641
2642 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2643 // It does not appear to be documented in the ARM ARM (big surprise), but
2644 // for OP_B_T2 the branch displacement encoded in the instruction is 2
2645 // less than the actual displacement.
2646 relative -= 2;
2647
2648 // All branch offsets should be an even distance.
2649 ASSERT(!(relative & 1));
2650 instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
2651 }
2652
2653 static void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
2654 {
2655 // FIXME: this should be up in the MacroAssembler layer. :-(
2656 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2657 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2658 ASSERT(canBeJumpT3(instruction, target));
2659
2660 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2661
2662 // All branch offsets should be an even distance.
2663 ASSERT(!(relative & 1));
2664 instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2665 instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2666 }
2667
2668 static void linkJumpT4(uint16_t* instruction, void* target)
2669 {
2670 // FIXME: this should be up in the MacroAssembler layer. :-(
2671 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2672 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2673 ASSERT(canBeJumpT4(instruction, target));
2674
2675 intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2676 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2677 if (relative >= 0)
2678 relative ^= 0xC00000;
2679
2680 // All branch offsets should be an even distance.
2681 ASSERT(!(relative & 1));
2682 instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2683 instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2684 }
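// The XOR above produces the T4 branch's J1/J2 fields (using the ARM ARM
// naming, where J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S)): a non-negative
// displacement has S = 0, so bits 23 and 22 must be inverted before being
// placed, while a negative displacement (S = 1) uses them unchanged.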
2685
2686 static void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
2687 {
2688 // FIXME: this should be up in the MacroAssembler layer. :-(
2689 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2690 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2691
2692 instruction[-3] = ifThenElse(cond) | OP_IT;
2693 linkJumpT4(instruction, target);
2694 }
2695
2696 static void linkBX(uint16_t* instruction, void* target)
2697 {
2698 // FIXME: this should be up in the MacroAssembler layer. :-(
2699 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2700 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2701
2702 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2703 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2704 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2705 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2706 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2707 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2708 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2709 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2710 }
2711
2712 static void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
2713 {
2714 // FIXME: this should be up in the MacroAssembler layer. :-(
2715 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2716 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2717
2718 linkBX(instruction, target);
2719 instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
2720 }
2721
2722 static void linkJumpAbsolute(uint16_t* instruction, void* target)
2723 {
2724 // FIXME: this should be up in the MacroAssembler layer. :-(
2725 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2726 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2727
2728 ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2729 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2730
2731 if (canBeJumpT4(instruction, target)) {
2732 // There may be a better way to fix this, but right now put the NOPs first, since in the
2733 // case of a conditional branch this will be coming after an ITTT predicating *three*
2734 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
2735 // variable width encoding - the previous instruction might *look* like an ITTT but
2736 // actually be the second half of a 2-word op.
2737 instruction[-5] = OP_NOP_T1;
2738 instruction[-4] = OP_NOP_T2a;
2739 instruction[-3] = OP_NOP_T2b;
2740 linkJumpT4(instruction, target);
2741 } else {
2742 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2743 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2744 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2745 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2746 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2747 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2748 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2749 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2750 }
2751 }
2752
2753 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2754 {
2755 return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2756 }
2757
2758 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
2759 {
2760 result.m_value.i = (value >> 10) & 1;
2761 result.m_value.imm4 = value & 15;
2762 }
2763
2764 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2765 {
2766 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2767 }
2768
2769 static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
2770 {
2771 result.m_value.imm3 = (value >> 12) & 7;
2772 result.m_value.imm8 = value & 255;
2773 }
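// Together these four helpers split a Thumb-2 modified immediate across the
// two halfwords, with i at bit 10 and imm4 in the low nybble of the first,
// and imm3 (bits 14..12) plus imm8 (bits 7..0) in the second, and reassemble
// it when reading a value back out of patched code.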
2774
2775 class ARMInstructionFormatter {
2776 public:
2777 ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
2778 {
2779 m_buffer.putShort(op | (rd << 8) | imm);
2780 }
2781
2782 ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
2783 {
2784 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
2785 }
2786
2787 ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
2788 {
2789 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
2790 }
2791
2792 ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
2793 {
2794 m_buffer.putShort(op | imm);
2795 }
2796
2797 ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
2798 {
2799 m_buffer.putShort(op | imm);
2800 }
2801
2802 ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
2803 {
2804 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
2805 }
2806
2807 ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
2808 {
2809 m_buffer.putShort(op | imm);
2810 }
2811
2812 ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
2813 {
2814 m_buffer.putShort(op | (reg1 << 3) | reg2);
2815 }
2816
2817 ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
2818 {
2819 m_buffer.putShort(op | reg);
2820 m_buffer.putShort(ff.m_u.value);
2821 }
2822
2823 ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
2824 {
2825 m_buffer.putShort(op);
2826 m_buffer.putShort(ff.m_u.value);
2827 }
2828
2829 ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
2830 {
2831 m_buffer.putShort(op1);
2832 m_buffer.putShort(op2);
2833 }
2834
2835 ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
2836 {
2837 m_buffer.putShort(op1);
2838 m_buffer.putShort(imm);
2839 }
2840
2841 ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
2842 {
2843 ARMThumbImmediate newImm = imm;
2844 newImm.m_value.imm4 = imm4;
2845
2846 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
2847 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
2848 }
2849
2850 ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
2851 {
2852 m_buffer.putShort(op | reg1);
2853 m_buffer.putShort((reg2 << 12) | imm);
2854 }
2855
2856 ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
2857 {
2858 m_buffer.putShort(op | reg1);
2859 m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
2860 }
2861
2862 // Formats up instructions of the pattern:
2863 // 111111111B11aaaa:bbbb222SA2C2cccc
2864 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
2865 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
2866 ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
2867 {
2868 ASSERT(!(op1 & 0x004f));
2869 ASSERT(!(op2 & 0xf1af));
2870 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
2871 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
2872 }
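// For instance (illustrative): vadd(d0, d1, d2) reaches here with a = d1,
// b = d0, c = d2 and size set, so the first halfword gains Vn = 1 in its low
// nybble and the second gains Vd = 0, the size bit at bit 8, and Vm = 2,
// matching the aaaa, bbbb and cccc slots in the pattern above.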
2873
2874 // ARM VFP addresses can be offset by a sign-magnitude immediate: an up/down bit plus an 8-bit magnitude, left shifted by 2.
2875 // (i.e. +/-(0..255) 32-bit words)
2876 ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
2877 {
2878 bool up = true;
2879 if (imm < 0) {
2880 imm = -imm;
2881 up = false;
2882 }
2883
2884 uint32_t offset = imm;
2885 ASSERT(!(offset & ~0x3fc));
2886 offset >>= 2;
2887
2888 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
2889 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
2890 }
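// Example (illustrative): vldr(d0, r1, -8) clears the up bit and encodes a
// word offset of 2, i.e. VLDR d0, [r1, #-8]; the assert rejects any offset
// that is not a multiple of 4 or exceeds 1020 in magnitude.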
2891
2892 // Administrative methods:
2893
2894 size_t codeSize() const { return m_buffer.codeSize(); }
2895 AssemblerLabel label() const { return m_buffer.label(); }
2896 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
2897 void* data() const { return m_buffer.data(); }
2898
2899 unsigned debugOffset() { return m_buffer.debugOffset(); }
2900
2901 AssemblerBuffer m_buffer;
2902 } m_formatter;
2903
2904 Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
2905 int m_indexOfLastWatchpoint;
2906 int m_indexOfTailOfLastWatchpoint;
2907 };
2908
2909 } // namespace JSC
2910
2911 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2912
2913 #endif // ARMAssembler_h