/*
 * Copyright (C) 2009, 2010 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ARMAssembler_h
#define ARMAssembler_h

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "AssemblerBufferWithConstantPool.h"
#include "JITCompilationEffort.h"
#include <wtf/Assertions.h>

namespace JSC {

    typedef uint32_t ARMWord;

    namespace ARMRegisters {
        typedef enum {
            r0 = 0,
            r1,
            r2,
            r3, S0 = r3,
            r4,
            r5,
            r6,
            r7,
            r8, S1 = r8,
            r9,
            r10,
            r11,
            r12,
            r13, sp = r13,
            r14, lr = r14,
            r15, pc = r15
        } RegisterID;

        typedef enum {
            d0,
            d1,
            d2,
            d3, SD0 = d3,
            d4,
            d5,
            d6,
            d7,
            d8,
            d9,
            d10,
            d11,
            d12,
            d13,
            d14,
            d15,
            d16,
            d17,
            d18,
            d19,
            d20,
            d21,
            d22,
            d23,
            d24,
            d25,
            d26,
            d27,
            d28,
            d29,
            d30,
            d31
        } FPRegisterID;

    } // namespace ARMRegisters

    class ARMAssembler {
    public:
        typedef ARMRegisters::RegisterID RegisterID;
        typedef ARMRegisters::FPRegisterID FPRegisterID;
        typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
        typedef SegmentedVector<AssemblerLabel, 64> Jumps;

        ARMAssembler() { }

        // ARM conditional constants
        typedef enum {
            EQ = 0x00000000, // Zero
            NE = 0x10000000, // Non-zero
            CS = 0x20000000, // Carry set / unsigned higher or same
            CC = 0x30000000, // Carry clear / unsigned lower
            MI = 0x40000000, // Negative
            PL = 0x50000000, // Positive or zero
            VS = 0x60000000, // Overflow
            VC = 0x70000000, // No overflow
            HI = 0x80000000, // Unsigned higher
            LS = 0x90000000, // Unsigned lower or same
            GE = 0xa0000000, // Signed greater than or equal
            LT = 0xb0000000, // Signed less than
            GT = 0xc0000000, // Signed greater than
            LE = 0xd0000000, // Signed less than or equal
            AL = 0xe0000000  // Always (unconditional)
        } Condition;

        // ARM instruction constants
        enum {
            AND = (0x0 << 21),
            EOR = (0x1 << 21),
            SUB = (0x2 << 21),
            RSB = (0x3 << 21),
            ADD = (0x4 << 21),
            ADC = (0x5 << 21),
            SBC = (0x6 << 21),
            RSC = (0x7 << 21),
            TST = (0x8 << 21),
            TEQ = (0x9 << 21),
            CMP = (0xa << 21),
            CMN = (0xb << 21),
            ORR = (0xc << 21),
            MOV = (0xd << 21),
            BIC = (0xe << 21),
            MVN = (0xf << 21),
            MUL = 0x00000090,
            MULL = 0x00c00090,
            VADD_F64 = 0x0e300b00,
            VDIV_F64 = 0x0e800b00,
            VSUB_F64 = 0x0e300b40,
            VMUL_F64 = 0x0e200b00,
            VCMP_F64 = 0x0eb40b40,
            VSQRT_F64 = 0x0eb10bc0,
            DTR = 0x05000000,
            LDRH = 0x00100090,
            STRH = 0x00000090,
            STMDB = 0x09200000,
            LDMIA = 0x08b00000,
            FDTR = 0x0d000b00,
            B = 0x0a000000,
            BL = 0x0b000000,
#if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__)
            BX = 0x012fff10,
#endif
            VMOV_VFP = 0x0e000a10,
            VMOV_ARM = 0x0e100a10,
            VCVT_F64_S32 = 0x0eb80bc0,
            VCVT_S32_F64 = 0x0ebd0b40,
            VMRS_APSR = 0x0ef1fa10,
#if WTF_ARM_ARCH_AT_LEAST(5)
            CLZ = 0x016f0f10,
            BKPT = 0xe1200070,
            BLX = 0x012fff30,
            NOP_T2 = 0xf3af8000,
#endif
#if WTF_ARM_ARCH_AT_LEAST(7)
            MOVW = 0x03000000,
            MOVT = 0x03400000,
#endif
            NOP = 0xe1a00000,
        };

        enum {
            OP2_IMM = (1 << 25),
            OP2_IMMh = (1 << 22),
            OP2_INV_IMM = (1 << 26),
            SET_CC = (1 << 20),
            OP2_OFSREG = (1 << 25),
            DT_UP = (1 << 23),
            DT_BYTE = (1 << 22),
            DT_WB = (1 << 21),
            // This flag is included in LDR and STR
            DT_PRE = (1 << 24),
            HDT_UH = (1 << 5),
            DT_LOAD = (1 << 20),
        };

        // Masks of ARM instructions
        enum {
            BRANCH_MASK = 0x00ffffff,
            NONARM = 0xf0000000,
            SDT_MASK = 0x0c000000,
            SDT_OFFSET_MASK = 0xfff,
        };

        enum {
            BOFFSET_MIN = -0x00800000,
            BOFFSET_MAX = 0x007fffff,
            SDT = 0x04000000,
        };

        enum {
            padForAlign8 = 0x00,
            padForAlign16 = 0x0000,
            padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
        };

        static const ARMWord INVALID_IMM = 0xf0000000;
        static const ARMWord InvalidBranchTarget = 0xffffffff;
        static const int DefaultPrefetching = 2;

        // Instruction formatting

        void emitInst(ARMWord op, int rd, int rn, ARMWord op2)
        {
            ASSERT(((op2 & ~OP2_IMM) <= 0xfff) || (((op2 & ~OP2_IMMh) <= 0xfff)));
            m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
        }
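        // Worked example (illustrative only; not part of the original header): an ARM
        // data-processing word is laid out as cond[31:28] | I[25] | opcode[24:21] | S[20]
        // | Rn[19:16] | Rd[15:12] | operand2[11:0]. So
        //     add_r(ARMRegisters::r0, ARMRegisters::r1, OP2_IMM | 42);
        // assembles AL | ADD | RN(r1) | RD(r0) | OP2_IMM | 42 == 0xe281002a,
        // i.e. "add r0, r1, #42".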

        void emitDoublePrecisionInst(ARMWord op, int dd, int dn, int dm)
        {
            ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
            m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
                | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
                | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
        }

        void emitSinglePrecisionInst(ARMWord op, int sd, int sn, int sm)
        {
            ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
            m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
                | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
                | (sm >> 1) | ((sm & 0x1) << 5));
        }

        void and_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | AND, rd, rn, op2);
        }

        void ands_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | AND | SET_CC, rd, rn, op2);
        }

        void eor_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | EOR, rd, rn, op2);
        }

        void eors_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | EOR | SET_CC, rd, rn, op2);
        }

        void sub_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | SUB, rd, rn, op2);
        }

        void subs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | SUB | SET_CC, rd, rn, op2);
        }

        void rsb_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | RSB, rd, rn, op2);
        }

        void rsbs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | RSB | SET_CC, rd, rn, op2);
        }

        void add_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | ADD, rd, rn, op2);
        }

        void adds_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | ADD | SET_CC, rd, rn, op2);
        }

        void adc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | ADC, rd, rn, op2);
        }

        void adcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | ADC | SET_CC, rd, rn, op2);
        }

        void sbc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | SBC, rd, rn, op2);
        }

        void sbcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | SBC | SET_CC, rd, rn, op2);
        }

        void rsc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | RSC, rd, rn, op2);
        }

        void rscs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | RSC | SET_CC, rd, rn, op2);
        }

        void tst_r(int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | TST | SET_CC, 0, rn, op2);
        }

        void teq_r(int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | TEQ | SET_CC, 0, rn, op2);
        }

        void cmp_r(int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | CMP | SET_CC, 0, rn, op2);
        }

        void cmn_r(int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | CMN | SET_CC, 0, rn, op2);
        }

        void orr_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | ORR, rd, rn, op2);
        }

        void orrs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | ORR | SET_CC, rd, rn, op2);
        }

        void mov_r(int rd, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2);
        }

#if WTF_ARM_ARCH_AT_LEAST(7)
        void movw_r(int rd, ARMWord op2, Condition cc = AL)
        {
            ASSERT((op2 | 0xf0fff) == 0xf0fff);
            m_buffer.putInt(static_cast<ARMWord>(cc) | MOVW | RD(rd) | op2);
        }

        void movt_r(int rd, ARMWord op2, Condition cc = AL)
        {
            ASSERT((op2 | 0xf0fff) == 0xf0fff);
            m_buffer.putInt(static_cast<ARMWord>(cc) | MOVT | RD(rd) | op2);
        }
#endif

        void movs_r(int rd, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARMRegisters::r0, op2);
        }

        void bic_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | BIC, rd, rn, op2);
        }

        void bics_r(int rd, int rn, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | BIC | SET_CC, rd, rn, op2);
        }

        void mvn_r(int rd, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARMRegisters::r0, op2);
        }

        void mvns_r(int rd, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARMRegisters::r0, op2);
        }

        void mul_r(int rd, int rn, int rm, Condition cc = AL)
        {
            m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
        }
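        // Note: in the MUL/MULL encodings the destination lives in bits 19:16 (the slot
        // RN() shifts into) and the first operand in bits 11:8 (the RS() slot), which is
        // why mul_r(), muls_r() and mull_r() below pass rd through RN() and rn through RS().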

        void muls_r(int rd, int rn, int rm, Condition cc = AL)
        {
            m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | SET_CC | RN(rd) | RS(rn) | RM(rm));
        }

        void mull_r(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
        {
            m_buffer.putInt(static_cast<ARMWord>(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
        }

        void vadd_f64_r(int dd, int dn, int dm, Condition cc = AL)
        {
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VADD_F64, dd, dn, dm);
        }

        void vdiv_f64_r(int dd, int dn, int dm, Condition cc = AL)
        {
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VDIV_F64, dd, dn, dm);
        }

        void vsub_f64_r(int dd, int dn, int dm, Condition cc = AL)
        {
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VSUB_F64, dd, dn, dm);
        }

        void vmul_f64_r(int dd, int dn, int dm, Condition cc = AL)
        {
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VMUL_F64, dd, dn, dm);
        }

        void vcmp_f64_r(int dd, int dm, Condition cc = AL)
        {
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCMP_F64, dd, 0, dm);
        }

        void vsqrt_f64_r(int dd, int dm, Condition cc = AL)
        {
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VSQRT_F64, dd, 0, dm);
        }

        void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
        {
            m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true);
        }

        void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL)
        {
            m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm);
        }
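        // Both loads above emit a pc-relative "ldr rd, [pc, #imm]" whose literal lives in
        // the out-of-line constant pool managed by AssemblerBufferWithConstantPool; the
        // 12-bit offset is fixed up when the pool is flushed. The extra 'true' argument in
        // ldr_imm() appears to mark the constant as reusable so identical values can share
        // a pool slot, whereas ldr_un_imm() keeps a unique slot that can be patched later
        // (see loadBranchTarget() below).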

        void dtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP, rd, rb, op2);
        }

        void dtr_ur(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP | OP2_OFSREG, rd, rb, rm);
        }

        void dtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
        }

        void dtr_dr(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | OP2_OFSREG, rd, rb, rm);
        }

        void ldrh_r(int rd, int rn, int rm, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
        }

        void ldrh_d(int rd, int rb, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_PRE, rd, rb, op2);
        }

        void ldrh_u(int rd, int rb, ARMWord op2, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rb, op2);
        }

        void strh_r(int rn, int rm, int rd, Condition cc = AL)
        {
            emitInst(static_cast<ARMWord>(cc) | STRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
        }

        void fdtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
        {
            ASSERT(op2 <= 0xff);
            emitInst(static_cast<ARMWord>(cc) | FDTR | DT_UP | (isLoad ? DT_LOAD : 0), rd, rb, op2);
        }

        void fdtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
        {
            ASSERT(op2 <= 0xff);
            emitInst(static_cast<ARMWord>(cc) | FDTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
        }

        void push_r(int reg, Condition cc = AL)
        {
            ASSERT(ARMWord(reg) <= 0xf);
            m_buffer.putInt(cc | DTR | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4);
        }

        void pop_r(int reg, Condition cc = AL)
        {
            ASSERT(ARMWord(reg) <= 0xf);
            m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4);
        }
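        // For reference: push_r() encodes "str reg, [sp, #-4]!" (pre-indexed with
        // write-back and no up bit), while pop_r() flips DT_PRE out of DTR to get the
        // post-indexed "ldr reg, [sp], #4".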

        inline void poke_r(int reg, Condition cc = AL)
        {
            dtr_d(false, ARMRegisters::sp, 0, reg, cc);
        }

        inline void peek_r(int reg, Condition cc = AL)
        {
            dtr_u(true, reg, ARMRegisters::sp, 0, cc);
        }

        void vmov_vfp_r(int sn, int rt, Condition cc = AL)
        {
            ASSERT(rt <= 15);
            emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_VFP, rt << 1, sn, 0);
        }

        void vmov_arm_r(int rt, int sn, Condition cc = AL)
        {
            ASSERT(rt <= 15);
            emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_ARM, rt << 1, sn, 0);
        }

        void vcvt_f64_s32_r(int dd, int sm, Condition cc = AL)
        {
            ASSERT(!(sm & 0x1)); // sm must be divisible by 2
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
        }

        void vcvt_s32_f64_r(int sd, int dm, Condition cc = AL)
        {
            ASSERT(!(sd & 0x1)); // sd must be divisible by 2
            emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
        }

        void vmrs_apsr(Condition cc = AL)
        {
            m_buffer.putInt(static_cast<ARMWord>(cc) | VMRS_APSR);
        }

#if WTF_ARM_ARCH_AT_LEAST(5)
        void clz_r(int rd, int rm, Condition cc = AL)
        {
            m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
        }
#endif

        void bkpt(ARMWord value)
        {
#if WTF_ARM_ARCH_AT_LEAST(5)
            m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
#else
            // Cannot access the zero memory address
            dtr_dr(true, ARMRegisters::S0, ARMRegisters::S0, ARMRegisters::S0);
#endif
        }

        void nop()
        {
            m_buffer.putInt(NOP);
        }

        void bx(int rm, Condition cc = AL)
        {
#if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__)
            emitInst(static_cast<ARMWord>(cc) | BX, 0, 0, RM(rm));
#else
            mov_r(ARMRegisters::pc, RM(rm), cc);
#endif
        }

        AssemblerLabel blx(int rm, Condition cc = AL)
        {
#if WTF_ARM_ARCH_AT_LEAST(5)
            emitInst(static_cast<ARMWord>(cc) | BLX, 0, 0, RM(rm));
#else
            ASSERT(rm != 14);
            ensureSpace(2 * sizeof(ARMWord), 0);
            mov_r(ARMRegisters::lr, ARMRegisters::pc, cc);
            bx(rm, cc);
#endif
            return m_buffer.label();
        }
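        // In the pre-ARMv5 fallback above, "mov lr, pc" reads pc as the address of the mov
        // plus 8, which is exactly the instruction following the bx, so the callee returns
        // past the emitted pair.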

        static ARMWord lsl(int reg, ARMWord value)
        {
            ASSERT(reg <= ARMRegisters::pc);
            ASSERT(value <= 0x1f);
            return reg | (value << 7) | 0x00;
        }

        static ARMWord lsr(int reg, ARMWord value)
        {
            ASSERT(reg <= ARMRegisters::pc);
            ASSERT(value <= 0x1f);
            return reg | (value << 7) | 0x20;
        }

        static ARMWord asr(int reg, ARMWord value)
        {
            ASSERT(reg <= ARMRegisters::pc);
            ASSERT(value <= 0x1f);
            return reg | (value << 7) | 0x40;
        }

        static ARMWord lsl_r(int reg, int shiftReg)
        {
            ASSERT(reg <= ARMRegisters::pc);
            ASSERT(shiftReg <= ARMRegisters::pc);
            return reg | (shiftReg << 8) | 0x10;
        }

        static ARMWord lsr_r(int reg, int shiftReg)
        {
            ASSERT(reg <= ARMRegisters::pc);
            ASSERT(shiftReg <= ARMRegisters::pc);
            return reg | (shiftReg << 8) | 0x30;
        }

        static ARMWord asr_r(int reg, int shiftReg)
        {
            ASSERT(reg <= ARMRegisters::pc);
            ASSERT(shiftReg <= ARMRegisters::pc);
            return reg | (shiftReg << 8) | 0x50;
        }
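        // These helpers build the shifted-register form of operand 2: Rm in bits 3:0, the
        // shift type in bits 6:5, and either a shift amount in bits 11:7 or a shift
        // register in bits 11:8. Illustrative use (not from the original header):
        //     assembler.add_r(ARMRegisters::r0, ARMRegisters::r1, lsl(ARMRegisters::r2, 2));
        // assembles "add r0, r1, r2, lsl #2", i.e. 0xe0810102.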

        // General helpers

        size_t codeSize() const
        {
            return m_buffer.codeSize();
        }

        void ensureSpace(int insnSpace, int constSpace)
        {
            m_buffer.ensureSpace(insnSpace, constSpace);
        }

        int sizeOfConstantPool()
        {
            return m_buffer.sizeOfConstantPool();
        }

        AssemblerLabel label()
        {
            m_buffer.ensureSpaceForAnyOneInstruction();
            return m_buffer.label();
        }

        AssemblerLabel align(int alignment)
        {
            while (!m_buffer.isAligned(alignment))
                mov_r(ARMRegisters::r0, ARMRegisters::r0);

            return label();
        }

        AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
        {
            ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
            m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
            ldr_un_imm(rd, InvalidBranchTarget, cc);
            return m_buffer.label();
        }

        AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
        {
            return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
        }
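        // loadBranchTarget() emits a load of the InvalidBranchTarget placeholder and
        // records the instruction's offset in m_jumps, with the low bit flagging whether
        // the constant pool form is used. The placeholder is presumably resolved later by
        // linkJump()/relinkJump() or when executableCopy() (in ARMAssembler.cpp) walks
        // m_jumps.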

        PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID, JITCompilationEffort);

        unsigned debugOffset() { return m_buffer.debugOffset(); }

        // Patching helpers

        static ARMWord* getLdrImmAddress(ARMWord* insn)
        {
#if WTF_ARM_ARCH_AT_LEAST(5)
            // Check for call
            if ((*insn & 0x0f7f0000) != 0x051f0000) {
                // Must be BLX
                ASSERT((*insn & 0x012fff30) == 0x012fff30);
                insn--;
            }
#endif
            // Must be an ldr ..., [pc +/- imm]
            ASSERT((*insn & 0x0f7f0000) == 0x051f0000);

            ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetching * sizeof(ARMWord);
            if (*insn & DT_UP)
                return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
            return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
        }
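        // The "DefaultPrefetching * sizeof(ARMWord)" term reflects the ARM rule that a
        // pc-relative offset is measured from the instruction address plus 8 (two words),
        // so the returned pointer is the address of the literal word the ldr reads.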

        static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
        {
            // Must be an ldr ..., [pc +/- imm]
            ASSERT((*insn & 0x0f7f0000) == 0x051f0000);

            if (*insn & 0x1)
                return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
            return getLdrImmAddress(insn);
        }

        static void patchPointerInternal(intptr_t from, void* to)
        {
            ARMWord* insn = reinterpret_cast<ARMWord*>(from);
            ARMWord* addr = getLdrImmAddress(insn);
            *addr = reinterpret_cast<ARMWord>(to);
        }

        static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
        {
            value = (value << 1) + 1;
            ASSERT(!(value & ~0xfff));
            return (load & ~0xfff) | value;
        }
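        // Storing "(value << 1) + 1" places a constant-pool index in the offset field with
        // the low bit set; getLdrImmAddressOnPool() above tests that bit and shifts the
        // index back down while the literal still lives in the pool.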

        static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);

        // Read pointers
        static void* readPointer(void* from)
        {
            ARMWord* insn = reinterpret_cast<ARMWord*>(from);
            ARMWord* addr = getLdrImmAddress(insn);
            return *reinterpret_cast<void**>(addr);
        }

        // Patch pointers

        static void linkPointer(void* code, AssemblerLabel from, void* to)
        {
            patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
        }

        static void repatchInt32(void* from, int32_t to)
        {
            patchPointerInternal(reinterpret_cast<intptr_t>(from), reinterpret_cast<void*>(to));
        }

        static void repatchCompact(void* where, int32_t value)
        {
            repatchInt32(where, value);
        }

        static void repatchPointer(void* from, void* to)
        {
            patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
        }

        // Linkers
        static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
        {
            return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
        }

        void linkJump(AssemblerLabel from, AssemblerLabel to)
        {
            ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
            ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
            *addr = static_cast<ARMWord>(to.m_offset);
        }

        static void linkJump(void* code, AssemblerLabel from, void* to)
        {
            patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
        }

        static void relinkJump(void* from, void* to)
        {
            patchPointerInternal(getAbsoluteJumpAddress(from), to);
        }

        static void linkCall(void* code, AssemblerLabel from, void* to)
        {
            patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
        }

        static void relinkCall(void* from, void* to)
        {
            patchPointerInternal(getAbsoluteJumpAddress(from), to);
        }

        static void* readCallTarget(void* from)
        {
            return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from))));
        }

        // Address operations

        static void* getRelocatedAddress(void* code, AssemblerLabel label)
        {
            return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
        }

        // Address differences

        static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
        {
            return b.m_offset - a.m_offset;
        }

        static unsigned getCallReturnOffset(AssemblerLabel call)
        {
            return call.m_offset;
        }

        // Handle immediates

        static ARMWord getOp2Byte(ARMWord imm)
        {
            ASSERT(imm <= 0xff);
            return OP2_IMMh | (imm & 0x0f) | ((imm & 0xf0) << 4);
        }

        static ARMWord getOp2(ARMWord imm);
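        // getOp2() (implemented in ARMAssembler.cpp) tries to express an immediate in
        // ARM's rotated form -- an 8-bit value rotated right by an even amount -- and
        // returns an OP2_IMM-tagged encoding on success or INVALID_IMM on failure.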

#if WTF_ARM_ARCH_AT_LEAST(7)
        static ARMWord getImm16Op2(ARMWord imm)
        {
            if (imm <= 0xffff)
                return (imm & 0xf000) << 4 | (imm & 0xfff);
            return INVALID_IMM;
        }
#endif
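        // getImm16Op2() splits a 16-bit immediate into the imm4:imm12 fields used by
        // MOVW/MOVT; for example 0x1234 becomes 0x10234, which movw_r() above ORs straight
        // into the instruction word.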
        ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
        void moveImm(ARMWord imm, int dest);
        ARMWord encodeComplexImm(ARMWord imm, int dest);

        ARMWord getOffsetForHalfwordDataTransfer(ARMWord imm, int tmpReg)
        {
            // Encode the immediate in the instruction if possible
            if (imm <= 0xff)
                return getOp2Byte(imm);
            // Otherwise, store the data in a temporary register
            return encodeComplexImm(imm, tmpReg);
        }

        // Memory load/store helpers

        void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes = false);
        void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset, bool bytes = false);
        void doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset);

        // Constant pool handlers

        static ARMWord placeConstantPoolBarrier(int offset)
        {
            offset = (offset - sizeof(ARMWord)) >> 2;
            ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
            return AL | B | (offset & BRANCH_MASK);
        }
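        // placeConstantPoolBarrier() returns an unconditional branch that jumps over an
        // about-to-be-flushed constant pool; the word subtracted before the shift appears
        // to compensate for branch offsets being encoded relative to pc + 8 rather than to
        // the next instruction.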

#if OS(LINUX) && COMPILER(RVCT)
        static __asm void cacheFlush(void* code, size_t);
#else
        static void cacheFlush(void* code, size_t size)
        {
#if OS(LINUX) && COMPILER(GCC)
            uintptr_t currentPage = reinterpret_cast<uintptr_t>(code) & ~(pageSize() - 1);
            uintptr_t lastPage = (reinterpret_cast<uintptr_t>(code) + size) & ~(pageSize() - 1);
            do {
                asm volatile(
                    "push {r7}\n"
                    "mov r0, %0\n"
                    "mov r1, %1\n"
                    "mov r7, #0xf0000\n"
                    "add r7, r7, #0x2\n"
                    "mov r2, #0x0\n"
                    "svc 0x0\n"
                    "pop {r7}\n"
                    :
                    : "r" (currentPage), "r" (currentPage + pageSize())
                    : "r0", "r1", "r2");
                currentPage += pageSize();
            } while (lastPage >= currentPage);
#elif OS(WINCE)
            CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE)
            UNUSED_PARAM(code);
            UNUSED_PARAM(size);
#elif OS(QNX)
            msync(code, size, MS_INVALIDATE_ICACHE);
#else
#error "The cacheFlush support is missing on this platform."
#endif
        }
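        // In the Linux/GCC path above, r7 = 0xf0002 selects the ARM-specific
        // __ARM_NR_cacheflush system call, r0/r1 hold the start and end of the range and
        // r2 is zero; the range is flushed one page at a time.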
#endif

    private:
        ARMWord RM(int reg)
        {
            ASSERT(reg <= ARMRegisters::pc);
            return reg;
        }

        ARMWord RS(int reg)
        {
            ASSERT(reg <= ARMRegisters::pc);
            return reg << 8;
        }

        ARMWord RD(int reg)
        {
            ASSERT(reg <= ARMRegisters::pc);
            return reg << 12;
        }

        ARMWord RN(int reg)
        {
            ASSERT(reg <= ARMRegisters::pc);
            return reg << 16;
        }

        static ARMWord getConditionalField(ARMWord i)
        {
            return i & 0xf0000000;
        }

        int genInt(int reg, ARMWord imm, bool positive);

        ARMBuffer m_buffer;
        Jumps m_jumps;
    };

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#endif // ARMAssembler_h