/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"

namespace JSC {

// Patching helpers

void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
    ARMWord index = (*ldr & 0xfff) >> 1;

    ASSERT(diff >= 1);
    if (diff >= 2 || index > 0) {
        // Rewrite the 12-bit offset so the load addresses its constant
        // relative to PC (which reads as ldr + 8 on ARM).
        diff = (diff + index - 2) * sizeof(ARMWord);
        ASSERT(diff <= 0xfff);
        *ldr = (*ldr & ~0xfff) | diff;
    } else
        // The pool begins at the very next word: address it as PC - 4,
        // so clear DT_UP and use a downward offset of one word.
        *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
}
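
// ARM data-processing instructions encode an immediate as an 8-bit value
// rotated right by twice the 4-bit rotate field. getOp2() tries to express
// imm in that form, returning OP2_IMM | value | (rotate << 8), or
// INVALID_IMM when no such encoding exists. For example, 0x3fc (0xff << 2)
// encodes as value 0xff with rotate field 15 (rotate right by 30, i.e.
// rotate left by 2).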
ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    if (imm <= 0xff)
        return OP2_IMM | imm;

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol = 8;
    } else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm <<= 4;
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm <<= 2;
        rol += 1;
    }

    if ((imm & 0x00ffffff) == 0)
        return OP2_IMM | (imm >> 24) | (rol << 8);

    return INVALID_IMM;
}
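
// genInt() materializes a constant that cannot be expressed as a single
// operand-2 immediate by splitting it into two rotated 8-bit chunks:
// MOV + ORR when 'positive' is true, MVN + BIC otherwise. For example,
// 0xff0000ff can be built as MOV reg, #0xff followed by
// ORR reg, reg, #0xff000000. Returns 0 if no two-chunk split exists.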
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
    // Step 1: Search for a non-immediate part.
    ARMWord mask;
    ARMWord imm1;
    ARMWord imm2;
    int rol;

    mask = 0xff000000;
    rol = 8;
    while (1) {
        if ((imm & mask) == 0) {
            imm = (imm << rol) | (imm >> (32 - rol));
            rol = 4 + (rol >> 1);
            break;
        }
        rol += 2;
        mask >>= 2;
        if (mask & 0x3) {
            // rol 8
            imm = (imm << 8) | (imm >> 24);
            mask = 0xff00;
            rol = 24;
            while (1) {
                if ((imm & mask) == 0) {
                    imm = (imm << rol) | (imm >> (32 - rol));
                    rol = (rol >> 1) - 8;
                    break;
                }
                rol += 2;
                mask >>= 2;
                if (mask & 0x3)
                    return 0;
            }
            break;
        }
    }

    ASSERT((imm & 0xff) == 0);

    // Step 2: Encode the two 8-bit chunks as rotated operand-2 immediates.
    if ((imm & 0xff000000) == 0) {
        imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
        imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
    } else if (imm & 0xc0000000) {
        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xff000000) == 0) {
            imm <<= 8;
            rol += 4;
        }

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    } else {
        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    }

    if (positive) {
        mov_r(reg, imm1);
        orr_r(reg, reg, imm2);
    } else {
        mvn_r(reg, imm1);
        bic_r(reg, reg, imm2);
    }

    return 1;
}
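
// getImm() yields something usable as an operand-2 field: the immediate
// itself when encodable, its bitwise inverse tagged with OP2_INV_IMM when
// the caller can invert the operation (otherwise loaded into tmpReg with
// MVN), and as a last resort the constant is synthesized into tmpReg by
// encodeComplexImm().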
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Do it with 1 instruction if possible.
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        if (invert)
            return tmp | OP2_INV_IMM;
        mvn_r(tmpReg, tmp);
        return tmpReg;
    }

    return encodeComplexImm(imm, tmpReg);
}

void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Do it with 1 instruction if possible.
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM) {
        mov_r(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        mvn_r(dest, tmp);
        return;
    }

    encodeComplexImm(imm, dest);
}
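
// On ARMv7 and later any 32-bit constant fits in at most two instructions:
// MOVW writes the low 16 bits (clearing the upper half) and MOVT writes the
// high 16 bits. Older cores fall back to the two-instruction genInt()
// split, or to a constant-pool load as a last resort.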
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
#if WTF_ARM_ARCH_AT_LEAST(7)
    ARMWord tmp = getImm16Op2(imm);
    if (tmp != INVALID_IMM) {
        movw_r(dest, tmp);
        return dest;
    }
    movw_r(dest, getImm16Op2(imm & 0xffff));
    movt_r(dest, getImm16Op2(imm >> 16));
    return dest;
#else
    // Do it with 2 instructions if possible.
    if (genInt(dest, imm, true))
        return dest;
    if (genInt(dest, ~imm, false))
        return dest;

    // Fall back to a constant-pool load.
    ldr_imm(dest, imm);
    return dest;
#endif
}

// Memory load/store helpers

void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes)
{
    ARMWord transferFlag = bytes ? DT_BYTE : 0;
    if (offset >= 0) {
        if (offset <= 0xfff)
            dtr_u(isLoad, srcDst, base, offset | transferFlag);
        else if (offset <= 0xfffff) {
            // Materialize bits [19:12] of the offset with one ADD (an 8-bit
            // immediate rotated left by 12, rotate field 10), then transfer
            // with the remaining 12-bit offset.
            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
        } else {
            ARMWord reg = getImm(offset, ARMRegisters::S0);
            dtr_ur(isLoad, srcDst, base, reg | transferFlag);
        }
    } else {
        offset = -offset;
        if (offset <= 0xfff)
            dtr_d(isLoad, srcDst, base, offset | transferFlag);
        else if (offset <= 0xfffff) {
            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_d(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
        } else {
            ARMWord reg = getImm(offset, ARMRegisters::S0);
            dtr_dr(isLoad, srcDst, base, reg | transferFlag);
        }
    }
}
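
// Transfers to base + (index << scale) + offset. The scaled index is folded
// into the address with a shifted ADD through the scratch register S0;
// offsets outside the 12-bit immediate range are loaded from the constant
// pool instead.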
void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    ARMWord op2;

    ASSERT(scale >= 0 && scale <= 3);
    op2 = lsl(index, scale);

    if (offset >= 0 && offset <= 0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
        return;
    }
    if (offset <= 0 && offset >= -0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
        return;
    }

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
}
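
// VFP transfers (FLDD/FSTD) take an 8-bit word offset, so a double is
// directly addressable only within 1020 bytes of the base. For larger
// 18-bit offsets the ADD/SUB materializes bits [17:10] (an 8-bit value
// rotated left by 10, rotate field 11) and the remaining word offset goes
// into the transfer instruction itself.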
void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
    if (offset <= 0x3ff && offset >= 0) {
        fdtr_u(isLoad, srcDst, base, offset >> 2);
        return;
    }
    if (offset <= 0x3ffff && offset >= 0) {
        add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
        fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
        return;
    }
    offset = -offset;

    if (offset <= 0x3ff && offset >= 0) {
        fdtr_d(isLoad, srcDst, base, offset >> 2);
        return;
    }
    if (offset <= 0x3ffff && offset >= 0) {
        sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
        fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
        return;
    }
    offset = -offset;

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, base);
    fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
}
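
// Each recorded jump is revisited after the copy: if the target fits the
// signed 24-bit word offset of a B instruction (measured from the load
// address plus two words of prefetch), the constant-pool load is rewritten
// into a direct branch; otherwise the absolute target address is written
// into the constant-pool entry.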
void* ARMAssembler::executableCopy(ExecutablePool* allocator)
{
    // 64-bit alignment is required for the next constant pool and the JIT code as well.
    m_buffer.flushWithoutBarrier(true);
    if (m_buffer.uncheckedSize() & 0x7)
        bkpt(0);

    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The last bit is set if the constant must be placed in the constant pool.
        int pos = (*iter) & (~0x1);
        ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
        ARMWord* addr = getLdrImmAddress(ldrAddr);
        if (*addr != InvalidBranchTarget) {
            if (!(*iter & 1)) {
                int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);

                if (diff <= BOFFSET_MAX && diff >= BOFFSET_MIN) {
                    *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
                    continue;
                }
            }
            *addr = reinterpret_cast<ARMWord>(data + *addr);
        }
    }

    return data;
}

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)