/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"

namespace JSC {

// Patching helpers
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
    ARMWord index = (*ldr & 0xfff) >> 1;

    ASSERT(diff >= 1);
    if (diff >= 2 || index > 0) {
        diff = (diff + index - 2) * sizeof(ARMWord);
        ASSERT(diff <= 0xfff);
        *ldr = (*ldr & ~0xfff) | diff;
    } else
        *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
}
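// Example of the patching math above (values chosen for illustration): the
// low 12 bits of the unpatched ldr hold the constant's slot index, pre-shifted
// left by one. If the ldr sits three words before the pool (diff == 3) and the
// constant is in slot 1, the patched byte offset is (3 + 1 - 2) * 4 == 8: the
// "-2" compensates for the ARM pc reading two words (8 bytes) ahead of the
// executing instruction.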
// Handle immediates

ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    if (imm <= 0xff)
        return Op2Immediate | imm;

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol = 8;
    } else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm <<= 4;
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm <<= 2;
        rol += 1;
    }

    if ((imm & 0x00ffffff) == 0)
        return Op2Immediate | (imm >> 24) | (rol << 8);

    return InvalidImmediate;
}
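// Worked example: getOp2(0x3fc00). The byte 0xff rotated right by 22 bits
// yields 0x3fc00, so after the normalizing shifts above rol == 11 and the
// result is Op2Immediate | 0xff | (11 << 8), i.e. imm8 == 0xff with a 4-bit
// rotate field of 11 (ARM rotates imm8 right by twice the field value).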
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
    // Step 1: search for a non-immediate part
    ARMWord mask;
    ARMWord imm1;
    ARMWord imm2;
    int rol;

    mask = 0xff000000;
    rol = 8;
    while (1) {
        if ((imm & mask) == 0) {
            imm = (imm << rol) | (imm >> (32 - rol));
            rol = 4 + (rol >> 1);
            break;
        }
        rol += 2;
        mask >>= 2;
        if (mask & 0x3) {
            // rol 8
            imm = (imm << 8) | (imm >> 24);
            mask = 0xff00;
            rol = 24;
            while (1) {
                if ((imm & mask) == 0) {
                    imm = (imm << rol) | (imm >> (32 - rol));
                    rol = (rol >> 1) - 8;
                    break;
                }
                rol += 2;
                mask >>= 2;
                if (mask & 0x3)
                    return 0;
            }
            break;
        }
    }

    ASSERT((imm & 0xff) == 0);

    if ((imm & 0xff000000) == 0) {
        imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
        imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
    } else if (imm & 0xc0000000) {
        imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xff000000) == 0) {
            imm <<= 8;
            rol += 4;
        }

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    } else {
        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    }

    if (positive) {
        mov(reg, imm1);
        orr(reg, reg, imm2);
    } else {
        mvn(reg, imm1);
        bic(reg, reg, imm2);
    }

    return 1;
}
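// For example, 0x44000033 has no single operand2 encoding, but it is the
// union of two rotatable bytes, so genInt(reg, 0x44000033, true) can realize
// it as a "mov reg, #imm1" followed by "orr reg, reg, #imm2" (or mvn/bic for
// the inverted case); which exact imm1/imm2 pair is emitted depends on the
// rotation search above.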
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Do it with one instruction
    tmp = getOp2(imm);
    if (tmp != InvalidImmediate)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp != InvalidImmediate) {
        if (invert)
            return tmp | Op2InvertedImmediate;
        mvn(tmpReg, tmp);
        return tmpReg;
    }

    return encodeComplexImm(imm, tmpReg);
}
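// Illustrative case: getImm(0xffffff00, tmpReg, false). 0xffffff00 has no
// operand2 encoding, but its complement 0xff does, so the function emits
// "mvn tmpReg, #0xff" and returns tmpReg. With invert == true the caller
// receives the inverted encoding (tagged Op2InvertedImmediate) and is
// expected to select the inverted instruction form itself.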
void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Do it with one instruction
    tmp = getOp2(imm);
    if (tmp != InvalidImmediate) {
        mov(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp != InvalidImmediate) {
        mvn(dest, tmp);
        return;
    }

    encodeComplexImm(imm, dest);
}
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
#if WTF_ARM_ARCH_AT_LEAST(7)
    ARMWord tmp = getImm16Op2(imm);
    if (tmp != InvalidImmediate) {
        movw(dest, tmp);
        return dest;
    }
    movw(dest, getImm16Op2(imm & 0xffff));
    movt(dest, getImm16Op2(imm >> 16));
    return dest;
#else
    // Do it with two instructions
    if (genInt(dest, imm, true))
        return dest;
    if (genInt(dest, ~imm, false))
        return dest;

    ldrImmediate(dest, imm);
    return dest;
#endif
}
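// On ARMv7 the movw/movt pair above loads any 32-bit value in two
// instructions, e.g. imm == 0x12345678 becomes "movw dest, #0x5678" followed
// by "movt dest, #0x1234". Pre-v7 cores fall back to genInt(), and as a last
// resort to a literal load from the constant pool via ldrImmediate().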
// Memory load/store helpers

void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
{
    if (offset >= 0) {
        if (offset <= 0xfff)
            dtrUp(transferType, srcDst, base, offset);
        else if (offset <= 0xfffff) {
            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
            dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    } else {
        if (offset >= -0xfff)
            dtrDown(transferType, srcDst, base, -offset);
        else if (offset >= -0xfffff) {
            sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
            dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    }
}
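// Worked example for the medium-range path (shown for a load): offset 0x12345
// does not fit the 12-bit ldr/str field, so the code emits
//   add s0, base, #0x12000   ; Op2Immediate | 0x12 | (10 << 8): 0x12 ror 20
//   ldr srcDst, [s0, #0x345]
// using the scratch register ARMRegisters::S0.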
void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    ASSERT(scale >= 0 && scale <= 3);
    ARMWord op2 = lsl(index, scale);

    if (!offset) {
        dtrUpRegister(transferType, srcDst, base, op2);
        return;
    }

    if (offset <= 0xfffff && offset >= -0xfffff) {
        add(ARMRegisters::S0, base, op2);
        dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset);
        return;
    }

    moveImm(offset, ARMRegisters::S0);
    add(ARMRegisters::S0, ARMRegisters::S0, op2);
    dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
}
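// lsl(index, scale) builds a scaled-register operand, so a typical int32
// array access base[index] (scale == 2, offset == 0) compiles to a single
// "ldr srcDst, [base, index, lsl #2]" via the dtrUpRegister() fast path.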
void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
{
    if (offset >= 0) {
        if (offset <= 0xff)
            halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
        else if (offset <= 0xffff) {
            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
            halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    } else {
        if (offset >= -0xff)
            halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
        else if (offset >= -0xffff) {
            sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
            halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
        } else {
            moveImm(offset, ARMRegisters::S0);
            halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
        }
    }
}
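// Same pattern as dataTransfer32, but halfword transfers only carry an 8-bit
// immediate. For example, offset 0x1234 becomes
//   add s0, base, #0x1200    ; Op2Immediate | 0x12 | (12 << 8): 0x12 ror 24
//   ldrh srcDst, [s0, #0x34]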
void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    if (!scale && !offset) {
        halfDtrUpRegister(transferType, srcDst, base, index);
        return;
    }

    ARMWord op2 = lsl(index, scale);

    if (offset <= 0xffff && offset >= -0xffff) {
        add(ARMRegisters::S0, base, op2);
        dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset);
        return;
    }

    moveImm(offset, ARMRegisters::S0);
    add(ARMRegisters::S0, ARMRegisters::S0, op2);
    halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
}
void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
    // VFP cannot directly access memory that is not four-byte aligned
    if (!(offset & 0x3)) {
        if (offset <= 0x3ff && offset >= 0) {
            doubleDtrUp(transferType, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
            doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;

        if (offset <= 0x3ff && offset >= 0) {
            doubleDtrDown(transferType, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
            doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;
    }

    moveImm(offset, ARMRegisters::S0);
    add(ARMRegisters::S0, ARMRegisters::S0, base);
    doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
}
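// vldr/vstr take an 8-bit immediate counted in words, hence the direct range
// of up to 1020 bytes (255 * 4). For a word-aligned offset such as 0x12340
// the code emits "add s0, base, #0x12000" (0x48 ror 22, i.e. bits [17:10] of
// the offset) and transfers with the remaining word count (0x340 >> 2).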
void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    add(ARMRegisters::S1, base, lsl(index, scale));
    dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
}
PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
{
    // 64-bit alignment is required for the next constant pool and the JIT code as well
    m_buffer.flushWithoutBarrier(true);
    if (!m_buffer.isAligned(8))
        bkpt(0);

    RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(vm, ownerUID, effort);
    char* data = reinterpret_cast<char*>(result->start());

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The least significant bit is set if the constant must be placed in the constant pool.
        int pos = (iter->m_offset) & (~0x1);
        ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
        ARMWord* addr = getLdrImmAddress(ldrAddr);
        if (*addr != InvalidBranchTarget) {
            if (!(iter->m_offset & 1)) {
                intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);

                if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
                    *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
                    continue;
                }
            }
            *addr = reinterpret_cast<ARMWord>(data + *addr);
        }
    }

    return result;
}
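// In other words: after copying, each recorded jump whose target is within
// branch range is rewritten in place into a direct B instruction (reusing its
// condition field), while out-of-range jumps keep their constant-pool load
// and only the pool slot is patched to the absolute target address.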
#if OS(LINUX) && COMPILER(RVCT)

__asm void ARMAssembler::cacheFlush(void* code, size_t size)
{
    ARM
    push {r7}
    add r1, r1, r0
    mov r7, #0xf0000
    add r7, r7, #0x2
    mov r2, #0x0
    svc #0x0
    pop {r7}
    bx lr
}

#endif

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)