            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
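+            // The rotate field (10 << 8) means "rotate right by 20", so the 8-bit
+            // immediate (offset >> 12) becomes offset & 0xff000; S0 = base + that,
+            // and the low 12 bits go into the transfer instruction itself.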
            dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
        } else {
-            ARMWord reg = getImm(offset, ARMRegisters::S0);
-            dtr_ur(isLoad, srcDst, base, reg | transferFlag);
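+            // The offset needs more than 20 bits: materialize all of it in S0
+            // and switch to register-offset addressing.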
+            moveImm(offset, ARMRegisters::S0);
+            dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
        }
    } else {
-        offset = -offset;
-        if (offset <= 0xfff)
-            dtr_d(isLoad, srcDst, base, offset | transferFlag);
-        else if (offset <= 0xfffff) {
-            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
-            dtr_d(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
+        if (offset >= -0xfff)
+            dtr_d(isLoad, srcDst, base, -offset | transferFlag);
+        else if (offset >= -0xfffff) {
+            sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 12) | (10 << 8));
+            dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag);
        } else {
-            ARMWord reg = getImm(offset, ARMRegisters::S0);
-            dtr_dr(isLoad, srcDst, base, reg | transferFlag);
+            moveImm(offset, ARMRegisters::S0);
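+            // S0 holds the signed (negative) offset; the "up" register form still
+            // computes the right address because the 32-bit add wraps around.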
+            dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
        }
    }
}

-void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset, bool bytes)
{
    ARMWord op2;
+    ARMWord transferFlag = bytes ? DT_BYTE : 0;
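+    // When 'bytes' is set, DT_BYTE switches the transfer to its byte form
+    // (LDRB/STRB instead of LDR/STR).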

    ASSERT(scale >= 0 && scale <= 3);
    op2 = lsl(index, scale);

    if (offset >= 0 && offset <= 0xfff) {
        add_r(ARMRegisters::S0, base, op2);
-        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
+        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset | transferFlag);
        return;
    }
    if (offset <= 0 && offset >= -0xfff) {
        add_r(ARMRegisters::S0, base, op2);
-        dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
+        dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag);
        return;
    }
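+    // The offset fits neither immediate form: ldr_un_imm loads it into S0
+    // (via the constant pool), then fold in the scaled index and address
+    // relative to base with the register-offset form.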
    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
-    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
+    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
}

void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
-    if (offset & 0x3) {
+    // VFP cannot directly access memory that is not four-byte-aligned
+    if (!(offset & 0x3)) {
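+        // The VFP transfer immediate is an 8-bit word count, so an aligned
+        // offset of up to 0x3ff is encodable as offset >> 2.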
        if (offset <= 0x3ff && offset >= 0) {
            fdtr_u(isLoad, srcDst, base, offset >> 2);
            return;
        }
    }

+    // Fall back: compute base + offset in S0 and transfer with a zero offset.
    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, base);
    fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
}

-void* ARMAssembler::executableCopy(ExecutablePool* allocator)
+PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
{
    // 64-bit alignment is required for next constant pool and JIT code as well
    m_buffer.flushWithoutBarrier(true);
-    if (m_buffer.uncheckedSize() & 0x7)
+    if (!m_buffer.isAligned(8))
        bkpt(0);

-    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));
+    RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+    char* data = reinterpret_cast<char*>(result->start());

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The last bit is set if the constant must be placed on constant pool.
-        int pos = (*iter) & (~0x1);
-        ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
+        int pos = (iter->m_offset) & (~0x1);
+        ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
        ARMWord* addr = getLdrImmAddress(ldrAddr);
        if (*addr != InvalidBranchTarget) {
-            if (!(*iter & 1)) {
-                int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
+            if (!(iter->m_offset & 1)) {
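+                // A clear tag bit marks a jump that may be rewritten in place.
+                // Branch displacements are relative to the fetch address, two
+                // instructions (8 bytes) past the ldr, hence DefaultPrefetching.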
+                int diff = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
                if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
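+                    // The target is within B's +/- 32MB reach: turn the
+                    // constant-pool jump into a direct branch, keeping the
+                    // original condition field.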
                    *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
                }
            }
        }
    }

-    return data;
+    return result;
}
+
+#if OS(LINUX) && COMPILER(RVCT)
+
+__asm void ARMAssembler::cacheFlush(void* code, size_t size)
+{
+    ARM
+    push {r7}
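+    ; r0 = start, r1 = size on entry (AAPCS); the cacheflush syscall takes
+    ; (start, end, flags), so turn r1 into the end address first.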
+    add r1, r1, r0
+    mov r7, #0xf0000
+    add r7, r7, #0x2
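+    ; r7 = 0xf0002 = __ARM_NR_cacheflush. 0xf0002 does not fit a single
+    ; rotated 8-bit immediate, hence the mov/add pair; r2 = 0 means no flags.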
+    mov r2, #0x0
+    svc #0x0
+    pop {r7}
+    bx lr
+}
+#endif
+
} // namespace JSC
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)