X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/f9bf01c6616d5ddcf65b13b33cedf9e387ff7a63..6fe7ccc865dc7d7541b93c5bcaf6368d2c98a174:/assembler/ARMAssembler.cpp?ds=inline

diff --git a/assembler/ARMAssembler.cpp b/assembler/ARMAssembler.cpp
index 6dd2b87..74809ca 100644
--- a/assembler/ARMAssembler.cpp
+++ b/assembler/ARMAssembler.cpp
@@ -262,58 +262,60 @@ ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
 
 // Memory load/store helpers
 
-void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
+void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes)
 {
+    ARMWord transferFlag = bytes ? DT_BYTE : 0;
     if (offset >= 0) {
         if (offset <= 0xfff)
-            dtr_u(isLoad, srcDst, base, offset);
+            dtr_u(isLoad, srcDst, base, offset | transferFlag);
         else if (offset <= 0xfffff) {
             add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
-            dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+            dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag);
         } else {
-            ARMWord reg = getImm(offset, ARMRegisters::S0);
-            dtr_ur(isLoad, srcDst, base, reg);
+            moveImm(offset, ARMRegisters::S0);
+            dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
         }
     } else {
-        offset = -offset;
-        if (offset <= 0xfff)
-            dtr_d(isLoad, srcDst, base, offset);
-        else if (offset <= 0xfffff) {
-            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
-            dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
+        if (offset >= -0xfff)
+            dtr_d(isLoad, srcDst, base, -offset | transferFlag);
+        else if (offset >= -0xfffff) {
+            sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 12) | (10 << 8));
+            dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag);
         } else {
-            ARMWord reg = getImm(offset, ARMRegisters::S0);
-            dtr_dr(isLoad, srcDst, base, reg);
+            moveImm(offset, ARMRegisters::S0);
+            dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
         }
     }
 }
 
-void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset, bool bytes)
 {
     ARMWord op2;
+    ARMWord transferFlag = bytes ? DT_BYTE : 0;
 
     ASSERT(scale >= 0 && scale <= 3);
     op2 = lsl(index, scale);
 
     if (offset >= 0 && offset <= 0xfff) {
         add_r(ARMRegisters::S0, base, op2);
-        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
+        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset | transferFlag);
         return;
     }
     if (offset <= 0 && offset >= -0xfff) {
         add_r(ARMRegisters::S0, base, op2);
-        dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
+        dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag);
         return;
     }
 
     ldr_un_imm(ARMRegisters::S0, offset);
     add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
-    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
+    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag);
 }
 
 void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
 {
-    if (offset & 0x3) {
+    // VFP cannot directly access memory that is not four-byte-aligned
+    if (!(offset & 0x3)) {
         if (offset <= 0x3ff && offset >= 0) {
             fdtr_u(isLoad, srcDst, base, offset >> 2);
             return;
@@ -342,23 +344,24 @@ void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID b
     fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
 }
 
-void* ARMAssembler::executableCopy(ExecutablePool* allocator)
+PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
 {
     // 64-bit alignment is required for next constant pool and JIT code as well
     m_buffer.flushWithoutBarrier(true);
-    if (m_buffer.uncheckedSize() & 0x7)
+    if (!m_buffer.isAligned(8))
         bkpt(0);
 
-    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));
+    RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+    char* data = reinterpret_cast<char*>(result->start());
 
     for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
         // The last bit is set if the constant must be placed on constant pool.
-        int pos = (*iter) & (~0x1);
-        ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
+        int pos = (iter->m_offset) & (~0x1);
+        ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
         ARMWord* addr = getLdrImmAddress(ldrAddr);
-        if (*addr != 0xffffffff) {
-            if (!(*iter & 1)) {
-                int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
+        if (*addr != InvalidBranchTarget) {
+            if (!(iter->m_offset & 1)) {
+                int diff = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
 
                 if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
                     *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
@@ -369,9 +372,26 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
         }
     }
 
-    return data;
+    return result;
 }
 
+#if OS(LINUX) && COMPILER(RVCT)
+
+__asm void ARMAssembler::cacheFlush(void* code, size_t size)
+{
+    ARM
+    push {r7}
+    add r1, r1, r0
+    mov r7, #0xf0000
+    add r7, r7, #0x2
+    mov r2, #0x0
+    svc #0x0
+    pop {r7}
+    bx lr
+}
+
+#endif
+
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
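
Note on the first hunk: the new bool bytes parameter threaded through dataTransfer32 and baseIndexTransfer32 is turned into transferFlag and OR-ed into every dtr_u/dtr_d/dtr_ur operand, so the existing offset-handling logic can emit byte-sized transfers as well as word-sized ones. A minimal caller sketch, assuming DT_BYTE selects the byte (LDRB/STRB) form as its name suggests; the helper name emitLoadByte is illustrative and not part of the patch:

    #include <stdint.h>
    #include "ARMAssembler.h"   // JSC::ARMAssembler, ARMRegisters

    // Illustrative only: drive the new 'bytes' flag from a byte-sized load.
    static void emitLoadByte(JSC::ARMAssembler& masm,
                             JSC::ARMAssembler::RegisterID dest,
                             JSC::ARMAssembler::RegisterID base,
                             int32_t offset)
    {
        // isLoad = true, bytes = true: DT_BYTE is OR-ed into the transfer, so the
        // same offset/fixup paths produce a byte load instead of a word load.
        masm.dataTransfer32(true, dest, base, offset, true);
    }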
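
Note on the last hunk: the RVCT-only __asm body added for ARMAssembler::cacheFlush issues the ARM-private cacheflush system call on Linux by hand. On entry r0 holds the start address and r1 the size; "add r1, r1, r0" converts the size into an end address, r2 is cleared for the flags argument, and r7 receives 0xf0000 + 0x2 = 0xf0002, the __ARM_NR_cacheflush syscall number, before svc #0 traps into the kernel (r7 is pushed and popped because it is callee-saved). A rough equivalent for a GNU-style toolchain, purely as a sketch; the function name cacheFlushSketch and the fallback #define are illustrative:

    #include <stddef.h>   // size_t
    #include <unistd.h>   // syscall()

    #ifndef __ARM_NR_cacheflush
    #define __ARM_NR_cacheflush 0x0f0002  // 0xf0000 + 2, matching the assembly above
    #endif

    // Flush the instruction cache for [code, code + size) via the kernel.
    static void cacheFlushSketch(void* code, size_t size)
    {
        char* start = static_cast<char*>(code);
        // Mirrors the register setup in the patch:
        // r0 = start, r1 = end (start + size), r2 = flags (0), r7 = syscall number.
        syscall(__ARM_NR_cacheflush, start, start + size, 0);
    }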