/*
 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "LinkBuffer.h"

#if ENABLE(ASSEMBLER)

#include "CodeBlock.h"
#include "JITCode.h"
#include "JSCInlines.h"
#include "Options.h"
#include <wtf/CompilationThread.h>

namespace JSC {
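
// Helper used by the JIT tiers to decide whether newly generated code should
// be dumped: code produced by an optimizing JIT also honors the DFG-specific
// disassembly option in addition to the global one.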
bool shouldShowDisassemblyFor(CodeBlock* codeBlock)
{
    if (JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::showDFGDisassembly())
        return true;
    return Options::showDisassembly();
}
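
// Finalization makes the buffer executable and hands back a CodeRef. Memory
// that came from the executable allocator is kept alive by the CodeRef;
// otherwise the caller-supplied buffer is wrapped directly.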
LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
{
    performFinalization();

    ASSERT(m_didAllocate);
    if (m_executableMemory)
        return CodeRef(m_executableMemory);

    return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code));
}
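
// Same as above, but also logs a header built from the printf-style arguments
// followed by a disassembly of the finalized code when the disassembler is
// enabled.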
LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
{
    CodeRef result = finalizeCodeWithoutDisassembly();

#if ENABLE(DISASSEMBLER)
    dataLogF("Generated JIT code for ");
    va_list argList;
    va_start(argList, format);
    WTF::dataLogFV(format, argList);
    va_end(argList);
    dataLogF(":\n");

    dataLogF("    Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
    disassemble(result.code(), m_size, "    ", WTF::dataFile());
#else
    UNUSED_PARAM(format);
#endif // ENABLE(DISASSEMBLER)

    return result;
}

#if ENABLE(BRANCH_COMPACTION)
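// Branch compaction reuses the original assembler buffer as a side table: for
// each region of code we record how far it has slid backwards in the copied
// output, so executableOffsetFor() can map an offset in the unlinked code to
// its offset in the compacted code.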
static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset)
{
    int32_t ptr = regionStart / sizeof(int32_t);
    const int32_t end = regionEnd / sizeof(int32_t);
    int32_t* offsets = reinterpret_cast<int32_t*>(assemblerData.buffer());
    while (ptr < end)
        offsets[ptr++] = offset;
}
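
// Copies the unlinked code into the executable allocation while compacting
// branches: jumps that can be encoded in a shorter form are shrunk and the
// code following them slides down to close the gap.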
template <typename InstructionType>
void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
{
    m_initialSize = macroAssembler.m_assembler.codeSize();
    allocate(m_initialSize, ownerUID, effort);
    if (didFailToAllocate())
        return;
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink();
    m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData();
    uint8_t* inData = reinterpret_cast<uint8_t*>(m_assemblerStorage.buffer());
    uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
    int readPtr = 0;
    int writePtr = 0;
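
    // Walk every jump record in order. readPtr tracks how much of the original
    // buffer has been consumed, writePtr how much compacted output has been
    // produced; their difference is the number of bytes saved so far.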
    unsigned jumpCount = jumpsToLink.size();
    for (unsigned i = 0; i < jumpCount; ++i) {
        int offset = readPtr - writePtr;
        ASSERT(!(offset & 1));

        // Copy the instructions from the last jump to the current one.
        size_t regionSize = jumpsToLink[i].from() - readPtr;
        InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr);
        InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize);
        InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr);
        ASSERT(!(regionSize % 2));
        ASSERT(!(readPtr % 2));
        ASSERT(!(writePtr % 2));
        while (copySource != copyEnd)
            *copyDst++ = *copySource++;
        recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset);
        readPtr += regionSize;
        writePtr += regionSize;

        // Calculate absolute address of the jump target, in the case of backwards
        // branches we need to be precise, forward branches we are pessimistic
        const uint8_t* target;
        if (jumpsToLink[i].to() >= jumpsToLink[i].from())
            target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
        else
            target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());

        JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], outData + writePtr, target);
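        // The jump type chosen above is the smallest encoding that can reach
        // the target from the current write position; it is remembered on the
        // link record for the final linking pass below.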
        // Compact branch if we can...
        if (MacroAssembler::canCompact(jumpsToLink[i].type())) {
            // Step back in the write stream
            int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
            if (delta) {
                writePtr -= delta;
                recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
            }
        }
        jumpsToLink[i].setFrom(writePtr);
    }

    // Copy everything after the last jump
    memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
    recordLinkOffsets(m_assemblerStorage, readPtr, m_initialSize, readPtr - writePtr);
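
    // All jump positions are now final, so resolve each jump against the
    // compacted locations of its source and target.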
    for (unsigned i = 0; i < jumpCount; ++i) {
        uint8_t* location = outData + jumpsToLink[i].from();
        uint8_t* target = outData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to());
        MacroAssembler::link(jumpsToLink[i], location, target);
    }

    jumpsToLink.clear();
    shrink(writePtr + m_initialSize - readPtr);

#if DUMP_LINK_STATISTICS
    dumpLinkStatistics(m_code, m_initialSize, m_size);
#endif
#if DUMP_CODE
    dumpCode(m_code, m_size);
#endif
}
#endif // ENABLE(BRANCH_COMPACTION)

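// Entry point used by the JITs: either copies the assembler buffer verbatim
// (when branch compaction is disabled) or dispatches to the compacting path
// with the instruction width of the target ISA.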
void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort)
{
#if !ENABLE(BRANCH_COMPACTION)
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    macroAssembler.m_assembler.buffer().flushConstantPool(false);
#endif
    AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
    allocate(buffer.codeSize(), ownerUID, effort);
    if (!m_didAllocate)
        return;
    ASSERT(m_code);
#if CPU(ARM_TRADITIONAL)
    macroAssembler.m_assembler.prepareExecutableCopy(m_code);
#endif
    memcpy(m_code, buffer.data(), buffer.codeSize());
#if CPU(ARM_TRADITIONAL)
    macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
#endif
#elif CPU(ARM_THUMB2)
    copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort);
#elif CPU(ARM64)
    copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort);
#endif
}

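// Obtains memory for the generated code. If a code pointer was supplied up
// front we only verify that it is large enough; otherwise a fresh allocation
// is requested from the VM's executable allocator and made writable.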
void LinkBuffer::allocate(size_t initialSize, void* ownerUID, JITCompilationEffort effort)
{
    if (m_code) {
        if (initialSize > m_size)
            return;

        m_didAllocate = true;
        m_size = initialSize;
        return;
    }

    m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort);
    if (!m_executableMemory)
        return;
    ExecutableAllocator::makeWritable(m_executableMemory->start(), m_executableMemory->sizeInBytes());
    m_code = m_executableMemory->start();
    m_size = initialSize;
    m_didAllocate = true;
}

void LinkBuffer::shrink(size_t newSize)
{
    if (!m_executableMemory)
        return;
    m_size = newSize;
    m_executableMemory->shrink(m_size);
}
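
// Once linking is complete the memory is made executable and the instruction
// cache is flushed; the buffer must not be written to after this point.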
void LinkBuffer::performFinalization()
{
#ifndef NDEBUG
    ASSERT(!isCompilationThread());
    ASSERT(!m_completed);
    m_completed = true;
#endif

#if ENABLE(BRANCH_COMPACTION)
    ExecutableAllocator::makeExecutable(code(), m_initialSize);
#else
    ExecutableAllocator::makeExecutable(code(), m_size);
#endif
    MacroAssembler::cacheFlush(code(), m_size);
}

#if DUMP_LINK_STATISTICS
void LinkBuffer::dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize)
{
    static unsigned linkCount = 0;
    static unsigned totalInitialSize = 0;
    static unsigned totalFinalSize = 0;
    linkCount++;
    totalInitialSize += initialSize;
    totalFinalSize += finalSize;
    dataLogF("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
        code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
        static_cast<unsigned>(initialSize - finalSize),
        100.0 * (initialSize - finalSize) / initialSize);
    dataLogF("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
        linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
        100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
}
#endif

#if DUMP_CODE
void LinkBuffer::dumpCode(void* code, size_t size)
{
#if CPU(ARM_THUMB2)
    // Dump the generated code in an asm file format that can be assembled and then disassembled
    // for debugging purposes. For example, save this output as jit.s:
    //   gcc -arch armv7 -c jit.s
    //   otool -tv jit.o
    static unsigned codeCount = 0;
    unsigned short* tcode = static_cast<unsigned short*>(code);
    size_t tsize = size / sizeof(short);
    char nameBuf[128];
    snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
    dataLogF("\t.syntax unified\n"
        "\t.section\t__TEXT,__text,regular,pure_instructions\n"
        "\t.globl\t%s\n"
        "\t.align 2\n"
        "\t.code 16\n"
        "\t.thumb_func\t%s\n"
        "# %p\n"
        "%s:\n", nameBuf, nameBuf, code, nameBuf);

    for (unsigned i = 0; i < tsize; i++)
        dataLogF("\t.short\t0x%x\n", tcode[i]);
#elif CPU(ARM_TRADITIONAL)
    //   gcc -c jit.s
    //   objdump -D jit.o
    static unsigned codeCount = 0;
    unsigned int* tcode = static_cast<unsigned int*>(code);
    size_t tsize = size / sizeof(unsigned int);
    char nameBuf[128];
    snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
    dataLogF("\t.globl\t%s\n"
        "\t.align 4\n"
        "\t.code 32\n"
        "\t.text\n"
        "# %p\n"
        "%s:\n", nameBuf, code, nameBuf);

    for (unsigned i = 0; i < tsize; i++)
        dataLogF("\t.long\t0x%x\n", tcode[i]);
#endif
}
#endif // DUMP_CODE

} // namespace JSC

#endif // ENABLE(ASSEMBLER)