/*
 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef LinkBuffer_h
#define LinkBuffer_h

#if ENABLE(ASSEMBLER)

#define DUMP_LINK_STATISTICS 0
#define DUMP_CODE 0

#include "MacroAssembler.h"
#include <wtf/Noncopyable.h>
// LinkBuffer:
//
// This class assists in linking code generated by the macro assembler, once code generation
// has been completed, and the code has been copied to its final location in memory. At this
// time pointers to labels within the code may be resolved, and relative offsets to external
// addresses may be fixed.
//
// Specifically:
//   * Jump objects may be linked to external targets.
//   * The address of a Jump object may be taken, such that it can later be relinked.
//   * The return address of a Call may be acquired.
//   * The address of a Label pointing into the code may be resolved.
//   * The value referenced by a DataLabel may be set.
//
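// A minimal usage sketch (illustrative only; 'jit', 'globalData', 'executablePool',
// 'operationCall' and 'someOperation' are hypothetical names, not part of this API):
//
//     MacroAssembler jit;
//     // ... emit code, remembering the Calls/Jumps/Labels that will need linking ...
//     MacroAssembler::Call operationCall = jit.call();
//
//     LinkBuffer patchBuffer(globalData, &jit, executablePool);
//     patchBuffer.link(operationCall, FunctionPtr(someOperation));
//     MacroAssemblerCodeRef code = patchBuffer.finalizeCode();
//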
class LinkBuffer {
    WTF_MAKE_NONCOPYABLE(LinkBuffer);
    typedef MacroAssemblerCodeRef CodeRef;
    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssembler::Label Label;
    typedef MacroAssembler::Jump Jump;
    typedef MacroAssembler::JumpList JumpList;
    typedef MacroAssembler::Call Call;
    typedef MacroAssembler::DataLabelCompact DataLabelCompact;
    typedef MacroAssembler::DataLabel32 DataLabel32;
    typedef MacroAssembler::DataLabelPtr DataLabelPtr;
#if ENABLE(BRANCH_COMPACTION)
    typedef MacroAssembler::LinkRecord LinkRecord;
    typedef MacroAssembler::JumpLinkType JumpLinkType;
#endif

public:
    LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
        : m_executablePool(executablePool)
        , m_code(0)
        , m_size(0)
        , m_assembler(masm)
        , m_globalData(&globalData)
        , m_completed(false)
    {
        linkCode();
    }

    LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, ExecutableAllocator& allocator)
        : m_executablePool(allocator.poolForSize(globalData, masm->m_assembler.codeSize()))
        , m_code(0)
        , m_size(0)
        , m_assembler(masm)
        , m_globalData(&globalData)
        , m_completed(false)
    {
        linkCode();
    }
    // These methods are used to link or set values at code generation time.

    void link(Call call, FunctionPtr function)
    {
        ASSERT(call.isFlagSet(Call::Linkable));
        call.m_jmp = applyOffset(call.m_jmp);
        MacroAssembler::linkCall(code(), call, function);
    }

    void link(Jump jump, CodeLocationLabel label)
    {
        jump.m_jmp = applyOffset(jump.m_jmp);
        MacroAssembler::linkJump(code(), jump, label);
    }

    void link(JumpList list, CodeLocationLabel label)
    {
        for (unsigned i = 0; i < list.m_jumps.size(); ++i)
            link(list.m_jumps[i], label);
    }

    void patch(DataLabelPtr label, void* value)
    {
        AssemblerLabel target = applyOffset(label.m_label);
        MacroAssembler::linkPointer(code(), target, value);
    }

    void patch(DataLabelPtr label, CodeLocationLabel value)
    {
        AssemblerLabel target = applyOffset(label.m_label);
        MacroAssembler::linkPointer(code(), target, value.executableAddress());
    }
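
    // For illustration (hypothetical names, not part of this API): a Jump emitted during code
    // generation can be bound to an already-known target, and a DataLabelPtr can be filled in
    // with a runtime pointer:
    //
    //     patchBuffer.link(slowPathJump, CodeLocationLabel(slowPathAddress));
    //     patchBuffer.patch(cachedPointerLabel, runtimePointer);
    //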
    // These methods are used to obtain handles to allow the code to be relinked / repatched later.

    CodeLocationCall locationOf(Call call)
    {
        ASSERT(call.isFlagSet(Call::Linkable));
        ASSERT(!call.isFlagSet(Call::Near));
        return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
    }

    CodeLocationNearCall locationOfNearCall(Call call)
    {
        ASSERT(call.isFlagSet(Call::Linkable));
        ASSERT(call.isFlagSet(Call::Near));
        return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
    }

    CodeLocationLabel locationOf(Label label)
    {
        return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
    }

    CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
    {
        return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
    }

    CodeLocationDataLabel32 locationOf(DataLabel32 label)
    {
        return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
    }

    CodeLocationDataLabelCompact locationOf(DataLabelCompact label)
    {
        return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
    }
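
    // For example (illustrative; 'stubInfo' and the label names are hypothetical), callers
    // typically capture these handles while linking and store them so the generated code can
    // be repatched later:
    //
    //     stubInfo.hotPathCall = patchBuffer.locationOf(hotPathCall);
    //     stubInfo.structurePointer = patchBuffer.locationOf(structureDataLabelPtr);
    //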
    // This method obtains the return address of the call, given as an offset from
    // the start of the code.
    unsigned returnAddressOffset(Call call)
    {
        call.m_jmp = applyOffset(call.m_jmp);
        return MacroAssembler::getLinkerCallReturnOffset(call);
    }
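
    // Illustrative use (names hypothetical): once the code's base address is known, the
    // offset can be turned back into an absolute return address:
    //
    //     void* returnAddress = static_cast<char*>(codeBase) + patchBuffer.returnAddressOffset(call);
    //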
    // Upon completion of all patching, either 'finalizeCode()' or 'finalizeCodeAddendum()' should be
    // called once to complete generation of the code. 'finalizeCode()' is suited to situations
    // where the executable pool must also be retained; the lighter-weight 'finalizeCodeAddendum()'
    // is suited to adding to an existing allocation.
    CodeRef finalizeCode()
    {
        performFinalization();

        return CodeRef(m_code, m_executablePool, m_size);
    }

    CodeLocationLabel finalizeCodeAddendum()
    {
        performFinalization();

        return CodeLocationLabel(code());
    }
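
    // As a sketch of the distinction (illustrative only; 'patchBuffer' and 'addendumBuffer'
    // are hypothetical LinkBuffers): code that owns its allocation keeps the CodeRef, which
    // holds the pool alive, while a trampoline added to an existing allocation only needs
    // its entry point:
    //
    //     MacroAssemblerCodeRef mainCode = patchBuffer.finalizeCode();
    //     CodeLocationLabel trampolineEntry = addendumBuffer.finalizeCodeAddendum();
    //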
    CodePtr trampolineAt(Label label)
    {
        return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
    }
private:
    template <typename T> T applyOffset(T src)
    {
#if ENABLE(BRANCH_COMPACTION)
        src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
#endif
        return src;
    }
    // Keep this private! - the underlying code should only be obtained externally via
    // finalizeCode() or finalizeCodeAddendum().
    void* code()
    {
        return m_code;
    }

    void linkCode()
    {
#if !ENABLE(BRANCH_COMPACTION)
        m_code = m_assembler->m_assembler.executableCopy(*m_globalData, m_executablePool.get());
        m_size = m_assembler->m_assembler.codeSize();
#else
        size_t initialSize = m_assembler->m_assembler.codeSize();
        m_code = (uint8_t*)m_executablePool->alloc(*m_globalData, initialSize);
        ExecutableAllocator::makeWritable(m_code, initialSize);
        uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
        uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
        int readPtr = 0;
        int writePtr = 0;
        Vector<LinkRecord>& jumpsToLink = m_assembler->jumpsToLink();
        unsigned jumpCount = jumpsToLink.size();
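        // Walk the unlinked copy and the output buffer in lockstep: readPtr tracks the next
        // byte of the unlinked code, writePtr the next byte of the compacted output, so
        // (readPtr - writePtr) is the number of bytes saved by compaction so far. The
        // recordLinkOffsets() calls below remember this shrinkage per region, so offsets
        // into the old code can later be mapped onto the compacted code.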
        for (unsigned i = 0; i < jumpCount; ++i) {
            int offset = readPtr - writePtr;
            ASSERT(!(offset & 1));
            // Copy the instructions from the last jump to the current one.
            size_t regionSize = jumpsToLink[i].from() - readPtr;
            uint16_t* copySource = reinterpret_cast<uint16_t*>(inData + readPtr);
            uint16_t* copyEnd = reinterpret_cast<uint16_t*>(inData + readPtr + regionSize);
            uint16_t* copyDst = reinterpret_cast<uint16_t*>(outData + writePtr);
            ASSERT(!(regionSize % 2));
            ASSERT(!(readPtr % 2));
            ASSERT(!(writePtr % 2));
            while (copySource != copyEnd)
                *copyDst++ = *copySource++;
            m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
            readPtr += regionSize;
            writePtr += regionSize;
            // Calculate the absolute address of the jump target; for backwards
            // branches we need to be precise, for forward branches we are pessimistic.
            const uint8_t* target;
            if (jumpsToLink[i].to() >= jumpsToLink[i].from())
                target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far.
            else
                target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
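            // Forward targets are pessimistic because only the shrinkage accumulated so far
            // ('offset') is known; any further compaction between here and the target can only
            // bring the target closer, so the computed distance is never an underestimate.
            // Backward targets already have their final, recorded positions, so
            // executableOffsetFor() gives their exact address.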
            JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
            // Compact branch if we can...
            if (m_assembler->canCompact(jumpsToLink[i].type())) {
                // Step back in the write stream.
                int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
                if (delta) {
                    writePtr -= delta;
                    m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
                }
            }
            jumpsToLink[i].setFrom(writePtr);
        }
        // Copy everything after the last jump.
        memcpy(outData + writePtr, inData + readPtr, initialSize - readPtr);
        m_assembler->recordLinkOffsets(readPtr, initialSize, readPtr - writePtr);
        for (unsigned i = 0; i < jumpCount; ++i) {
            uint8_t* location = outData + jumpsToLink[i].from();
            uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
            m_assembler->link(jumpsToLink[i], location, target);
        }
        m_size = writePtr + initialSize - readPtr;
        m_executablePool->tryShrink(m_code, initialSize, m_size);
#if DUMP_LINK_STATISTICS
        dumpLinkStatistics(m_code, initialSize, m_size);
#endif
#if DUMP_CODE
        dumpCode(m_code, m_size);
#endif
#endif
    }
    void performFinalization()
    {
        ASSERT(!m_completed);
        m_completed = true;

        ExecutableAllocator::makeExecutable(code(), m_size);
        ExecutableAllocator::cacheFlush(code(), m_size);
    }
#if DUMP_LINK_STATISTICS
    static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize)
    {
        static unsigned linkCount = 0;
        static unsigned totalInitialSize = 0;
        static unsigned totalFinalSize = 0;
        linkCount++;
        totalInitialSize += initialSize;
        totalFinalSize += finalSize;
        printf("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
               code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
               static_cast<unsigned>(initialSize - finalSize),
               100.0 * (initialSize - finalSize) / initialSize);
        printf("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
               linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
               100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
    }
#endif
#if DUMP_CODE
    static void dumpCode(void* code, size_t size)
    {
        // Dump the generated code in an asm file format that can be assembled and then disassembled
        // for debugging purposes. For example, save this output as jit.s:
        //   gcc -arch armv7 -c jit.s
        //   otool -tv jit.o
        static unsigned codeCount = 0;
        unsigned short* tcode = static_cast<unsigned short*>(code);
        size_t tsize = size / sizeof(short);
        char nameBuf[128];
        snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
        printf("\t.syntax unified\n"
               "\t.section\t__TEXT,__text,regular,pure_instructions\n"
               "\t.globl\t%s\n"
               "\t.thumb_func\t%s\n"
               "# %p\n"
               "%s:\n", nameBuf, nameBuf, code, nameBuf);

        for (unsigned i = 0; i < tsize; i++)
            printf("\t.short\t0x%x\n", tcode[i]);
    }
#endif
    RefPtr<ExecutablePool> m_executablePool;
    void* m_code;
    size_t m_size;
    MacroAssembler* m_assembler;
    JSGlobalData* m_globalData;
    bool m_completed;
};
#endif // ENABLE(ASSEMBLER)

#endif // LinkBuffer_h