/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "FTLCompile.h"

#if ENABLE(FTL_JIT)

#include "CodeBlockWithJITType.h"
#include "CCallHelpers.h"
#include "DFGCommon.h"
#include "DFGGraphSafepoint.h"
#include "Disassembler.h"
#include "FTLExitThunkGenerator.h"
#include "FTLInlineCacheSize.h"
#include "FTLJITCode.h"
#include "FTLThunks.h"
#include "FTLUnwindInfo.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;
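
// These are the MCJIT memory manager callbacks handed to LLVM (via
// CreateSimpleMCJITMemoryManager in compile() below). Code sections are allocated from
// the VM's executable allocator and tracked on the JITCode object; data sections are
// kept alive so that __llvm_stackmaps and __compact_unwind can be parsed afterwards.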
static uint8_t* mmAllocateCodeSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
{
    State& state = *static_cast<State*>(opaqueState);
    
    RELEASE_ASSERT(alignment <= jitAllocationGranule);
    
    RefPtr<ExecutableMemoryHandle> result =
        state.graph.m_vm.executableAllocator.allocate(
            state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationMustSucceed);
    
    // LLVM used to put __compact_unwind in a code section. We keep this here defensively,
    // for clients that use older LLVMs.
    if (!strcmp(sectionName, "__compact_unwind")) {
        state.compactUnwind = result->start();
        state.compactUnwindSize = result->sizeInBytes();
    }
    
    state.jitCode->addHandle(result);
    state.codeSectionNames.append(sectionName);
    
    return static_cast<uint8_t*>(result->start());
}

static uint8_t* mmAllocateDataSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
    const char* sectionName, LLVMBool isReadOnly)
{
    UNUSED_PARAM(sectionID);
    UNUSED_PARAM(isReadOnly);
    
    // Allocate the GOT in the code section to make it reachable for all code.
    if (!strcmp(sectionName, "__got"))
        return mmAllocateCodeSection(opaqueState, size, alignment, sectionID, sectionName);
    
    State& state = *static_cast<State*>(opaqueState);
    
    RefPtr<DataSection> section = adoptRef(new DataSection(size, alignment));
    
    if (!strcmp(sectionName, "__llvm_stackmaps"))
        state.stackmapsSection = section;
    else {
        state.jitCode->addDataSection(section);
        state.dataSectionNames.append(sectionName);
        if (!strcmp(sectionName, "__compact_unwind")) {
            state.compactUnwind = section->base();
            state.compactUnwindSize = size;
        }
    }
    
    return bitwise_cast<uint8_t*>(section->base());
}

static LLVMBool mmApplyPermissions(void*, char**)
{
    return false;
}

static void mmDestroy(void*)
{
}

static void dumpDataSection(DataSection* section, const char* prefix)
{
    for (unsigned j = 0; j < section->size() / sizeof(int64_t); ++j) {
        char buf[32];
        int64_t* wordPointer = static_cast<int64_t*>(section->base()) + j;
        snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(wordPointer)));
        dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<long long>(*wordPointer));
    }
}
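
// Copies an inline cache's JIT-generated fast path over the patchpoint slot that LLVM
// reserved for the given stackmap ID, pads the remainder of the slot with nops, and
// links the slow-path jump to the corresponding stub in the side code link buffer.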
template<typename DescriptorType>
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;
    
    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }
    
    Vector<StackMaps::Record>& records = iter->value;
    
    RELEASE_ASSERT(records.size() == ic.m_generators.size());
    
    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);
        
        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
        
        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        // Note: we could handle the !isValid() case. We just don't appear to have a
        // reason to do so, yet.
        RELEASE_ASSERT(linkBuffer.isValid());
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());
        
        state.finalizer->sideCodeLinkBuffer->link(
            ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
        
        linkBuffer.link(
            generator.slowPathJump(),
            state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));
        
        generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
    }
}
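
// Registers that must be treated as live across an IC slow path call: the ones LLVM
// reports as used in the stackmap record plus the callee-saves, or every register when
// the assumeAllRegsInFTLICAreLive() option is set.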
static RegisterSet usedRegistersFor(const StackMaps::Record& record)
{
    if (Options::assumeAllRegsInFTLICAreLive())
        return RegisterSet::allRegisters();
    return RegisterSet(record.usedRegisterSet(), RegisterSet::calleeSaveRegisters());
}
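
// Rewrites the LLVM-generated function using the parsed stackmaps: rebases virtual
// registers by the real locals offset, emits the exception-handling and OSR exit thunks,
// builds the GetById/PutById and JS call inline caches, and repatches the recorded
// patchpoints to jump to those thunks and caches.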
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    StackMaps::RecordMap::iterator iter = recordMap.find(state.capturedStackmapID);
    RELEASE_ASSERT(iter != recordMap.end());
    RELEASE_ASSERT(iter->value.size() == 1);
    RELEASE_ASSERT(iter->value[0].locations.size() == 1);
    Location capturedLocation =
        Location::forStackmaps(&jitCode->stackmaps, iter->value[0].locations[0]);
    RELEASE_ASSERT(capturedLocation.kind() == Location::Register);
    RELEASE_ASSERT(capturedLocation.gpr() == GPRInfo::callFrameRegister);
    RELEASE_ASSERT(!(capturedLocation.addend() % sizeof(Register)));
    int32_t localsOffset = capturedLocation.addend() / sizeof(Register) + graph.m_nextMachineLocal;
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentsRegister.isValid()) {
            inlineCallFrame->argumentsRegister = VirtualRegister(
                inlineCallFrame->argumentsRegister.offset() + localsOffset);
        }
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }
    
    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }
    
    MacroAssembler::Label stackOverflowException;
    
    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        
        MacroAssembler::Label exceptionContinueArg1Set = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
        checkJIT.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);
        
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        MacroAssembler::Call call = checkJIT.call();
        checkJIT.jumpToExceptionHandler();
        
        stackOverflowException = checkJIT.label();
        checkJIT.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        checkJIT.jump(exceptionContinueArg1Set);
        
        OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed));
        linkBuffer->link(call, FunctionPtr(lookupExceptionHandler));
        
        state.finalizer->handleExceptionsLinkBuffer = linkBuffer.release();
    }
    
    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);
        
        OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed));
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");
            
            iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = linkBuffer.release();
    }
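
    // Emit the out-of-line slow paths for the GetById/PutById inline caches, then stitch
    // each IC's fast path into its patchpoint via generateICFastPath() below.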
    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = adoptPtr(
            new LinkBuffer(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed));
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        
        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
    }
    
    // Handling JS calls is weird: we need to ensure that we sort them by the PC in LLVM
    // generated code. That implies first pruning the ones that LLVM didn't generate.
    Vector<JSCall> oldCalls = state.jsCalls;
    state.jsCalls.resize(0);
    for (unsigned i = 0; i < oldCalls.size(); ++i) {
        JSCall& call = oldCalls[i];
        
        StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
        if (iter == recordMap.end())
            continue;
        
        for (unsigned j = 0; j < iter->value.size(); ++j) {
            JSCall copy = call;
            copy.m_instructionOffset = iter->value[j].instructionOffset;
            state.jsCalls.append(copy);
        }
    }
    
    std::sort(state.jsCalls.begin(), state.jsCalls.end());
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        
        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }
        
        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());
        
        call.link(vm, linkBuffer);
    }
    
    RepatchBuffer repatchBuffer(codeBlock);
    
    iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            RELEASE_ASSERT(stackOverflowException.isSet());
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}
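
// Entry point for the FTL back end: runs LLVM MCJIT over the module under a
// GraphSafepoint, optionally dumps the generated sections, then parses the unwind info
// and stackmaps and fixes up the generated code accordingly.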
void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;
    
    {
        GraphSafepoint safepoint(state.graph, safepointResult);
        
        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = Options::enableLLVMFastISel();
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
        
        LLVMExecutionEngineRef engine;
        
#if CPU(ARM64)
        llvm->SetTarget(state.module, "arm64-apple-ios");
#endif
        
        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }
        
        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;
        
        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);
            llvm->RunPassManager(modulePasses, state.module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
            
            functionPasses = llvm->CreateFunctionPassManagerForModule(state.module);
            modulePasses = llvm->CreatePassManager();
            
            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
            
            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
            
            llvm->PassManagerBuilderDispose(passBuilder);
            
            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(state.module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);
            
            llvm->RunPassManager(modulePasses, state.module);
        }
        
        if (shouldShowDisassembly() || verboseCompilationEnabled())
            state.dumpState("after optimization");
        
        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        
        llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }
    
    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());
    
    if (shouldShowDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }
        
        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }
    
    bool didSeeUnwindInfo = state.jitCode->unwindInfo.parse(
        state.compactUnwind, state.compactUnwindSize, state.generatedFunction);
    if (shouldShowDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        if (didSeeUnwindInfo)
            dataLog("    ", state.jitCode->unwindInfo, "\n");
        else
            dataLog("    <no unwind info>\n");
    }
    
    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldShowDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }
        
        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());
        
        if (shouldShowDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
        }
        
        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap, didSeeUnwindInfo);
        
        if (shouldShowDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != "__text")
                    continue;
                
                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
                dataLog(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode, " #", i, ", ",
                    state.codeSectionNames[i], ":\n");
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }
    
    state.module = 0; // We no longer own the module.
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)