/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2014 Samsung Electronics
 * Copyright (C) 2014 University of Szeged
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "FTLCompile.h"

#if ENABLE(FTL_JIT)

#include "CodeBlockWithJITType.h"
#include "CCallHelpers.h"
#include "DFGCommon.h"
#include "DFGGraphSafepoint.h"
#include "Disassembler.h"
#include "FTLExitThunkGenerator.h"
#include "FTLInlineCacheSize.h"
#include "FTLJITCode.h"
#include "FTLThunks.h"
#include "FTLUnwindInfo.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;

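// The three mm* callbacks below form the MCJIT memory manager that LLVM calls back into
// whenever it needs somewhere to put a generated section. Code sections come out of the
// JIT executable allocator; because LLVM cannot recover from a memory manager failure,
// an allocation failure is recorded in state.allocationFailed and a throwaway section is
// handed back so the compilation can finish and then be discarded.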
static uint8_t* mmAllocateCodeSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
{
    State& state = *static_cast<State*>(opaqueState);
    
    RELEASE_ASSERT(alignment <= jitAllocationGranule);
    
    RefPtr<ExecutableMemoryHandle> result =
        state.graph.m_vm.executableAllocator.allocate(
            state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationCanFail);
    
    if (!result) {
        // Signal failure. This compilation will get tossed.
        state.allocationFailed = true;
        
        // Fake an allocation, since LLVM cannot handle failures in the memory manager.
        RefPtr<DataSection> fakeSection = adoptRef(new DataSection(size, jitAllocationGranule));
        state.jitCode->addDataSection(fakeSection);
        return bitwise_cast<uint8_t*>(fakeSection->base());
    }
    
    // LLVM used to put __compact_unwind in a code section. We keep this here defensively,
    // for clients that use older LLVMs.
    if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
        state.unwindDataSection = result->start();
        state.unwindDataSectionSize = result->sizeInBytes();
    }
    
    state.jitCode->addHandle(result);
    state.codeSectionNames.append(sectionName);
    
    return static_cast<uint8_t*>(result->start());
}

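// Data sections live in ordinary (non-executable) DataSection memory. The llvm_stackmaps
// section and the unwind section are remembered on the State so they can be parsed once
// LLVM is done; everything else is simply kept alive alongside the generated code.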
static uint8_t* mmAllocateDataSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
    const char* sectionName, LLVMBool isReadOnly)
{
    UNUSED_PARAM(sectionID);
    UNUSED_PARAM(isReadOnly);
    
    // Allocate the GOT in the code section to make it reachable for all code.
    if (!strcmp(sectionName, SECTION_NAME("got")))
        return mmAllocateCodeSection(opaqueState, size, alignment, sectionID, sectionName);
    
    State& state = *static_cast<State*>(opaqueState);
    
    RefPtr<DataSection> section = adoptRef(new DataSection(size, alignment));
    
    if (!strcmp(sectionName, SECTION_NAME("llvm_stackmaps")))
        state.stackmapsSection = section;
    else {
        state.jitCode->addDataSection(section);
        state.dataSectionNames.append(sectionName);
#if OS(DARWIN)
        if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
#elif OS(LINUX)
        if (!strcmp(sectionName, SECTION_NAME("eh_frame"))) {
#else
#error "Unrecognized OS"
#endif
            state.unwindDataSection = section->base();
            state.unwindDataSectionSize = size;
        }
    }
    
    return bitwise_cast<uint8_t*>(section->base());
}

static LLVMBool mmApplyPermissions(void*, char**)
{
    return false;
}

static void mmDestroy(void*)
{
}

static void dumpDataSection(DataSection* section, const char* prefix)
{
    for (unsigned j = 0; j < section->size() / sizeof(int64_t); ++j) {
        char buf[32];
        int64_t* wordPointer = static_cast<int64_t*>(section->base()) + j;
        snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(wordPointer)));
        dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<long long>(*wordPointer));
    }
}

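// Recovers the offset of a stack region (the captured variables, or the varargs spill
// slots) from the stackmap record the FTL emitted for it. The record is expected to hold
// a single Register location anchored at the call frame register; its addend, converted
// from bytes to Register units, is the offset. A stackmapID of UINT_MAX means the region
// was never allocated.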
static int offsetOfStackRegion(StackMaps::RecordMap& recordMap, uint32_t stackmapID)
{
    if (stackmapID == UINT_MAX)
        return 0;
    
    StackMaps::RecordMap::iterator iter = recordMap.find(stackmapID);
    RELEASE_ASSERT(iter != recordMap.end());
    RELEASE_ASSERT(iter->value.size() == 1);
    RELEASE_ASSERT(iter->value[0].locations.size() == 1);
    Location capturedLocation =
        Location::forStackmaps(nullptr, iter->value[0].locations[0]);
    RELEASE_ASSERT(capturedLocation.kind() == Location::Register);
    RELEASE_ASSERT(capturedLocation.gpr() == GPRInfo::callFrameRegister);
    RELEASE_ASSERT(!(capturedLocation.addend() % sizeof(Register)));
    return capturedLocation.addend() / sizeof(Register);
}

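// Links the code assembled into `code` either directly into the inline area reserved in
// the LLVM-generated function (padding the remainder with nops), or, if it does not fit,
// into freshly allocated out-of-line executable memory that the inline area jumps to and
// returns from. The callback receives the LinkBuffer that was actually used and a flag
// saying whether the inline path was taken.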
static void generateInlineIfPossibleOutOfLineIfNot(State& state, VM& vm, CodeBlock* codeBlock, CCallHelpers& code, char* startOfInlineCode, size_t sizeOfInlineCode, const char* codeDescription, const std::function<void(LinkBuffer&, CCallHelpers&, bool wasCompiledInline)>& callback)
{
    std::unique_ptr<LinkBuffer> codeLinkBuffer;
    size_t actualCodeSize = code.m_assembler.buffer().codeSize();
    
    if (actualCodeSize <= sizeOfInlineCode) {
        LinkBuffer codeLinkBuffer(vm, code, startOfInlineCode, sizeOfInlineCode);
        
        // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
        MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + actualCodeSize, sizeOfInlineCode - actualCodeSize);
        
        callback(codeLinkBuffer, code, true);
        return;
    }
    
    // If there isn't enough space in the provided inline code area, allocate out of line
    // executable memory to link the provided code. Place a jump at the beginning of the
    // inline area and jump to the out of line code. Similarly return by appending a jump
    // to the provided code that goes to the instruction after the inline code.
    // Fill the middle with nop's.
    MacroAssembler::Jump returnToMainline = code.jump();
    
    // Allocate out of line executable memory and link the provided code there.
    codeLinkBuffer = std::make_unique<LinkBuffer>(vm, code, codeBlock, JITCompilationMustSucceed);
    
    // Plant a jmp in the inline buffer to the out of line code.
    MacroAssembler callToOutOfLineCode;
    MacroAssembler::Jump jumpToOutOfLine = callToOutOfLineCode.jump();
    LinkBuffer inlineBuffer(vm, callToOutOfLineCode, startOfInlineCode, sizeOfInlineCode);
    inlineBuffer.link(jumpToOutOfLine, codeLinkBuffer->entrypoint());
    
    // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
    MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + inlineBuffer.size(), sizeOfInlineCode - inlineBuffer.size());
    
    // Link the end of the out of line code to right after the inline area.
    codeLinkBuffer->link(returnToMainline, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(startOfInlineCode)).labelAtOffset(sizeOfInlineCode));
    
    callback(*codeLinkBuffer.get(), code, false);
    
    state.finalizer->outOfLineCodeInfos.append(OutOfLineCodeInfo(WTF::move(codeLinkBuffer), codeDescription));
}

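// Emits the fast path for each get-by-id/put-by-id inline cache. Each stackmap record
// for the IC's stackmap ID corresponds to one occurrence of the patchpoint in the
// generated code, so one fast path is produced per record and wired to the slow path
// that was already emitted into the side code link buffer.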
template<typename DescriptorType>
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;
    
    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }
    
    Vector<StackMaps::Record>& records = iter->value;
    
    RELEASE_ASSERT(records.size() == ic.m_generators.size());
    
    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);
        
        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
        
        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "inline cache fast path", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i],
                CodeLocationLabel(startOfIC + sizeOfIC));
            
            linkBuffer.link(generator.slowPathJump(),
                state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));
            
            generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
        });
    }
}

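// Like generateICFastPath, but for CheckIn ("in" operator) caches: instead of relying on
// a generator's finalize(), the StructureStubInfo patch offsets (deltaCallToDone,
// deltaCallToJump, deltaCallToSlowCase) are computed by hand once both the fast path and
// the side code have been linked.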
static void generateCheckInICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;
    
    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }
    
    Vector<StackMaps::Record>& records = iter->value;
    
    RELEASE_ASSERT(records.size() == ic.m_generators.size());
    
    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];
        
        StructureStubInfo& stubInfo = *generator.m_stub;
        auto call = generator.m_slowCall;
        auto slowPathBegin = generator.m_beginLabel;
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        
        auto jump = fastPathJIT.patchableJump();
        auto done = fastPathJIT.label();
        
        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
        
        auto postLink = [&] (LinkBuffer& fastPath, CCallHelpers&, bool) {
            LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;
            
            state.finalizer->sideCodeLinkBuffer->link(
                ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
            
            CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
            fastPath.link(jump, slowPathBeginLoc);
            
            CodeLocationCall callReturnLocation = slowPath.locationOf(call);
            
            stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(done));
            
            stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(jump));
            stubInfo.callReturnLocation = callReturnLocation;
            stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, slowPathBeginLoc);
        };
        
        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "CheckIn inline cache", postLink);
    }
}

static RegisterSet usedRegistersFor(const StackMaps::Record& record)
{
    if (Options::assumeAllRegsInFTLICAreLive())
        return RegisterSet::allRegisters();
    return RegisterSet(record.usedRegisterSet(), RegisterSet::calleeSaveRegisters());
}

template<typename CallType>
void adjustCallICsForStackmaps(Vector<CallType>& calls, StackMaps::RecordMap& recordMap)
{
    // Handling JS calls is weird: we need to ensure that we sort them by the PC in LLVM
    // generated code. That implies first pruning the ones that LLVM didn't generate.
    
    Vector<CallType> oldCalls;
    oldCalls.swap(calls);
    
    for (unsigned i = 0; i < oldCalls.size(); ++i) {
        CallType& call = oldCalls[i];
        
        StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
        if (iter == recordMap.end())
            continue;
        
        for (unsigned j = 0; j < iter->value.size(); ++j) {
            CallType copy = call;
            copy.m_instructionOffset = iter->value[j].instructionOffset;
            calls.append(copy);
        }
    }
    
    std::sort(calls.begin(), calls.end());
}

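// The heart of the FTL backend's post-processing. Once LLVM has produced machine code
// and its stackmaps section has been parsed, this patches the code into its final form:
// it rebases OSR exit values and inline call frame data by the real locals offset, emits
// the exception-handling and OSR exit thunks, builds slow paths and fast paths for the
// get/put/in inline caches, links JS call and varargs call ICs, and finally replaces the
// exception and OSR exit patchpoints with jumps to the appropriate thunks.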
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;
    
    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);
    
    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;
        
        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;
        
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }
        
        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }
    
    if (graph.hasDebuggerEnabled())
        codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    
    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);
        
        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();
        
        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));
        
        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);
        
        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        
        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());
        
        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");
            
            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);
            
            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);
            
            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
                if (!exit.m_materializations.isEmpty()) {
                    dataLog("    Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                        dataLog("        Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }
        
        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);
        
        CCallHelpers::JumpList exceptionTarget;
        
        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");
            
            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();
                
                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");
            
            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                
                RegisterSet usedRegisters = usedRegistersFor(record);
                
                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();
                
                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());
                
                gen.reportSlowPathCall(begin, call);
                
                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }
        
        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];
            
            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");
            
            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }
            
            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo();
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;
                
                MacroAssembler::Label begin = slowPathJIT.label();
                
                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_uid);
                
                checkIn.m_slowPathDone.append(slowPathJIT.jump());
                
                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }
        
        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();
        
        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
        
        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
    }
    
    adjustCallICsForStackmaps(state.jsCalls, recordMap);
    
    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        
        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }
    
    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);
    
    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];
        
        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, varargsSpillSlotsOffset);
        
        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());
        
        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        });
    }

    RepatchBuffer repatchBuffer(codeBlock);
    
    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            RELEASE_ASSERT(stackOverflowException.isSet());
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }
    
    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];
            
            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
            
            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }
    
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);
        
        Vector<const void*> codeAddresses;
        
        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];
                
                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);
                
                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());
                
                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }
        
        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}

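// Drives an entire FTL compile of one module: set up an MCJIT execution engine that uses
// the memory manager callbacks above, run either the hand-picked "simple" pass list or a
// PassManagerBuilder-driven pipeline over the module, fetch the generated function, then
// parse the unwind info and stackmaps sections and hand everything to
// fixFunctionBasedOnStackMaps. The work happens inside a GraphSafepoint, which is why the
// safepoint result is checked for cancellation as soon as that scope ends.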
void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;
    
    {
        GraphSafepoint safepoint(state.graph, safepointResult);
        
        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = enableLLVMFastISel;
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);
        
        LLVMExecutionEngineRef engine;
        
#if CPU(ARM64)
#if OS(DARWIN)
        llvm->SetTarget(state.module, "arm64-apple-ios");
#elif OS(LINUX)
        llvm->SetTarget(state.module, "aarch64-linux-gnu");
#else
#error "Unrecognized OS"
#endif
#endif
        
        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }
        
        // At this point we no longer own the module.
        LModule module = state.module;
        state.module = nullptr;

        // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
        // it to the module.
        LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
        LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
        char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
        llvm->SetDataLayout(module, stringRepOfTargetData);
        free(stringRepOfTargetData);
        
        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;
        
        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(targetData, modulePasses);
            llvm->AddAnalysisPasses(targetMachine, modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddGlobalOptimizerPass(modulePasses);
            llvm->AddFunctionInliningPass(modulePasses);
            llvm->AddPruneEHPass(modulePasses);
            llvm->AddGlobalDCEPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddAggressiveDCEPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);
            
            if (enableLLVMFastISel)
                llvm->AddLowerSwitchPass(modulePasses);
            
            llvm->RunPassManager(modulePasses, module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());
            
            functionPasses = llvm->CreateFunctionPassManagerForModule(module);
            modulePasses = llvm->CreatePassManager();
            
            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
            
            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);
            
            llvm->PassManagerBuilderDispose(passBuilder);
            
            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);
            
            llvm->RunPassManager(modulePasses, module);
        }

        if (shouldShowDisassembly() || verboseCompilationEnabled())
            state.dumpState(module, "after optimization");
        
        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        
        llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }
    
    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());
    
    if (state.allocationFailed)
        return;

    if (shouldShowDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }
        
        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }

    bool didSeeUnwindInfo = state.jitCode->unwindInfo.parse(
        state.unwindDataSection, state.unwindDataSectionSize,
        state.generatedFunction);
    if (shouldShowDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        if (didSeeUnwindInfo)
            dataLog("    ", state.jitCode->unwindInfo, "\n");
        else
            dataLog("    <no unwind info>\n");
    }

    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldShowDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }
        
        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());
        
        if (shouldShowDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
        }
        
        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap, didSeeUnwindInfo);
        if (state.allocationFailed)
            return;
        
        if (shouldShowDisassembly() || Options::asyncDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != SECTION_NAME("text"))
                    continue;
                
                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
                
                CString header = toCString(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode, " #", i, ", ",
                    state.codeSectionNames[i], ":\n");
                
                if (Options::asyncDisassembly()) {
                    disassembleAsynchronously(
                        header, MacroAssemblerCodeRef(handle), handle->sizeInBytes(), "    ",
                        LLVMSubset);
                    continue;
                }
                
                dataLog(header);
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)