ftl/FTLCompile.cpp (JavaScriptCore-7601.1.46.3)
/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2014 Samsung Electronics
 * Copyright (C) 2014 University of Szeged
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLCompile.h"

#if ENABLE(FTL_JIT)

#include "CodeBlockWithJITType.h"
#include "CCallHelpers.h"
#include "DFGCommon.h"
#include "DFGGraphSafepoint.h"
#include "DataView.h"
#include "Disassembler.h"
#include "FTLExitThunkGenerator.h"
#include "FTLInlineCacheSize.h"
#include "FTLJITCode.h"
#include "FTLThunks.h"
#include "FTLUnwindInfo.h"
#include "JITStubs.h"
#include "LLVMAPI.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"

namespace JSC { namespace FTL {

using namespace DFG;

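// MCJIT memory-manager callback for code sections. It carves executable memory out of
// the VM's JIT allocator; on allocation failure it flags the compilation for disposal
// and hands LLVM a throwaway data section, because the callback itself has no way to
// report failure to LLVM.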
static uint8_t* mmAllocateCodeSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
{
    State& state = *static_cast<State*>(opaqueState);

    RELEASE_ASSERT(alignment <= jitAllocationGranule);

    RefPtr<ExecutableMemoryHandle> result =
        state.graph.m_vm.executableAllocator.allocate(
            state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationCanFail);

    if (!result) {
        // Signal failure. This compilation will get tossed.
        state.allocationFailed = true;

        // Fake an allocation, since LLVM cannot handle failures in the memory manager.
        RefPtr<DataSection> fakeSection = adoptRef(new DataSection(size, jitAllocationGranule));
        state.jitCode->addDataSection(fakeSection);
        return bitwise_cast<uint8_t*>(fakeSection->base());
    }

    // LLVM used to put __compact_unwind in a code section. We keep this here defensively,
    // for clients that use older LLVMs.
    if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
        state.unwindDataSection = result->start();
        state.unwindDataSectionSize = result->sizeInBytes();
    }

    state.jitCode->addHandle(result);
    state.codeSectionNames.append(sectionName);

    return static_cast<uint8_t*>(result->start());
}

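// MCJIT memory-manager callback for data sections. The GOT is redirected to the code
// allocator so it stays reachable from all generated code, the llvm_stackmaps section is
// stashed for later parsing, and the platform's unwind section is remembered so unwind
// info can be extracted after compilation.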
static uint8_t* mmAllocateDataSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
    const char* sectionName, LLVMBool isReadOnly)
{
    UNUSED_PARAM(sectionID);
    UNUSED_PARAM(isReadOnly);

    // Allocate the GOT in the code section to make it reachable for all code.
    if (!strcmp(sectionName, SECTION_NAME("got")))
        return mmAllocateCodeSection(opaqueState, size, alignment, sectionID, sectionName);

    State& state = *static_cast<State*>(opaqueState);

    RefPtr<DataSection> section = adoptRef(new DataSection(size, alignment));

    if (!strcmp(sectionName, SECTION_NAME("llvm_stackmaps")))
        state.stackmapsSection = section;
    else {
        state.jitCode->addDataSection(section);
        state.dataSectionNames.append(sectionName);
#if OS(DARWIN)
        if (!strcmp(sectionName, SECTION_NAME("compact_unwind"))) {
#elif OS(LINUX)
        if (!strcmp(sectionName, SECTION_NAME("eh_frame"))) {
#else
#error "Unrecognized OS"
#endif
            state.unwindDataSection = section->base();
            state.unwindDataSectionSize = size;
        }
    }

    return bitwise_cast<uint8_t*>(section->base());
}

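// The remaining memory-manager callbacks are deliberately empty: mmApplyPermissions
// reports success, and mmDestroy has nothing to free because the sections above are
// owned by the JITCode / State objects.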
static LLVMBool mmApplyPermissions(void*, char**)
{
    return false;
}

static void mmDestroy(void*)
{
}

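// Debug helper: dumps a data section word by word (address and 64-bit contents) for the
// disassembly/verbose paths below.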
static void dumpDataSection(DataSection* section, const char* prefix)
{
    for (unsigned j = 0; j < section->size() / sizeof(int64_t); ++j) {
        char buf[32];
        int64_t* wordPointer = static_cast<int64_t*>(section->base()) + j;
        snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(wordPointer)));
        dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<long long>(*wordPointer));
    }
}

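// Reads the single stackmap record for a stack-region stackmap ID and converts its
// addend on the call frame register into a Register-sized slot count. A stackmap ID of
// UINT_MAX is treated as "no such region" and yields an offset of 0.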
static int offsetOfStackRegion(StackMaps::RecordMap& recordMap, uint32_t stackmapID)
{
    if (stackmapID == UINT_MAX)
        return 0;

    StackMaps::RecordMap::iterator iter = recordMap.find(stackmapID);
    RELEASE_ASSERT(iter != recordMap.end());
    RELEASE_ASSERT(iter->value.size() == 1);
    RELEASE_ASSERT(iter->value[0].locations.size() == 1);
    Location capturedLocation =
        Location::forStackmaps(nullptr, iter->value[0].locations[0]);
    RELEASE_ASSERT(capturedLocation.kind() == Location::Register);
    RELEASE_ASSERT(capturedLocation.gpr() == GPRInfo::callFrameRegister);
    RELEASE_ASSERT(!(capturedLocation.addend() % sizeof(Register)));
    return capturedLocation.addend() / sizeof(Register);
}

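// Links the code assembled in `code` either directly into the inline patchpoint area
// reserved inside the LLVM-generated function, or, if it does not fit, into fresh
// out-of-line executable memory reached via a jump planted in the inline area. The
// callback receives the LinkBuffer that was actually used plus a flag saying which case
// applied.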
static void generateInlineIfPossibleOutOfLineIfNot(State& state, VM& vm, CodeBlock* codeBlock, CCallHelpers& code, char* startOfInlineCode, size_t sizeOfInlineCode, const char* codeDescription, const std::function<void(LinkBuffer&, CCallHelpers&, bool wasCompiledInline)>& callback)
{
    std::unique_ptr<LinkBuffer> codeLinkBuffer;
    size_t actualCodeSize = code.m_assembler.buffer().codeSize();

    if (actualCodeSize <= sizeOfInlineCode) {
        LinkBuffer codeLinkBuffer(vm, code, startOfInlineCode, sizeOfInlineCode);

        // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
        MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + actualCodeSize, sizeOfInlineCode - actualCodeSize);

        callback(codeLinkBuffer, code, true);

        return;
    }

    // If there isn't enough space in the provided inline code area, allocate out of line
    // executable memory and link the provided code there. Place a jump at the beginning of
    // the inline area that goes to the out of line code, and return by appending a jump to
    // the provided code that goes to the instruction right after the inline area.
    // Fill the middle with nops.
    MacroAssembler::Jump returnToMainline = code.jump();

    // Allocate out of line executable memory and link the provided code there.
    codeLinkBuffer = std::make_unique<LinkBuffer>(vm, code, codeBlock, JITCompilationMustSucceed);

    // Plant a jmp in the inline buffer to the out of line code.
    MacroAssembler callToOutOfLineCode;
    MacroAssembler::Jump jumpToOutOfLine = callToOutOfLineCode.jump();
    LinkBuffer inlineBuffer(vm, callToOutOfLineCode, startOfInlineCode, sizeOfInlineCode);
    inlineBuffer.link(jumpToOutOfLine, codeLinkBuffer->entrypoint());

    // Fill the remainder of the inline space with nops to avoid confusing the disassembler.
    MacroAssembler::AssemblerType_T::fillNops(bitwise_cast<char*>(startOfInlineCode) + inlineBuffer.size(), sizeOfInlineCode - inlineBuffer.size());

    // Link the end of the out of line code to right after the inline area.
    codeLinkBuffer->link(returnToMainline, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(startOfInlineCode)).labelAtOffset(sizeOfInlineCode));

    callback(*codeLinkBuffer.get(), code, false);

    state.finalizer->outOfLineCodeInfos.append(OutOfLineCodeInfo(WTF::move(codeLinkBuffer), codeDescription));
}

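// Emits and links the fast path for a get-by-id / put-by-id style inline cache. Each
// stackmap record for the IC's patchpoint gets its own copy of the fast path, wired to
// the slow path that was emitted earlier into the side code link buffer.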
template<typename DescriptorType>
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);

        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "inline cache fast path", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i],
                CodeLocationLabel(startOfIC + sizeOfIC));

            linkBuffer.link(generator.slowPathJump(),
                state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));

            generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
        });
    }
}

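// Like generateICFastPath, but for the CheckIn ("in" operator) IC, whose fast path starts
// out as a patchable jump straight to the slow path. The post-link lambda records in the
// StructureStubInfo the code offsets that later repatching needs.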
static void generateCheckInICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, CheckInDescriptor& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        StructureStubInfo& stubInfo = *generator.m_stub;
        auto call = generator.m_slowCall;
        auto slowPathBegin = generator.m_beginLabel;

        CCallHelpers fastPathJIT(&vm, codeBlock);

        auto jump = fastPathJIT.patchableJump();
        auto done = fastPathJIT.label();

        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        auto postLink = [&] (LinkBuffer& fastPath, CCallHelpers&, bool) {
            LinkBuffer& slowPath = *state.finalizer->sideCodeLinkBuffer;

            state.finalizer->sideCodeLinkBuffer->link(
                ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

            CodeLocationLabel slowPathBeginLoc = slowPath.locationOf(slowPathBegin);
            fastPath.link(jump, slowPathBeginLoc);

            CodeLocationCall callReturnLocation = slowPath.locationOf(call);

            stubInfo.patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(done));

            stubInfo.patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, fastPath.locationOf(jump));
            stubInfo.callReturnLocation = callReturnLocation;
            stubInfo.patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
                callReturnLocation, slowPathBeginLoc);
        };

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "CheckIn inline cache", postLink);
    }
}


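// Registers considered live at an IC site: either everything (when the option says to
// assume so) or the registers the stackmap record reports plus the callee-saves.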
static RegisterSet usedRegistersFor(const StackMaps::Record& record)
{
    if (Options::assumeAllRegsInFTLICAreLive())
        return RegisterSet::allRegisters();
    return RegisterSet(record.usedRegisterSet(), RegisterSet::calleeSaveRegisters());
}

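// A call descriptor may correspond to zero or more stackmap records. Prune the ones
// LLVM optimized out, duplicate the rest once per record, and sort the result as the
// comment below describes.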
template<typename CallType>
void adjustCallICsForStackmaps(Vector<CallType>& calls, StackMaps::RecordMap& recordMap)
{
    // Handling JS calls is weird: we need to ensure that we sort them by the PC in LLVM
    // generated code. That implies first pruning the ones that LLVM didn't generate.

    Vector<CallType> oldCalls;
    oldCalls.swap(calls);

    for (unsigned i = 0; i < oldCalls.size(); ++i) {
        CallType& call = oldCalls[i];

        StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
        if (iter == recordMap.end())
            continue;

        for (unsigned j = 0; j < iter->value.size(); ++j) {
            CallType copy = call;
            copy.m_instructionOffset = iter->value[j].instructionOffset;
            calls.append(copy);
        }
    }

    std::sort(calls.begin(), calls.end());
}

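// Post-processes the LLVM-generated function using the parsed stackmaps: rebases local
// and inline-call-frame offsets by the captured stack region offset, builds the
// exception-handling and OSR exit thunks, generates the IC and call fast paths at their
// patchpoints, and repatches the exception and OSR exit jumps into the generated code.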
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;

    int localsOffset = offsetOfStackRegion(recordMap, state.capturedStackmapID) + graph.m_nextMachineLocal;
    int varargsSpillSlotsOffset = offsetOfStackRegion(recordMap, state.varargsSpillSlotsStackmapID);

    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;

        if (inlineCallFrame->argumentCountRegister.isValid())
            inlineCallFrame->argumentCountRegister += localsOffset;

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }

        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }

        if (graph.hasDebuggerEnabled())
            codeBlock->setScopeRegister(codeBlock->scopeRegister() + localsOffset);
    }

    MacroAssembler::Label stackOverflowException;

    {
        CCallHelpers checkJIT(&vm, codeBlock);

        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandler = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        MacroAssembler::Call callLookupExceptionHandlerFromCallerFrame = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, checkJIT, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        linkBuffer->link(callLookupExceptionHandler, FunctionPtr(lookupExceptionHandler));
        linkBuffer->link(callLookupExceptionHandlerFromCallerFrame, FunctionPtr(lookupExceptionHandlerFromCallerFrame));

        state.finalizer->handleExceptionsLinkBuffer = WTF::move(linkBuffer);
    }

    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);

        auto linkBuffer = std::make_unique<LinkBuffer>(
            vm, exitThunkGenerator, codeBlock, JITCompilationCanFail);
        if (linkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }

        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());

        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];

            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            auto iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);

            for (unsigned j = exit.m_values.size(); j--;)
                exit.m_values[j] = exit.m_values[j].withLocalsOffset(localsOffset);
            for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                materialization->accountForLocalsOffset(localsOffset);

            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
                if (!exit.m_materializations.isEmpty()) {
                    dataLog("    Materializations: \n");
                    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
                        dataLog("        Materialize(", pointerDump(materialization), ")\n");
                }
            }
        }

        state.finalizer->exitThunksLinkBuffer = WTF::move(linkBuffer);
    }

    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty() || !state.checkIns.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);

        CCallHelpers::JumpList exceptionTarget;

        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");

            auto iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();

                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");

            auto iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();

                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());

                gen.reportSlowPathCall(begin, call);

                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            CheckInDescriptor& checkIn = state.checkIns[i];

            if (verboseCompilationEnabled())
                dataLog("Handling checkIn stackmap #", checkIn.stackmapID(), "\n");

            auto iter = recordMap.find(checkIn.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];
                RegisterSet usedRegisters = usedRegistersFor(record);
                GPRReg result = record.locations[0].directGPR();
                GPRReg obj = record.locations[1].directGPR();
                StructureStubInfo* stubInfo = codeBlock->addStubInfo();
                stubInfo->codeOrigin = checkIn.codeOrigin();
                stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
                stubInfo->patch.valueGPR = static_cast<int8_t>(result);
                stubInfo->patch.usedRegisters = usedRegisters;
                stubInfo->patch.spillMode = NeedToSpill;

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call slowCall = callOperation(
                    state, usedRegisters, slowPathJIT, checkIn.codeOrigin(), &exceptionTarget,
                    operationInOptimize, result, stubInfo, obj, checkIn.m_uid);

                checkIn.m_slowPathDone.append(slowPathJIT.jump());

                checkIn.m_generators.append(CheckInGenerator(stubInfo, slowCall, begin));
            }
        }

        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();

        state.finalizer->sideCodeLinkBuffer = std::make_unique<LinkBuffer>(vm, slowPathJIT, codeBlock, JITCompilationCanFail);
        if (state.finalizer->sideCodeLinkBuffer->didFailToAllocate()) {
            state.allocationFailed = true;
            return;
        }
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }

        for (unsigned i = state.checkIns.size(); i--;) {
            generateCheckInICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                sizeOfIn());
        }
    }

    adjustCallICsForStackmaps(state.jsCalls, recordMap);

    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfCall(), "JSCall inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer);
        });
    }

    adjustCallICsForStackmaps(state.jsCallVarargses, recordMap);

    for (unsigned i = state.jsCallVarargses.size(); i--;) {
        JSCallVarargs& call = state.jsCallVarargses[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT, varargsSpillSlotsOffset);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;
        size_t sizeOfIC = sizeOfICFor(call.node());

        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "varargs call inline cache", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
            call.link(vm, linkBuffer, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        });
    }

    RepatchBuffer repatchBuffer(codeBlock);

    auto iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }

    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }

    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);

        Vector<const void*> codeAddresses;

        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];

                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());

                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }

        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}

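// Drives the LLVM backend for one FTL compilation: builds an MCJIT execution engine over
// the module (inside a graph safepoint so the GC can proceed), runs either the simple FTL
// pass list or a PassManagerBuilder pipeline, pulls out the generated code, parses unwind
// info and stackmaps, and then fixes the function up based on the stackmaps.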
void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;

    {
        GraphSafepoint safepoint(state.graph, safepointResult);

        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = enableLLVMFastISel;
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);

        LLVMExecutionEngineRef engine;

        if (isARM64()) {
#if OS(DARWIN)
            llvm->SetTarget(state.module, "arm64-apple-ios");
#elif OS(LINUX)
            llvm->SetTarget(state.module, "aarch64-linux-gnu");
#else
#error "Unrecognized OS"
#endif
        }

        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }

        // At this point we no longer own the module.
        LModule module = state.module;
        state.module = nullptr;

        // The data layout also has to be set in the module. Get the data layout from the MCJIT and apply
        // it to the module.
        LLVMTargetMachineRef targetMachine = llvm->GetExecutionEngineTargetMachine(engine);
        LLVMTargetDataRef targetData = llvm->GetExecutionEngineTargetData(engine);
        char* stringRepOfTargetData = llvm->CopyStringRepOfTargetData(targetData);
        llvm->SetDataLayout(module, stringRepOfTargetData);
        free(stringRepOfTargetData);

        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;

        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(targetData, modulePasses);
            llvm->AddAnalysisPasses(targetMachine, modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddGlobalOptimizerPass(modulePasses);
            llvm->AddFunctionInliningPass(modulePasses);
            llvm->AddPruneEHPass(modulePasses);
            llvm->AddGlobalDCEPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddAggressiveDCEPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            // BEGIN - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            // END - DO NOT CHANGE THE ORDER OF THE ALIAS ANALYSIS PASSES
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);

            if (enableLLVMFastISel)
                llvm->AddLowerSwitchPass(modulePasses);

            llvm->RunPassManager(modulePasses, module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderUseInlinerWithThreshold(passBuilder, 275);
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());

            functionPasses = llvm->CreateFunctionPassManagerForModule(module);
            modulePasses = llvm->CreatePassManager();

            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);

            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);

            llvm->PassManagerBuilderDispose(passBuilder);

            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);

            llvm->RunPassManager(modulePasses, module);
        }

        if (shouldShowDisassembly() || verboseCompilationEnabled())
            state.dumpState(module, "after optimization");

        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        if (functionPasses)
            llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }

    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());

    if (state.allocationFailed)
        return;

    if (shouldShowDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }

        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }

    bool didSeeUnwindInfo = state.jitCode->unwindInfo.parse(
        state.unwindDataSection, state.unwindDataSectionSize,
        state.generatedFunction);
    if (shouldShowDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        if (didSeeUnwindInfo)
            dataLog("    ", state.jitCode->unwindInfo, "\n");
        else
            dataLog("    <no unwind info>\n");
    }

    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldShowDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }

        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());

        if (shouldShowDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "    ");
        }

        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap, didSeeUnwindInfo);
        if (state.allocationFailed)
            return;

        if (shouldShowDisassembly() || Options::asyncDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != SECTION_NAME("text"))
                    continue;

                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();

                CString header = toCString(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode, " #", i, ", ",
                    state.codeSectionNames[i], ":\n");

                if (Options::asyncDisassembly()) {
                    disassembleAsynchronously(
                        header, MacroAssemblerCodeRef(handle), handle->sizeInBytes(), "    ",
                        LLVMSubset);
                    continue;
                }

                dataLog(header);
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)