dfg/DFGPlan.cpp
/*
 * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGPlan.h"

#if ENABLE(DFG_JIT)

#include "DFGArgumentsEliminationPhase.h"
#include "DFGBackwardsPropagationPhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
#include "DFGCFGSimplificationPhase.h"
#include "DFGCPSRethreadingPhase.h"
#include "DFGCSEPhase.h"
#include "DFGCleanUpPhase.h"
#include "DFGConstantFoldingPhase.h"
#include "DFGConstantHoistingPhase.h"
#include "DFGCriticalEdgeBreakingPhase.h"
#include "DFGDCEPhase.h"
#include "DFGFailedFinalizer.h"
#include "DFGFixupPhase.h"
#include "DFGGraphSafepoint.h"
#include "DFGIntegerCheckCombiningPhase.h"
#include "DFGIntegerRangeOptimizationPhase.h"
#include "DFGInvalidationPointInjectionPhase.h"
#include "DFGJITCompiler.h"
#include "DFGLICMPhase.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGLoopPreHeaderCreationPhase.h"
#include "DFGMovHintRemovalPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGOSREntrypointCreationPhase.h"
#include "DFGObjectAllocationSinkingPhase.h"
#include "DFGPhantomInsertionPhase.h"
#include "DFGPredictionInjectionPhase.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGPutStackSinkingPhase.h"
#include "DFGSSAConversionPhase.h"
#include "DFGSSALoweringPhase.h"
#include "DFGStackLayoutPhase.h"
#include "DFGStaticExecutionCountEstimationPhase.h"
#include "DFGStoreBarrierInsertionPhase.h"
#include "DFGStrengthReductionPhase.h"
#include "DFGStructureRegistrationPhase.h"
#include "DFGTierUpCheckInjectionPhase.h"
#include "DFGTypeCheckHoistingPhase.h"
#include "DFGUnificationPhase.h"
#include "DFGValidate.h"
#include "DFGVarargsForwardingPhase.h"
#include "DFGVirtualRegisterAllocationPhase.h"
#include "DFGWatchpointCollectionPhase.h"
#include "Debugger.h"
#include "JSCInlines.h"
#include "OperandsInlines.h"
#include "ProfilerDatabase.h"
#include "TrackedReferences.h"
#include <wtf/CurrentTime.h>

#if ENABLE(FTL_JIT)
#include "FTLCapabilities.h"
#include "FTLCompile.h"
#include "FTLFail.h"
#include "FTLLink.h"
#include "FTLLowerDFGToLLVM.h"
#include "FTLState.h"
#include "InitializeLLVM.h"
#endif

namespace JSC { namespace DFG {

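// Dumps the graph when verbose compilation is enabled (or when forced) and, if validation is
// enabled, runs DFG validation over it. If the graph was already dumped here, validation is
// told not to dump it again on failure.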
static void dumpAndVerifyGraph(Graph& graph, const char* text, bool forceDump = false)
{
    GraphDumpMode modeForFinalValidate = DumpGraph;
    if (verboseCompilationEnabled(graph.m_plan.mode) || forceDump) {
        dataLog(text, "\n");
        graph.dump();
        modeForFinalValidate = DontDumpGraph;
    }
    if (validationEnabled())
        validate(graph, modeForFinalValidate);
}

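// Maps a DFG::CompilationMode to the corresponding Profiler::CompilationKind for the
// per-bytecode profiler database.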
static Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
{
    switch (mode) {
    case InvalidCompilationMode:
        RELEASE_ASSERT_NOT_REACHED();
        return Profiler::DFG;
    case DFGMode:
        return Profiler::DFG;
    case FTLMode:
        return Profiler::FTL;
    case FTLForOSREntryMode:
        return Profiler::FTLForOSREntry;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return Profiler::DFG;
}

Plan::Plan(PassRefPtr<CodeBlock> passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
    CompilationMode mode, unsigned osrEntryBytecodeIndex,
    const Operands<JSValue>& mustHandleValues)
    : vm(*passedCodeBlock->vm())
    , codeBlock(passedCodeBlock)
    , profiledDFGCodeBlock(profiledDFGCodeBlock)
    , mode(mode)
    , osrEntryBytecodeIndex(osrEntryBytecodeIndex)
    , mustHandleValues(mustHandleValues)
    , compilation(codeBlock->vm()->m_perBytecodeProfiler ? adoptRef(new Profiler::Compilation(codeBlock->vm()->m_perBytecodeProfiler->ensureBytecodesFor(codeBlock.get()), profilerCompilationKindForMode(mode))) : 0)
    , inlineCallFrames(adoptRef(new InlineCallFrameSet()))
    , identifiers(codeBlock.get())
    , weakReferences(codeBlock.get())
    , willTryToTierUp(false)
    , stage(Preparing)
{
}

Plan::~Plan()
{
}

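// Compile times are reported if requested globally, or if FTL compile time reporting is
// requested and this is an FTL (or FTL-for-OSR-entry) compile.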
bool Plan::reportCompileTimes() const
{
    return Options::reportCompileTimes()
        || (Options::reportFTLCompileTimes() && isFTL(mode));
}

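// Top-level driver for a compilation: runs the phase pipeline via compileInThreadImpl() and,
// if compile times are being reported, logs which path was taken, the generated code size,
// and how long the compile took.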
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;

    double before = 0;
    CString codeBlockName;
    if (reportCompileTimes()) {
        before = monotonicallyIncreasingTimeMS();
        codeBlockName = toCString(*codeBlock);
    }

    SamplingRegion samplingRegion("DFG Compilation (Plan)");
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(path == CancelPath || finalizer);
    RELEASE_ASSERT((path == CancelPath) == (stage == Cancelled));

    if (reportCompileTimes()) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        case CancelPath:
            pathName = "Cancelled";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            pathName = "";
#endif
            break;
        }
        double now = monotonicallyIncreasingTimeMS();
        dataLog("Optimized ", codeBlockName, " using ", mode, " with ", pathName, " into ", finalizer ? finalizer->codeSize() : 0, " bytes in ", now - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", m_timeBeforeFTL - before, ", LLVM: ", now - m_timeBeforeFTL, ")");
        dataLog(".\n");
    }
}

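// The phase pipeline itself. Parses bytecode into the DFG, runs the shared optimization
// phases, and then either finishes the compile with the DFG back end or hands the graph to
// the FTL (LLVM) back end, depending on the compilation mode.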
Plan::CompilationPath Plan::compileInThreadImpl(LongLivedState& longLivedState)
{
    if (verboseCompilationEnabled(mode) && osrEntryBytecodeIndex != UINT_MAX) {
        dataLog("\n");
        dataLog("Compiler must handle OSR entry from bc#", osrEntryBytecodeIndex, " with values: ", mustHandleValues, "\n");
        dataLog("\n");
    }

    Graph dfg(vm, *this, longLivedState);

    if (!parse(dfg)) {
        finalizer = std::make_unique<FailedFinalizer>(*this);
        return FailPath;
    }

    // By this point the DFG bytecode parser will have potentially mutated various tables
    // in the CodeBlock. This is a good time to perform an early shrink, which is more
    // powerful than a late one. It's safe to do so because we haven't generated any code
    // that references any of the tables directly, yet.
    codeBlock->shrinkToFit(CodeBlock::EarlyShrink);

    if (validationEnabled())
        validate(dfg);

    if (Options::dumpGraphAfterParsing()) {
        dataLog("Graph after parsing:\n");
        dfg.dump();
    }

    performCPSRethreading(dfg);
    performUnification(dfg);
    performPredictionInjection(dfg);

    performStaticExecutionCountEstimation(dfg);

    if (mode == FTLForOSREntryMode) {
        bool result = performOSREntrypointCreation(dfg);
        if (!result) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }
        performCPSRethreading(dfg);
    }

    if (validationEnabled())
        validate(dfg);

    performBackwardsPropagation(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performStructureRegistration(dfg);
    performInvalidationPointInjection(dfg);
    performTypeCheckHoisting(dfg);

    dfg.m_fixpointState = FixpointNotConverged;

    // For now we're back to avoiding a fixpoint. Note that we've ping-ponged on this decision
    // many times. For maximum throughput, it's best to fixpoint. But the throughput benefit is
    // small and not likely to show up in FTL anyway. On the other hand, not fixpointing means
    // that the compiler compiles more quickly. We want the third tier to compile quickly, which
    // not fixpointing accomplishes; and the fourth tier shouldn't need a fixpoint.
    if (validationEnabled())
        validate(dfg);

    performStrengthReduction(dfg);
    performLocalCSE(dfg);
    performCPSRethreading(dfg);
    performCFA(dfg);
    performConstantFolding(dfg);
    bool changed = false;
    changed |= performCFGSimplification(dfg);
    changed |= performLocalCSE(dfg);

    if (validationEnabled())
        validate(dfg);

    performCPSRethreading(dfg);
    if (!isFTL(mode)) {
        // Only run this if we're not FTLing, because currently for a LoadVarargs that is forwardable and
        // in a non-varargs inlined call frame, this will generate ForwardVarargs while the FTL
        // ArgumentsEliminationPhase will create a sequence of GetStack+PutStacks. The GetStack+PutStack
        // sequence then gets sunk, eliminating anything that looks like an escape for subsequent phases,
        // while the ForwardVarargs doesn't get simplified until later (or not at all) and looks like an
        // escape for all of the arguments. This then disables object allocation sinking.
        //
        // So, for now, we just disable this phase for the FTL.
        //
        // If we wanted to enable it, we'd have to do any of the following:
        // - Enable ForwardVarargs->GetStack+PutStack strength reduction, and have that run before
        //   PutStack sinking and object allocation sinking.
        // - Make VarargsForwarding emit a GetLocal+SetLocal sequence, that we can later turn into
        //   GetStack+PutStack.
        //
        // But, it's not super valuable to enable those optimizations, since the FTL
        // ArgumentsEliminationPhase does everything that this phase does, and it doesn't introduce this
        // pathology.

        changed |= performVarargsForwarding(dfg); // Do this after CFG simplification and CPS rethreading.
    }
    if (changed) {
        performCFA(dfg);
        performConstantFolding(dfg);
    }

    // If we're doing validation, then run some analyses, to give them an opportunity
    // to self-validate. Now is as good a time as any to do this.
    if (validationEnabled()) {
        dfg.m_dominators.computeIfNecessary(dfg);
        dfg.m_naturalLoops.computeIfNecessary(dfg);
        dfg.m_prePostNumbering.computeIfNecessary(dfg);
    }

    switch (mode) {
    case DFGMode: {
        dfg.m_fixpointState = FixpointConverged;

        performTierUpCheckInjection(dfg);

        performFastStoreBarrierInsertion(dfg);
        performCleanUp(dfg);
        performCPSRethreading(dfg);
        performDCE(dfg);
        performPhantomInsertion(dfg);
        performStackLayout(dfg);
        performVirtualRegisterAllocation(dfg);
        performWatchpointCollection(dfg);
        dumpAndVerifyGraph(dfg, "Graph after optimization:");

        JITCompiler dataFlowJIT(dfg);
        if (codeBlock->codeType() == FunctionCode)
            dataFlowJIT.compileFunction();
        else
            dataFlowJIT.compile();

        return DFGPath;
    }

    case FTLMode:
    case FTLForOSREntryMode: {
#if ENABLE(FTL_JIT)
        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        performCleanUp(dfg); // Reduce the graph size a bit.
        performCriticalEdgeBreaking(dfg);
        performLoopPreHeaderCreation(dfg);
        performCPSRethreading(dfg);
        performSSAConversion(dfg);
        performSSALowering(dfg);

        // Ideally, these would be run to fixpoint with the object allocation sinking phase.
        performArgumentsElimination(dfg);
        performPutStackSinking(dfg);

        performConstantHoisting(dfg);
        performGlobalCSE(dfg);
        performLivenessAnalysis(dfg);
        performIntegerRangeOptimization(dfg);
        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performConstantFolding(dfg);
        performCleanUp(dfg); // Reduce the graph size a lot.
        changed = false;
        changed |= performStrengthReduction(dfg);
        if (Options::enableObjectAllocationSinking()) {
            changed |= performCriticalEdgeBreaking(dfg);
            changed |= performObjectAllocationSinking(dfg);
        }
        if (changed) {
            // State-at-tail and state-at-head will be invalid if we did strength reduction since
            // it might increase live ranges.
            performLivenessAnalysis(dfg);
            performCFA(dfg);
            performConstantFolding(dfg);
        }

        // Currently, this relies on pre-headers still being valid. That precludes running CFG
        // simplification before it, unless we re-created the pre-headers. There wouldn't be anything
        // wrong with running LICM earlier, if we wanted to put other CFG transforms above this point.
        // Alternatively, we could run loop pre-header creation after SSA conversion - but if we did that
        // then we'd need to do some simple SSA fix-up.
        performLICM(dfg);

        performCleanUp(dfg);
        performIntegerCheckCombining(dfg);
        performGlobalCSE(dfg);

        // At this point we're not allowed to do any further code motion because our reasoning
        // about code motion assumes that it's OK to insert GC points in random places.
        dfg.m_fixpointState = FixpointConverged;

        performLivenessAnalysis(dfg);
        performCFA(dfg);
        performGlobalStoreBarrierInsertion(dfg);
        if (Options::enableMovHintRemoval())
            performMovHintRemoval(dfg);
        performCleanUp(dfg);
        performDCE(dfg); // We rely on this to kill dead code that won't be recognized as dead by LLVM.
        performStackLayout(dfg);
        performLivenessAnalysis(dfg);
        performOSRAvailabilityAnalysis(dfg);
        performWatchpointCollection(dfg);

        if (FTL::canCompile(dfg) == FTL::CannotCompile) {
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        dumpAndVerifyGraph(dfg, "Graph just before FTL lowering:", shouldShowDisassembly(mode));

        bool haveLLVM;
        Safepoint::Result safepointResult;
        {
            GraphSafepoint safepoint(dfg, safepointResult);
            haveLLVM = initializeLLVM();
        }
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (!haveLLVM) {
            if (Options::ftlCrashesIfCantInitializeLLVM()) {
                dataLog("LLVM can't be initialized.\n");
                CRASH();
            }
            finalizer = std::make_unique<FailedFinalizer>(*this);
            return FailPath;
        }

        FTL::State state(dfg);
        FTL::lowerDFGToLLVM(state);

        if (reportCompileTimes())
            m_timeBeforeFTL = monotonicallyIncreasingTimeMS();

        if (Options::llvmAlwaysFailsBeforeCompile()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::compile(state, safepointResult);
        if (safepointResult.didGetCancelled())
            return CancelPath;

        if (Options::llvmAlwaysFailsBeforeLink()) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        if (state.jitCode->stackmaps.stackSize() > Options::llvmMaxStackSize()) {
            FTL::fail(state);
            return FTLPath;
        }

        FTL::link(state);

        if (state.allocationFailed) {
            FTL::fail(state);
            return FTLPath;
        }

        return FTLPath;
#else
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
#endif // ENABLE(FTL_JIT)
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return FailPath;
    }
}

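// Checks whether the compiled code is still installable: the code block must still have a
// replacement whose baseline version is our alternative, and all of the watchpoints we
// collected during compilation must still be valid.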
bool Plan::isStillValid()
{
    CodeBlock* replacement = codeBlock->replacement();
    if (!replacement)
        return false;
    // FIXME: This is almost certainly not necessary. There's no way for the baseline
    // code to be replaced during a compilation, except if we delete the plan, in which
    // case we wouldn't be here.
    // https://bugs.webkit.org/show_bug.cgi?id=132707
    if (codeBlock->alternative() != replacement->baselineVersion())
        return false;
    if (!watchpoints.areStillValid())
        return false;
    return true;
}

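// Commits the desired watchpoints, identifiers, weak references and transitions collected
// during compilation into the code block's common DFG data, and triggers the deferred write
// barriers.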
void Plan::reallyAdd(CommonData* commonData)
{
    watchpoints.reallyAdd(codeBlock.get(), *commonData);
    identifiers.reallyAdd(vm, commonData);
    weakReferences.reallyAdd(vm, commonData);
    transitions.reallyAdd(vm, commonData);
    writeBarriers.trigger(vm);
}

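// Stage transitions driven by the worklist: Preparing -> Compiling -> Compiled -> Ready.
// notifyReady() also tells the callback that the compilation became ready asynchronously.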
void Plan::notifyCompiling()
{
    stage = Compiling;
}

void Plan::notifyCompiled()
{
    stage = Compiled;
}

void Plan::notifyReady()
{
    callback->compilationDidBecomeReadyAsynchronously(codeBlock.get());
    stage = Ready;
}

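// Installs the compiled code: bails out if the plan has been invalidated, asks the finalizer
// to link the code, commits the desired data via reallyAdd(), and (when validation is
// enabled) checks that every reference reachable from the new JITCode is tracked.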
CompilationResult Plan::finalizeWithoutNotifyingCallback()
{
    if (!isStillValid())
        return CompilationInvalidated;

    bool result;
    if (codeBlock->codeType() == FunctionCode)
        result = finalizer->finalizeFunction();
    else
        result = finalizer->finalize();

    if (!result)
        return CompilationFailed;

    reallyAdd(codeBlock->jitCode()->dfgCommon());

    if (validationEnabled()) {
        TrackedReferences trackedReferences;

        for (WriteBarrier<JSCell>& reference : codeBlock->jitCode()->dfgCommon()->weakReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Structure>& reference : codeBlock->jitCode()->dfgCommon()->weakStructureReferences)
            trackedReferences.add(reference.get());
        for (WriteBarrier<Unknown>& constant : codeBlock->constants())
            trackedReferences.add(constant.get());

        // Check that any other references that we have anywhere in the JITCode are also
        // tracked either strongly or weakly.
        codeBlock->jitCode()->validateReferences(trackedReferences);
    }

    return CompilationSuccessful;
}

void Plan::finalizeAndNotifyCallback()
{
    callback->compilationDidComplete(codeBlock.get(), finalizeWithoutNotifyingCallback());
}

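// The key under which this plan is identified: the code block's alternative plus the
// compilation mode.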
CompilationKey Plan::key()
{
    return CompilationKey(codeBlock->alternative(), mode);
}

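// Called during GC: if the plan is still live, keeps the must-handle OSR entry values and
// the involved code blocks alive, and visits the other references the compilation holds.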
void Plan::checkLivenessAndVisitChildren(SlotVisitor& visitor, CodeBlockSet& codeBlocks)
{
    if (!isKnownToBeLiveDuringGC())
        return;

    for (unsigned i = mustHandleValues.size(); i--;)
        visitor.appendUnbarrieredValue(&mustHandleValues[i]);

    codeBlocks.mark(codeBlock->alternative());
    codeBlocks.mark(codeBlock.get());
    codeBlocks.mark(profiledDFGCodeBlock.get());

    weakReferences.visitChildren(visitor);
    writeBarriers.visitChildren(visitor);
    transitions.visitChildren(visitor);
}

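// A plan stays live across a GC only if it has not been cancelled and its owner executable,
// its alternative code block, and (if present) the profiled DFG code block are all still
// known to be live.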
bool Plan::isKnownToBeLiveDuringGC()
{
    if (stage == Cancelled)
        return false;
    if (!Heap::isMarked(codeBlock->ownerExecutable()))
        return false;
    if (!codeBlock->alternative()->isKnownToBeLiveDuringGC())
        return false;
    if (!!profiledDFGCodeBlock && !profiledDFGCodeBlock->isKnownToBeLiveDuringGC())
        return false;
    return true;
}

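// Drops every reference the plan holds so that a cancelled plan keeps nothing alive, and
// marks the plan as Cancelled.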
void Plan::cancel()
{
    codeBlock = nullptr;
    profiledDFGCodeBlock = nullptr;
    mustHandleValues.clear();
    compilation = nullptr;
    finalizer = nullptr;
    inlineCallFrames = nullptr;
    watchpoints = DesiredWatchpoints();
    identifiers = DesiredIdentifiers();
    weakReferences = DesiredWeakReferences();
    writeBarriers = DesiredWriteBarriers();
    transitions = DesiredTransitions();
    callback = nullptr;
    stage = Cancelled;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)