/*
 * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "DFGConstantFoldingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGAbstractInterpreterInlines.h"
#include "DFGArgumentsUtilities.h"
#include "DFGBasicBlock.h"
#include "DFGInPlaceAbstractState.h"
#include "DFGInsertionSet.h"
#include "GetByIdStatus.h"
#include "JSCInlines.h"
#include "PutByIdStatus.h"

namespace JSC { namespace DFG {

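// The constant folding phase consumes the results of the CFA computed by InPlaceAbstractState
// and AbstractInterpreter: for each block where the CFA found constants, it eliminates checks
// the abstract state has already proven, strength-reduces property accesses whose base structure
// is known, and converts nodes whose abstract value is a single constant into constant nodes.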
class ConstantFoldingPhase : public Phase {
public:
    ConstantFoldingPhase(Graph& graph)
        : Phase(graph, "constant folding")
        , m_state(graph)
        , m_interpreter(graph, m_state)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        bool changed = false;

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            if (block->cfaFoundConstants)
                changed |= foldConstants(block);
        }

        if (changed && m_graph.m_form == SSA) {
            // It's now possible that we have Upsilons pointed at JSConstants. Fix that.
            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;
                fixUpsilons(block);
            }
        }

        return changed;
    }

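    // Folds constants within a single block. The abstract interpreter is stepped over each node
    // so that m_state describes what is known just before that node; each case below either
    // proves a check redundant (setting 'eliminated'), rewrites the node to a cheaper form, or
    // leaves it to the generic constant-folding path at the bottom of the loop.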
    bool foldConstants(BasicBlock* block)
    {
        bool changed = false;
        m_state.beginBasicBlock(block);
        for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
            if (!m_state.isValid())
                break;

            Node* node = block->at(indexInBlock);

            bool alreadyHandled = false;
            bool eliminated = false;

            switch (node->op()) {
            case BooleanToNumber: {
                if (node->child1().useKind() == UntypedUse
                    && !m_interpreter.needsTypeCheck(node->child1(), SpecBoolean))
                    node->child1().setUseKind(BooleanUse);
                break;
            }

            case CheckStructure:
            case ArrayifyToStructure: {
                AbstractValue& value = m_state.forNode(node->child1());
                StructureSet set;
                if (node->op() == ArrayifyToStructure)
                    set = node->structure();
                else
                    set = node->structureSet();
                if (value.m_structure.isSubsetOf(set)) {
                    m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
                    node->remove();
                    eliminated = true;
                    break;
                }
                break;
            }

            case GetIndexedPropertyStorage: {
                JSArrayBufferView* view = m_graph.tryGetFoldableView(
                    m_state.forNode(node->child1()).m_value, node->arrayMode());
                if (!view)
                    break;

                if (view->mode() == FastTypedArray) {
                    // FIXME: It would be awesome to be able to fold the property storage for
                    // these GC-allocated typed arrays. For now it doesn't matter because the
                    // most common use-cases for constant typed arrays involve large arrays with
                    // aliased buffer views.
                    // https://bugs.webkit.org/show_bug.cgi?id=125425
                    break;
                }

                m_interpreter.execute(indexInBlock);
                eliminated = true;

                m_insertionSet.insertCheck(indexInBlock, node->origin, node->children);
                node->convertToConstantStoragePointer(view->vector());
                break;
            }

            case CheckStructureImmediate: {
                AbstractValue& value = m_state.forNode(node->child1());
                StructureSet& set = node->structureSet();

                if (Structure* structure = jsDynamicCast<Structure*>(value.value())) {
                    if (set.contains(structure)) {
                        m_interpreter.execute(indexInBlock);
                        node->remove();
                        eliminated = true;
                        break;
                    }
                }

                if (PhiChildren* phiChildren = m_interpreter.phiChildren()) {
                    // True only if every value that can flow into the Phi is a constant
                    // structure contained in the checked set.
                    bool allGood = true;
                    phiChildren->forAllTransitiveIncomingValues(
                        node,
                        [&] (Node* incoming) {
                            if (Structure* structure = incoming->dynamicCastConstant<Structure*>()) {
                                if (set.contains(structure))
                                    return;
                            }
                            allGood = false;
                        });
                    if (allGood) {
                        m_interpreter.execute(indexInBlock);
                        node->remove();
                        eliminated = true;
                    }
                }
                break;
            }

            case CheckArray:
            case Arrayify: {
                if (!node->arrayMode().alreadyChecked(m_graph, node, m_state.forNode(node->child1())))
                    break;
                node->remove();
                eliminated = true;
                break;
            }

            case PutStructure: {
                if (m_state.forNode(node->child1()).m_structure.onlyStructure() != node->transition()->next)
                    break;
                node->remove();
                eliminated = true;
                break;
            }

            case CheckCell: {
                if (m_state.forNode(node->child1()).value() != node->cellOperand()->value())
                    break;
                node->remove();
                eliminated = true;
                break;
            }

            case CheckNotEmpty: {
                if (m_state.forNode(node->child1()).m_type & SpecEmpty)
                    break;
                node->remove();
                eliminated = true;
                break;
            }

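            // A single unsigned comparison proves both bounds at once: if 'left' and 'right' are
            // known int32 constants and (uint32_t)left < (uint32_t)right, then 0 <= left < right,
            // so this CheckInBounds can never fail.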
            case CheckInBounds: {
                JSValue left = m_state.forNode(node->child1()).value();
                JSValue right = m_state.forNode(node->child2()).value();
                if (left && right && left.isInt32() && right.isInt32()
                    && static_cast<uint32_t>(left.asInt32()) < static_cast<uint32_t>(right.asInt32())) {
                    node->remove();
                    eliminated = true;
                    break;
                }
                break;
            }

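            // GetMyArgumentByVal with a constant index can usually be turned into a direct read
            // of the corresponding argument stack slot (GetStack); if the access cannot be proven
            // in-bounds statically, an explicit CheckInBounds against the arguments length is
            // inserted first.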
            case GetMyArgumentByVal: {
                JSValue index = m_state.forNode(node->child2()).value();
                if (!index || !index.isInt32())
                    break;

                Node* arguments = node->child1().node();
                InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

                // Don't try to do anything if the index is known to be outside our static bounds. Note
                // that our static bounds are usually strictly larger than the dynamic bounds. The
                // exception is something like this, assuming foo() is not inlined:
                //
                //     function foo() { return arguments[5]; }
                //
                // Here the static bound on number of arguments is 0, and we're accessing index 5. We
                // will not strength-reduce this to GetStack because GetStack is otherwise assumed by the
                // compiler to access those variables that are statically accounted for; for example if
                // we emitted a GetStack on arg6 we would have out-of-bounds access crashes anywhere that
                // uses an Operands<> map. There is not much cost to continuing to use a
                // GetMyArgumentByVal in such statically-out-of-bounds accesses; we just lose CFA unless
                // GCSE removes the access entirely.
                if (inlineCallFrame) {
                    if (index.asUInt32() >= inlineCallFrame->arguments.size() - 1)
                        break;
                } else {
                    if (index.asUInt32() >= m_state.variables().numberOfArguments() - 1)
                        break;
                }

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.

                StackAccessData* data;
                if (inlineCallFrame) {
                    data = m_graph.m_stackAccessData.add(
                        VirtualRegister(
                            inlineCallFrame->stackOffset +
                            CallFrame::argumentOffset(index.asInt32())),
                        FlushedJSValue);
                } else {
                    data = m_graph.m_stackAccessData.add(
                        virtualRegisterForArgument(index.asInt32() + 1), FlushedJSValue);
                }

                if (inlineCallFrame && !inlineCallFrame->isVarargs()
                    && index.asUInt32() < inlineCallFrame->arguments.size() - 1) {
                    node->convertToGetStack(data);
                    eliminated = true;
                    break;
                }

                Node* length = emitCodeToGetArgumentsArrayLength(
                    m_insertionSet, arguments, indexInBlock, node->origin);
                m_insertionSet.insertNode(
                    indexInBlock, SpecNone, CheckInBounds, node->origin,
                    node->child2(), Edge(length, Int32Use));
                node->convertToGetStack(data);
                eliminated = true;
                break;
            }

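            // MultiGetByOffset carries one variant per possible base structure. Variants whose
            // structures the CFA has ruled out are pruned; if exactly one variant survives, the
            // node collapses to a plain GetByOffset via emitGetByOffset().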
            case MultiGetByOffset: {
                Edge baseEdge = node->child1();
                Node* base = baseEdge.node();
                MultiGetByOffsetData& data = node->multiGetByOffsetData();

                // First prune the variants, then check if the MultiGetByOffset can be
                // strength-reduced to a GetByOffset.

                AbstractValue baseValue = m_state.forNode(base);

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.

                for (unsigned i = 0; i < data.variants.size(); ++i) {
                    GetByIdVariant& variant = data.variants[i];
                    variant.structureSet().filter(baseValue);
                    if (variant.structureSet().isEmpty()) {
                        data.variants[i--] = data.variants.last();
                        data.variants.removeLast();
                        changed = true;
                    }
                }

                if (data.variants.size() != 1)
                    break;

                emitGetByOffset(
                    indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
                changed = true;
                break;
            }

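            // Same pruning as for MultiGetByOffset. In addition, a Transition variant whose old
            // and new structures turn out to be the same structure is really just a store, so it
            // is rewritten as a replace variant.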
            case MultiPutByOffset: {
                Edge baseEdge = node->child1();
                Node* base = baseEdge.node();
                MultiPutByOffsetData& data = node->multiPutByOffsetData();

                AbstractValue baseValue = m_state.forNode(base);

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.

                for (unsigned i = 0; i < data.variants.size(); ++i) {
                    PutByIdVariant& variant = data.variants[i];
                    variant.oldStructure().filter(baseValue);

                    if (variant.oldStructure().isEmpty()) {
                        data.variants[i--] = data.variants.last();
                        data.variants.removeLast();
                        changed = true;
                        continue;
                    }

                    if (variant.kind() == PutByIdVariant::Transition
                        && variant.oldStructure().onlyStructure() == variant.newStructure()) {
                        variant = PutByIdVariant::replace(
                            variant.oldStructure(),
                            variant.offset());
                        changed = true;
                    }
                }

                if (data.variants.size() != 1)
                    break;

                emitPutByOffset(
                    indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
                changed = true;
                break;
            }

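            // A GetById whose base has a precisely known structure set can be resolved through
            // GetByIdStatus: a single simple variant becomes an inlined GetByOffset, while
            // multiple variants become a MultiGetByOffset (FTL only).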
            case GetById:
            case GetByIdFlush: {
                Edge childEdge = node->child1();
                Node* child = childEdge.node();
                unsigned identifierNumber = node->identifierNumber();

                AbstractValue baseValue = m_state.forNode(child);

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.

                if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered()
                    || (node->child1().useKind() == UntypedUse || (baseValue.m_type & ~SpecCell)))
                    break;

                GetByIdStatus status = GetByIdStatus::computeFor(
                    baseValue.m_structure.set(), m_graph.identifiers()[identifierNumber]);
                if (!status.isSimple())
                    break;

                for (unsigned i = status.numVariants(); i--;) {
                    if (!status[i].constantChecks().isEmpty()
                        || status[i].alternateBase()) {
                        // FIXME: We could handle prototype cases.
                        // https://bugs.webkit.org/show_bug.cgi?id=110386
                        break;
                    }
                }

                if (status.numVariants() == 1) {
                    emitGetByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                    changed = true;
                    break;
                }

                if (!isFTL(m_graph.m_plan.mode))
                    break;

                MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
                data->variants = status.variants();
                data->identifierNumber = identifierNumber;
                node->convertToMultiGetByOffset(data);
                changed = true;
                break;
            }

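            // The PutById family is handled analogously: PutByIdStatus over the proven structure
            // set yields either a single PutByOffset (possibly with a structure transition) or,
            // in the FTL, a MultiPutByOffset covering all remaining variants.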
            case PutById:
            case PutByIdDirect:
            case PutByIdFlush: {
                NodeOrigin origin = node->origin;
                Edge childEdge = node->child1();
                Node* child = childEdge.node();
                unsigned identifierNumber = node->identifierNumber();

                ASSERT(childEdge.useKind() == CellUse);

                AbstractValue baseValue = m_state.forNode(child);

                m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
                alreadyHandled = true; // Don't allow the default constant folder to do things to this.

                if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered())
                    break;

                PutByIdStatus status = PutByIdStatus::computeFor(
                    m_graph.globalObjectFor(origin.semantic),
                    baseValue.m_structure.set(),
                    m_graph.identifiers()[identifierNumber],
                    node->op() == PutByIdDirect);

                if (!status.isSimple())
                    break;

                ASSERT(status.numVariants());

                if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode))
                    break;

                changed = true;

                for (unsigned i = status.numVariants(); i--;)
                    addChecks(origin, indexInBlock, status[i].constantChecks());

                if (status.numVariants() == 1) {
                    emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                    break;
                }

                ASSERT(isFTL(m_graph.m_plan.mode));

                MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
                data->variants = status.variants();
                data->identifierNumber = identifierNumber;
                node->convertToMultiPutByOffset(data);
                break;
            }

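            // ToPrimitive is a no-op when the operand is already known to be a primitive
            // (number, boolean, string, or symbol), so it degenerates to Identity.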
            case ToPrimitive: {
                if (m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString | SpecSymbol))
                    break;

                node->convertToIdentity();
                changed = true;
                break;
            }

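            // For a pure check node, any edge whose type check the abstract state has already
            // proven (or that will never emit a check) can be dropped from the adjacency list.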
            case Check: {
                alreadyHandled = true;
                m_interpreter.execute(indexInBlock);
                for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
                    Edge edge = node->children.child(i);
                    if (!edge)
                        break;
                    if (edge.isProved() || edge.willNotHaveCheck()) {
                        node->children.removeEdge(i--);
                        changed = true;
                    }
                }
                break;
            }

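            // Nodes not handled by a case above fall through to the generic folding path below,
            // which turns any node whose abstract value is a single constant into a constant.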
            default:
                break;
            }

            if (eliminated) {
                changed = true;
                continue;
            }

            if (alreadyHandled)
                continue;

            m_interpreter.execute(indexInBlock);
            if (!m_state.isValid()) {
                // If we invalidated then we shouldn't attempt to constant-fold. Here's an
                // example of how this can happen:
                //
                //     c: JSConstant(4.2)
                //     x: ValueToInt32(Check:Int32:@const)
                //
                // It would be correct for an analysis to assume that execution cannot
                // proceed past @x. Therefore, constant-folding @x could be rather bad. But,
                // the CFA may report that it found a constant even though it also reported
                // that everything has been invalidated. This will only happen in a couple of
                // the constant folding cases; most of them are also separately defensive
                // about such things.
                continue;
            }

            if (!node->shouldGenerate() || m_state.didClobber() || node->hasConstant())
                continue;

            // Interesting fact: this freezing that we do right here may turn a fragile value into
            // a weak value. See DFGValueStrength.h.
            FrozenValue* value = m_graph.freeze(m_state.forNode(node).value());
            if (!*value)
                continue;

            if (node->op() == GetLocal) {
                // Need to preserve bytecode liveness in ThreadedCPS form. This wouldn't be necessary
                // if it wasn't for https://bugs.webkit.org/show_bug.cgi?id=144086.
                m_insertionSet.insertNode(
                    indexInBlock, SpecNone, PhantomLocal, node->origin,
                    OpInfo(node->variableAccessData()));
            } else
                m_insertionSet.insertCheck(indexInBlock, node->origin, node->children);
            m_graph.convertToConstant(node, value);

            changed = true;
        }

        m_state.reset();
        m_insertionSet.execute(block);

        return changed;
    }

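    // Rewrites a GetById-like node as a direct GetByOffset for the given variant: adds whatever
    // base checks are still needed, folds the load outright if the property's value can be
    // proven constant, and otherwise wires the load to either the inline storage or the
    // butterfly.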
    void emitGetByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const GetByIdVariant& variant, unsigned identifierNumber)
    {
        NodeOrigin origin = node->origin;
        Edge childEdge = node->child1();

        addBaseCheck(indexInBlock, node, baseValue, variant.structureSet());

        JSValue baseForLoad;
        if (variant.alternateBase())
            baseForLoad = variant.alternateBase();
        else
            baseForLoad = baseValue.m_value;
        if (JSValue value = m_graph.tryGetConstantProperty(baseForLoad, variant.baseStructure(), variant.offset())) {
            m_graph.convertToConstant(node, m_graph.freeze(value));
            return;
        }

        if (variant.alternateBase()) {
            Node* child = m_insertionSet.insertConstant(indexInBlock, origin, variant.alternateBase());
            childEdge = Edge(child, KnownCellUse);
        } else
            childEdge.setUseKind(KnownCellUse);

        Edge propertyStorage;

        if (isInlineOffset(variant.offset()))
            propertyStorage = childEdge;
        else {
            propertyStorage = Edge(m_insertionSet.insertNode(
                indexInBlock, SpecNone, GetButterfly, origin, childEdge));
        }

        StorageAccessData& data = *m_graph.m_storageAccessData.add();
        data.offset = variant.offset();
        data.identifierNumber = identifierNumber;

        node->convertToGetByOffset(data, propertyStorage);
    }

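    // Rewrites a PutById-like node as PutByOffset. The property storage operand depends on the
    // variant: inline storage uses the base itself; a non-reallocating store loads the existing
    // butterfly; a transition adding the first out-of-line property allocates storage; a growing
    // transition reallocates it. For transitions, the PutStructure is emitted after the store
    // (see the FIXME below).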
    void emitPutByOffset(unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const PutByIdVariant& variant, unsigned identifierNumber)
    {
        NodeOrigin origin = node->origin;
        Edge childEdge = node->child1();

        addBaseCheck(indexInBlock, node, baseValue, variant.oldStructure());

        childEdge.setUseKind(KnownCellUse);

        Transition* transition = 0;
        if (variant.kind() == PutByIdVariant::Transition) {
            transition = m_graph.m_transitions.add(
                variant.oldStructureForTransition(), variant.newStructure());
        }

        Edge propertyStorage;

        if (isInlineOffset(variant.offset()))
            propertyStorage = childEdge;
        else if (!variant.reallocatesStorage()) {
            propertyStorage = Edge(m_insertionSet.insertNode(
                indexInBlock, SpecNone, GetButterfly, origin, childEdge));
        } else if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
            ASSERT(variant.newStructure()->outOfLineCapacity());
            ASSERT(!isInlineOffset(variant.offset()));
            Node* allocatePropertyStorage = m_insertionSet.insertNode(
                indexInBlock, SpecNone, AllocatePropertyStorage,
                origin, OpInfo(transition), childEdge);
            propertyStorage = Edge(allocatePropertyStorage);
        } else {
            ASSERT(variant.oldStructureForTransition()->outOfLineCapacity());
            ASSERT(variant.newStructure()->outOfLineCapacity() > variant.oldStructureForTransition()->outOfLineCapacity());
            ASSERT(!isInlineOffset(variant.offset()));

            Node* reallocatePropertyStorage = m_insertionSet.insertNode(
                indexInBlock, SpecNone, ReallocatePropertyStorage, origin,
                OpInfo(transition), childEdge,
                Edge(m_insertionSet.insertNode(
                    indexInBlock, SpecNone, GetButterfly, origin, childEdge)));
            propertyStorage = Edge(reallocatePropertyStorage);
        }

        StorageAccessData& data = *m_graph.m_storageAccessData.add();
        data.offset = variant.offset();
        data.identifierNumber = identifierNumber;

        node->convertToPutByOffset(data, propertyStorage);

        if (variant.kind() == PutByIdVariant::Transition) {
            // FIXME: PutStructure goes last until we fix either
            // https://bugs.webkit.org/show_bug.cgi?id=142921 or
            // https://bugs.webkit.org/show_bug.cgi?id=142924.
            m_insertionSet.insertNode(
                indexInBlock + 1, SpecNone, PutStructure, origin, OpInfo(transition), childEdge);
        }
    }

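    // Ensures the base actually has one of the structures the chosen variant expects: emits a
    // CheckStructure when the proven abstract value is not already a subset of that set, or just
    // a cell check when only cell-ness is in doubt.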
    void addBaseCheck(
        unsigned indexInBlock, Node* node, const AbstractValue& baseValue, const StructureSet& set)
    {
        if (!baseValue.m_structure.isSubsetOf(set)) {
            // Arises when we prune MultiGetByOffset. We could have a
            // MultiGetByOffset with a single variant that checks for structure S,
            // and the input has structures S and T, for example.
            m_insertionSet.insertNode(
                indexInBlock, SpecNone, CheckStructure, node->origin,
                OpInfo(m_graph.addStructureSet(set)), node->child1());
            return;
        }

        if (baseValue.m_type & ~SpecCell)
            m_insertionSet.insertCheck(indexInBlock, node->origin, node->child1());
    }

    void addChecks(
        NodeOrigin origin, unsigned indexInBlock, const ConstantStructureCheckVector& checks)
    {
        for (unsigned i = 0; i < checks.size(); ++i) {
            addStructureTransitionCheck(
                origin, indexInBlock, checks[i].constant(), checks[i].structure());
        }
    }

    void addStructureTransitionCheck(NodeOrigin origin, unsigned indexInBlock, JSCell* cell, Structure* structure)
    {
        if (m_graph.registerStructure(cell->structure()) == StructureRegisteredAndWatched)
            return;

        m_graph.registerStructure(structure);

        Node* weakConstant = m_insertionSet.insertNode(
            indexInBlock, speculationFromValue(cell), JSConstant, origin,
            OpInfo(m_graph.freeze(cell)));

        m_insertionSet.insertNode(
            indexInBlock, SpecNone, CheckStructure, origin,
            OpInfo(m_graph.addStructureSet(structure)), Edge(weakConstant, CellUse));
    }

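    // In SSA form, folding a Phi into a constant leaves its Upsilons pointing at a constant
    // node; such Upsilons no longer feed a Phi and are dropped here. Any other phi() target
    // indicates a broken graph.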
    void fixUpsilons(BasicBlock* block)
    {
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);
            if (node->op() != Upsilon)
                continue;
            switch (node->phi()->op()) {
            case Phi:
                break;
            case JSConstant:
            case DoubleConstant:
            case Int52Constant:
                node->remove();
                break;
            default:
                DFG_CRASH(m_graph, node, "Bad Upsilon phi() pointer");
            }
        }
    }

private:
    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;
    InsertionSet m_insertionSet;
};

bool performConstantFolding(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Constant Folding Phase");
    return runPhase<ConstantFoldingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)