2 * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGFixupPhase.h"
31 #include "ArrayPrototype.h"
33 #include "DFGInsertionSet.h"
35 #include "DFGPredictionPropagationPhase.h"
36 #include "DFGVariableAccessDataDump.h"
37 #include "JSCInlines.h"
38 #include "TypeLocation.h"
40 namespace JSC
{ namespace DFG
{
42 class FixupPhase
: public Phase
{
44 FixupPhase(Graph
& graph
)
45 : Phase(graph
, "fixup")
46 , m_insertionSet(graph
)
52 ASSERT(m_graph
.m_fixpointState
== BeforeFixpoint
);
53 ASSERT(m_graph
.m_form
== ThreadedCPS
);
55 m_profitabilityChanged
= false;
56 for (BlockIndex blockIndex
= 0; blockIndex
< m_graph
.numBlocks(); ++blockIndex
)
57 fixupBlock(m_graph
.block(blockIndex
));
59 while (m_profitabilityChanged
) {
60 m_profitabilityChanged
= false;
62 for (unsigned i
= m_graph
.m_argumentPositions
.size(); i
--;)
63 m_graph
.m_argumentPositions
[i
].mergeArgumentUnboxingAwareness();
65 for (BlockIndex blockIndex
= 0; blockIndex
< m_graph
.numBlocks(); ++blockIndex
)
66 fixupGetAndSetLocalsInBlock(m_graph
.block(blockIndex
));
69 for (BlockIndex blockIndex
= 0; blockIndex
< m_graph
.numBlocks(); ++blockIndex
)
70 injectTypeConversionsInBlock(m_graph
.block(blockIndex
));
72 m_graph
.m_planStage
= PlanStage::AfterFixup
;
78 void fixupBlock(BasicBlock
* block
)
82 ASSERT(block
->isReachable
);
84 for (m_indexInBlock
= 0; m_indexInBlock
< block
->size(); ++m_indexInBlock
) {
85 m_currentNode
= block
->at(m_indexInBlock
);
86 fixupNode(m_currentNode
);
88 m_insertionSet
.execute(block
);
91 void fixupNode(Node
* node
)
93 NodeType op
= node
->op();
97 // This gets handled by fixupGetAndSetLocalsInBlock().
107 fixIntConvertingEdge(node
->child1());
108 fixIntConvertingEdge(node
->child2());
113 fixIntConvertingEdge(node
->child1());
114 fixIntConvertingEdge(node
->child2());
115 node
->setOp(ArithMul
);
116 node
->setArithMode(Arith::Unchecked
);
117 node
->child1().setUseKind(Int32Use
);
118 node
->child2().setUseKind(Int32Use
);
123 fixIntConvertingEdge(node
->child1());
124 node
->setArithMode(Arith::Unchecked
);
128 case UInt32ToNumber
: {
129 fixIntConvertingEdge(node
->child1());
130 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
131 node
->convertToIdentity();
132 else if (node
->canSpeculateInt32(FixupPass
))
133 node
->setArithMode(Arith::CheckOverflow
);
135 node
->setArithMode(Arith::DoOverflow
);
136 node
->setResult(NodeResultDouble
);
142 if (attemptToMakeIntegerAdd(node
)) {
143 node
->setOp(ArithAdd
);
146 if (Node::shouldSpeculateNumberOrBooleanExpectingDefined(node
->child1().node(), node
->child2().node())) {
147 fixDoubleOrBooleanEdge(node
->child1());
148 fixDoubleOrBooleanEdge(node
->child2());
149 node
->setOp(ArithAdd
);
150 node
->setResult(NodeResultDouble
);
154 // FIXME: Optimize for the case where one of the operands is the
155 // empty string. Also consider optimizing for the case where we don't
156 // believe either side is the empty string. Both of these things should
159 if (node
->child1()->shouldSpeculateString()
160 && attemptToMakeFastStringAdd
<StringUse
>(node
, node
->child1(), node
->child2()))
162 if (node
->child2()->shouldSpeculateString()
163 && attemptToMakeFastStringAdd
<StringUse
>(node
, node
->child2(), node
->child1()))
165 if (node
->child1()->shouldSpeculateStringObject()
166 && attemptToMakeFastStringAdd
<StringObjectUse
>(node
, node
->child1(), node
->child2()))
168 if (node
->child2()->shouldSpeculateStringObject()
169 && attemptToMakeFastStringAdd
<StringObjectUse
>(node
, node
->child2(), node
->child1()))
171 if (node
->child1()->shouldSpeculateStringOrStringObject()
172 && attemptToMakeFastStringAdd
<StringOrStringObjectUse
>(node
, node
->child1(), node
->child2()))
174 if (node
->child2()->shouldSpeculateStringOrStringObject()
175 && attemptToMakeFastStringAdd
<StringOrStringObjectUse
>(node
, node
->child2(), node
->child1()))
187 if (attemptToMakeIntegerAdd(node
))
189 fixDoubleOrBooleanEdge(node
->child1());
190 fixDoubleOrBooleanEdge(node
->child2());
191 node
->setResult(NodeResultDouble
);
196 if (m_graph
.negateShouldSpeculateInt32(node
, FixupPass
)) {
197 fixIntOrBooleanEdge(node
->child1());
198 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
199 node
->setArithMode(Arith::Unchecked
);
200 else if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
201 node
->setArithMode(Arith::CheckOverflow
);
203 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
206 if (m_graph
.negateShouldSpeculateMachineInt(node
, FixupPass
)) {
207 fixEdge
<Int52RepUse
>(node
->child1());
208 if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
209 node
->setArithMode(Arith::CheckOverflow
);
211 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
212 node
->setResult(NodeResultInt52
);
215 fixDoubleOrBooleanEdge(node
->child1());
216 node
->setResult(NodeResultDouble
);
221 if (m_graph
.mulShouldSpeculateInt32(node
, FixupPass
)) {
222 fixIntOrBooleanEdge(node
->child1());
223 fixIntOrBooleanEdge(node
->child2());
224 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
225 node
->setArithMode(Arith::Unchecked
);
226 else if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
227 node
->setArithMode(Arith::CheckOverflow
);
229 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
232 if (m_graph
.mulShouldSpeculateMachineInt(node
, FixupPass
)) {
233 fixEdge
<Int52RepUse
>(node
->child1());
234 fixEdge
<Int52RepUse
>(node
->child2());
235 if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
236 node
->setArithMode(Arith::CheckOverflow
);
238 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
239 node
->setResult(NodeResultInt52
);
242 fixDoubleOrBooleanEdge(node
->child1());
243 fixDoubleOrBooleanEdge(node
->child2());
244 node
->setResult(NodeResultDouble
);
250 if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node
->child1().node(), node
->child2().node())
251 && node
->canSpeculateInt32(FixupPass
)) {
252 if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7IDIVSupported()) {
253 fixIntOrBooleanEdge(node
->child1());
254 fixIntOrBooleanEdge(node
->child2());
255 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
256 node
->setArithMode(Arith::Unchecked
);
257 else if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
258 node
->setArithMode(Arith::CheckOverflow
);
260 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
264 // This will cause conversion nodes to be inserted later.
265 fixDoubleOrBooleanEdge(node
->child1());
266 fixDoubleOrBooleanEdge(node
->child2());
268 // We don't need to do ref'ing on the children because we're stealing them from
269 // the original division.
270 Node
* newDivision
= m_insertionSet
.insertNode(
271 m_indexInBlock
, SpecBytecodeDouble
, *node
);
272 newDivision
->setResult(NodeResultDouble
);
274 node
->setOp(DoubleAsInt32
);
275 node
->children
.initialize(Edge(newDivision
, DoubleRepUse
), Edge(), Edge());
276 if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
277 node
->setArithMode(Arith::CheckOverflow
);
279 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
282 fixDoubleOrBooleanEdge(node
->child1());
283 fixDoubleOrBooleanEdge(node
->child2());
284 node
->setResult(NodeResultDouble
);
290 if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node
->child1().node(), node
->child2().node())
291 && node
->canSpeculateInt32(FixupPass
)) {
292 fixIntOrBooleanEdge(node
->child1());
293 fixIntOrBooleanEdge(node
->child2());
296 fixDoubleOrBooleanEdge(node
->child1());
297 fixDoubleOrBooleanEdge(node
->child2());
298 node
->setResult(NodeResultDouble
);
303 if (node
->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
304 && node
->canSpeculateInt32(FixupPass
)) {
305 fixIntOrBooleanEdge(node
->child1());
308 fixDoubleOrBooleanEdge(node
->child1());
309 node
->setResult(NodeResultDouble
);
314 node
->setResult(NodeResultDouble
);
315 if (node
->child2()->shouldSpeculateInt32OrBooleanForArithmetic()) {
316 fixDoubleOrBooleanEdge(node
->child1());
317 fixIntOrBooleanEdge(node
->child2());
321 fixDoubleOrBooleanEdge(node
->child1());
322 fixDoubleOrBooleanEdge(node
->child2());
327 if (node
->child1()->shouldSpeculateInt32OrBooleanForArithmetic() && node
->canSpeculateInt32(FixupPass
)) {
328 fixIntOrBooleanEdge(node
->child1());
329 insertCheck
<Int32Use
>(m_indexInBlock
, node
->child1().node());
330 node
->convertToIdentity();
333 fixDoubleOrBooleanEdge(node
->child1());
335 if (isInt32OrBooleanSpeculation(node
->getHeapPrediction()) && m_graph
.roundShouldSpeculateInt32(node
, FixupPass
)) {
336 node
->setResult(NodeResultInt32
);
337 if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
338 node
->setArithRoundingMode(Arith::RoundingMode::Int32
);
340 node
->setArithRoundingMode(Arith::RoundingMode::Int32WithNegativeZeroCheck
);
342 node
->setResult(NodeResultDouble
);
343 node
->setArithRoundingMode(Arith::RoundingMode::Double
);
353 fixDoubleOrBooleanEdge(node
->child1());
354 node
->setResult(NodeResultDouble
);
359 if (node
->child1()->shouldSpeculateBoolean())
360 fixEdge
<BooleanUse
>(node
->child1());
361 else if (node
->child1()->shouldSpeculateObjectOrOther())
362 fixEdge
<ObjectOrOtherUse
>(node
->child1());
363 else if (node
->child1()->shouldSpeculateInt32OrBoolean())
364 fixIntOrBooleanEdge(node
->child1());
365 else if (node
->child1()->shouldSpeculateNumber())
366 fixEdge
<DoubleRepUse
>(node
->child1());
367 else if (node
->child1()->shouldSpeculateString())
368 fixEdge
<StringUse
>(node
->child1());
372 case CompareEqConstant
: {
380 case CompareGreaterEq
: {
381 if (node
->op() == CompareEq
382 && Node::shouldSpeculateBoolean(node
->child1().node(), node
->child2().node())) {
383 fixEdge
<BooleanUse
>(node
->child1());
384 fixEdge
<BooleanUse
>(node
->child2());
385 node
->clearFlags(NodeMustGenerate
);
388 if (Node::shouldSpeculateInt32OrBoolean(node
->child1().node(), node
->child2().node())) {
389 fixIntOrBooleanEdge(node
->child1());
390 fixIntOrBooleanEdge(node
->child2());
391 node
->clearFlags(NodeMustGenerate
);
395 && Node::shouldSpeculateMachineInt(node
->child1().node(), node
->child2().node())) {
396 fixEdge
<Int52RepUse
>(node
->child1());
397 fixEdge
<Int52RepUse
>(node
->child2());
398 node
->clearFlags(NodeMustGenerate
);
401 if (Node::shouldSpeculateNumberOrBoolean(node
->child1().node(), node
->child2().node())) {
402 fixDoubleOrBooleanEdge(node
->child1());
403 fixDoubleOrBooleanEdge(node
->child2());
404 node
->clearFlags(NodeMustGenerate
);
407 if (node
->op() != CompareEq
)
409 if (node
->child1()->shouldSpeculateStringIdent() && node
->child2()->shouldSpeculateStringIdent()) {
410 fixEdge
<StringIdentUse
>(node
->child1());
411 fixEdge
<StringIdentUse
>(node
->child2());
412 node
->clearFlags(NodeMustGenerate
);
415 if (node
->child1()->shouldSpeculateString() && node
->child2()->shouldSpeculateString() && GPRInfo::numberOfRegisters
>= 7) {
416 fixEdge
<StringUse
>(node
->child1());
417 fixEdge
<StringUse
>(node
->child2());
418 node
->clearFlags(NodeMustGenerate
);
421 if (node
->child1()->shouldSpeculateObject() && node
->child2()->shouldSpeculateObject()) {
422 fixEdge
<ObjectUse
>(node
->child1());
423 fixEdge
<ObjectUse
>(node
->child2());
424 node
->clearFlags(NodeMustGenerate
);
427 if (node
->child1()->shouldSpeculateObject() && node
->child2()->shouldSpeculateObjectOrOther()) {
428 fixEdge
<ObjectUse
>(node
->child1());
429 fixEdge
<ObjectOrOtherUse
>(node
->child2());
430 node
->clearFlags(NodeMustGenerate
);
433 if (node
->child1()->shouldSpeculateObjectOrOther() && node
->child2()->shouldSpeculateObject()) {
434 fixEdge
<ObjectOrOtherUse
>(node
->child1());
435 fixEdge
<ObjectUse
>(node
->child2());
436 node
->clearFlags(NodeMustGenerate
);
442 case CompareStrictEq
: {
443 if (Node::shouldSpeculateBoolean(node
->child1().node(), node
->child2().node())) {
444 fixEdge
<BooleanUse
>(node
->child1());
445 fixEdge
<BooleanUse
>(node
->child2());
448 if (Node::shouldSpeculateInt32(node
->child1().node(), node
->child2().node())) {
449 fixEdge
<Int32Use
>(node
->child1());
450 fixEdge
<Int32Use
>(node
->child2());
454 && Node::shouldSpeculateMachineInt(node
->child1().node(), node
->child2().node())) {
455 fixEdge
<Int52RepUse
>(node
->child1());
456 fixEdge
<Int52RepUse
>(node
->child2());
459 if (Node::shouldSpeculateNumber(node
->child1().node(), node
->child2().node())) {
460 fixEdge
<DoubleRepUse
>(node
->child1());
461 fixEdge
<DoubleRepUse
>(node
->child2());
464 if (node
->child1()->shouldSpeculateStringIdent() && node
->child2()->shouldSpeculateStringIdent()) {
465 fixEdge
<StringIdentUse
>(node
->child1());
466 fixEdge
<StringIdentUse
>(node
->child2());
469 if (node
->child1()->shouldSpeculateString() && node
->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters
>= 7) || isFTL(m_graph
.m_plan
.mode
))) {
470 fixEdge
<StringUse
>(node
->child1());
471 fixEdge
<StringUse
>(node
->child2());
474 WatchpointSet
* masqueradesAsUndefinedWatchpoint
= m_graph
.globalObjectFor(node
->origin
.semantic
)->masqueradesAsUndefinedWatchpoint();
475 if (masqueradesAsUndefinedWatchpoint
->isStillValid()) {
477 if (node
->child1()->shouldSpeculateObject()) {
478 m_graph
.watchpoints().addLazily(masqueradesAsUndefinedWatchpoint
);
479 fixEdge
<ObjectUse
>(node
->child1());
482 if (node
->child2()->shouldSpeculateObject()) {
483 m_graph
.watchpoints().addLazily(masqueradesAsUndefinedWatchpoint
);
484 fixEdge
<ObjectUse
>(node
->child2());
488 } else if (node
->child1()->shouldSpeculateObject() && node
->child2()->shouldSpeculateObject()) {
489 fixEdge
<ObjectUse
>(node
->child1());
490 fixEdge
<ObjectUse
>(node
->child2());
493 if (node
->child1()->shouldSpeculateMisc()) {
494 fixEdge
<MiscUse
>(node
->child1());
497 if (node
->child2()->shouldSpeculateMisc()) {
498 fixEdge
<MiscUse
>(node
->child2());
501 if (node
->child1()->shouldSpeculateStringIdent()
502 && node
->child2()->shouldSpeculateNotStringVar()) {
503 fixEdge
<StringIdentUse
>(node
->child1());
504 fixEdge
<NotStringVarUse
>(node
->child2());
507 if (node
->child2()->shouldSpeculateStringIdent()
508 && node
->child1()->shouldSpeculateNotStringVar()) {
509 fixEdge
<StringIdentUse
>(node
->child2());
510 fixEdge
<NotStringVarUse
>(node
->child1());
513 if (node
->child1()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters
>= 8) || isFTL(m_graph
.m_plan
.mode
))) {
514 fixEdge
<StringUse
>(node
->child1());
517 if (node
->child2()->shouldSpeculateString() && ((GPRInfo::numberOfRegisters
>= 8) || isFTL(m_graph
.m_plan
.mode
))) {
518 fixEdge
<StringUse
>(node
->child2());
524 case StringFromCharCode
:
525 fixEdge
<Int32Use
>(node
->child1());
529 case StringCharCodeAt
: {
530 // Currently we have no good way of refining these.
531 ASSERT(node
->arrayMode() == ArrayMode(Array::String
));
532 blessArrayOperation(node
->child1(), node
->child2(), node
->child3());
533 fixEdge
<KnownCellUse
>(node
->child1());
534 fixEdge
<Int32Use
>(node
->child2());
539 if (!node
->prediction()) {
540 m_insertionSet
.insertNode(
541 m_indexInBlock
, SpecNone
, ForceOSRExit
, node
->origin
);
545 node
->arrayMode().refine(
547 node
->child1()->prediction(),
548 node
->child2()->prediction(),
551 blessArrayOperation(node
->child1(), node
->child2(), node
->child3());
553 ArrayMode arrayMode
= node
->arrayMode();
554 switch (arrayMode
.type()) {
555 case Array::Contiguous
:
557 if (arrayMode
.arrayClass() == Array::OriginalArray
558 && arrayMode
.speculation() == Array::InBounds
) {
559 JSGlobalObject
* globalObject
= m_graph
.globalObjectFor(node
->origin
.semantic
);
560 if (globalObject
->arrayPrototypeChainIsSane()) {
561 // Check if SaneChain will work on a per-type basis. Note that:
563 // 1) We don't want double arrays to sometimes return undefined, since
564 // that would require a change to the return type and it would pessimise
565 // things a lot. So, we'd only want to do that if we actually had
566 // evidence that we could read from a hole. That's pretty annoying.
567 // Likely the best way to handle that case is with an equivalent of
568 // SaneChain for OutOfBounds. For now we just detect when Undefined and
569 // NaN are indistinguishable according to backwards propagation, and just
570 // use SaneChain in that case. This happens to catch a lot of cases.
572 // 2) We don't want int32 array loads to have to do a hole check just to
573 // coerce to Undefined, since that would mean twice the checks.
575 // This has two implications. First, we have to do more checks than we'd
576 // like. It's unfortunate that we have to do the hole check. Second,
577 // some accesses that hit a hole will now need to take the full-blown
578 // out-of-bounds slow path. We can fix that with:
579 // https://bugs.webkit.org/show_bug.cgi?id=144668
581 bool canDoSaneChain
= false;
582 switch (arrayMode
.type()) {
583 case Array::Contiguous
:
584 // This happens to be entirely natural. We already would have
585 // returned any JSValue, and now we'll return Undefined. We still do
586 // the check but it doesn't require taking any kind of slow path.
587 canDoSaneChain
= true;
591 if (!(node
->flags() & NodeBytecodeUsesAsOther
)) {
592 // Holes look like NaN already, so if the user doesn't care
593 // about the difference between Undefined and NaN then we can
595 canDoSaneChain
= true;
603 if (canDoSaneChain
) {
604 m_graph
.watchpoints().addLazily(
605 globalObject
->arrayPrototype()->structure()->transitionWatchpointSet());
606 m_graph
.watchpoints().addLazily(
607 globalObject
->objectPrototype()->structure()->transitionWatchpointSet());
608 node
->setArrayMode(arrayMode
.withSpeculation(Array::SaneChain
));
615 if ((node
->prediction() & ~SpecString
)
616 || m_graph
.hasExitSite(node
->origin
.semantic
, OutOfBounds
))
617 node
->setArrayMode(arrayMode
.withSpeculation(Array::OutOfBounds
));
624 arrayMode
= node
->arrayMode();
625 switch (arrayMode
.type()) {
626 case Array::SelectUsingPredictions
:
627 case Array::Unprofiled
:
628 case Array::Undecided
:
629 RELEASE_ASSERT_NOT_REACHED();
632 #if USE(JSVALUE32_64)
633 fixEdge
<CellUse
>(node
->child1()); // Speculating cell due to register pressure on 32-bit.
636 case Array::ForceExit
:
639 fixEdge
<KnownCellUse
>(node
->child1());
640 fixEdge
<Int32Use
>(node
->child2());
644 switch (arrayMode
.type()) {
646 if (!arrayMode
.isOutOfBounds())
647 node
->setResult(NodeResultDouble
);
650 case Array::Float32Array
:
651 case Array::Float64Array
:
652 node
->setResult(NodeResultDouble
);
655 case Array::Uint32Array
:
656 if (node
->shouldSpeculateInt32())
658 if (node
->shouldSpeculateMachineInt() && enableInt52())
659 node
->setResult(NodeResultInt52
);
661 node
->setResult(NodeResultDouble
);
673 case PutByValAlias
: {
674 Edge
& child1
= m_graph
.varArgChild(node
, 0);
675 Edge
& child2
= m_graph
.varArgChild(node
, 1);
676 Edge
& child3
= m_graph
.varArgChild(node
, 2);
679 node
->arrayMode().refine(
681 child1
->prediction(),
682 child2
->prediction(),
683 child3
->prediction()));
685 blessArrayOperation(child1
, child2
, m_graph
.varArgChild(node
, 3));
687 switch (node
->arrayMode().modeForPut().type()) {
688 case Array::SelectUsingPredictions
:
689 case Array::Unprofiled
:
690 case Array::Undecided
:
691 RELEASE_ASSERT_NOT_REACHED();
693 case Array::ForceExit
:
695 #if USE(JSVALUE32_64)
696 // Due to register pressure on 32-bit, we speculate cell and
697 // ignore the base-is-not-cell case entirely by letting the
698 // baseline JIT handle it.
699 fixEdge
<CellUse
>(child1
);
703 fixEdge
<KnownCellUse
>(child1
);
704 fixEdge
<Int32Use
>(child2
);
705 fixEdge
<Int32Use
>(child3
);
708 fixEdge
<KnownCellUse
>(child1
);
709 fixEdge
<Int32Use
>(child2
);
710 fixEdge
<DoubleRepRealUse
>(child3
);
712 case Array::Int8Array
:
713 case Array::Int16Array
:
714 case Array::Int32Array
:
715 case Array::Uint8Array
:
716 case Array::Uint8ClampedArray
:
717 case Array::Uint16Array
:
718 case Array::Uint32Array
:
719 fixEdge
<KnownCellUse
>(child1
);
720 fixEdge
<Int32Use
>(child2
);
721 if (child3
->shouldSpeculateInt32())
722 fixIntOrBooleanEdge(child3
);
723 else if (child3
->shouldSpeculateMachineInt())
724 fixEdge
<Int52RepUse
>(child3
);
726 fixDoubleOrBooleanEdge(child3
);
728 case Array::Float32Array
:
729 case Array::Float64Array
:
730 fixEdge
<KnownCellUse
>(child1
);
731 fixEdge
<Int32Use
>(child2
);
732 fixDoubleOrBooleanEdge(child3
);
734 case Array::Contiguous
:
735 case Array::ArrayStorage
:
736 case Array::SlowPutArrayStorage
:
737 fixEdge
<KnownCellUse
>(child1
);
738 fixEdge
<Int32Use
>(child2
);
739 speculateForBarrier(child3
);
742 fixEdge
<KnownCellUse
>(child1
);
743 fixEdge
<Int32Use
>(child2
);
750 // May need to refine the array mode in case the value prediction contravenes
751 // the array prediction. For example, we may have evidence showing that the
752 // array is in Int32 mode, but the value we're storing is likely to be a double.
753 // Then we should turn this into a conversion to Double array followed by the
754 // push. On the other hand, we absolutely don't want to refine based on the
755 // base prediction. If it has non-cell garbage in it, then we want that to be
756 // ignored. That's because ArrayPush can't handle any array modes that aren't
757 // array-related - so if refine() turned this into a "Generic" ArrayPush then
758 // that would break things.
760 node
->arrayMode().refine(
762 node
->child1()->prediction() & SpecCell
,
764 node
->child2()->prediction()));
765 blessArrayOperation(node
->child1(), Edge(), node
->child3());
766 fixEdge
<KnownCellUse
>(node
->child1());
768 switch (node
->arrayMode().type()) {
770 fixEdge
<Int32Use
>(node
->child2());
773 fixEdge
<DoubleRepRealUse
>(node
->child2());
775 case Array::Contiguous
:
776 case Array::ArrayStorage
:
777 speculateForBarrier(node
->child2());
786 blessArrayOperation(node
->child1(), Edge(), node
->child2());
787 fixEdge
<KnownCellUse
>(node
->child1());
793 fixEdge
<CellUse
>(node
->child1());
794 fixEdge
<CellUse
>(node
->child2());
799 if (node
->child1()->shouldSpeculateBoolean())
800 fixEdge
<BooleanUse
>(node
->child1());
801 else if (node
->child1()->shouldSpeculateObjectOrOther())
802 fixEdge
<ObjectOrOtherUse
>(node
->child1());
803 else if (node
->child1()->shouldSpeculateInt32OrBoolean())
804 fixIntOrBooleanEdge(node
->child1());
805 else if (node
->child1()->shouldSpeculateNumber())
806 fixEdge
<DoubleRepUse
>(node
->child1());
807 else if (node
->child1()->shouldSpeculateString())
808 fixEdge
<StringUse
>(node
->child1());
813 SwitchData
* data
= node
->switchData();
814 switch (data
->kind
) {
816 if (node
->child1()->shouldSpeculateInt32())
817 fixEdge
<Int32Use
>(node
->child1());
820 if (node
->child1()->shouldSpeculateString())
821 fixEdge
<StringUse
>(node
->child1());
824 if (node
->child1()->shouldSpeculateStringIdent())
825 fixEdge
<StringIdentUse
>(node
->child1());
826 else if (node
->child1()->shouldSpeculateString())
827 fixEdge
<StringUse
>(node
->child1());
830 if (node
->child1()->shouldSpeculateCell())
831 fixEdge
<CellUse
>(node
->child1());
832 // else it's fine for this to have UntypedUse; we will handle this by just making
833 // non-cells take the default case.
840 fixupToPrimitive(node
);
845 case CallStringConstructor
: {
846 fixupToStringOrCallStringConstructor(node
);
850 case NewStringObject
: {
851 fixEdge
<KnownStringUse
>(node
->child1());
856 for (unsigned i
= m_graph
.varArgNumChildren(node
); i
--;) {
857 node
->setIndexingType(
858 leastUpperBoundOfIndexingTypeAndType(
859 node
->indexingType(), m_graph
.varArgChild(node
, i
)->prediction()));
861 switch (node
->indexingType()) {
862 case ALL_BLANK_INDEXING_TYPES
:
865 case ALL_UNDECIDED_INDEXING_TYPES
:
866 if (node
->numChildren()) {
867 // This will only happen if the children have no type predictions. We
868 // would have already exited by now, but insert a forced exit just to
870 m_insertionSet
.insertNode(
871 m_indexInBlock
, SpecNone
, ForceOSRExit
, node
->origin
);
874 case ALL_INT32_INDEXING_TYPES
:
875 for (unsigned operandIndex
= 0; operandIndex
< node
->numChildren(); ++operandIndex
)
876 fixEdge
<Int32Use
>(m_graph
.m_varArgChildren
[node
->firstChild() + operandIndex
]);
878 case ALL_DOUBLE_INDEXING_TYPES
:
879 for (unsigned operandIndex
= 0; operandIndex
< node
->numChildren(); ++operandIndex
)
880 fixEdge
<DoubleRepRealUse
>(m_graph
.m_varArgChildren
[node
->firstChild() + operandIndex
]);
882 case ALL_CONTIGUOUS_INDEXING_TYPES
:
883 case ALL_ARRAY_STORAGE_INDEXING_TYPES
:
892 case NewTypedArray
: {
893 if (node
->child1()->shouldSpeculateInt32()) {
894 fixEdge
<Int32Use
>(node
->child1());
895 node
->clearFlags(NodeMustGenerate
);
901 case NewArrayWithSize
: {
902 fixEdge
<Int32Use
>(node
->child1());
907 ECMAMode ecmaMode
= m_graph
.executableFor(node
->origin
.semantic
)->isStrictMode() ? StrictMode
: NotStrictMode
;
909 if (node
->child1()->shouldSpeculateOther()) {
910 if (ecmaMode
== StrictMode
) {
911 fixEdge
<OtherUse
>(node
->child1());
912 node
->convertToIdentity();
916 m_insertionSet
.insertNode(
917 m_indexInBlock
, SpecNone
, Check
, node
->origin
,
918 Edge(node
->child1().node(), OtherUse
));
919 observeUseKindOnNode
<OtherUse
>(node
->child1().node());
920 m_graph
.convertToConstant(
921 node
, m_graph
.globalThisObjectFor(node
->origin
.semantic
));
925 if (isFinalObjectSpeculation(node
->child1()->prediction())) {
926 fixEdge
<FinalObjectUse
>(node
->child1());
927 node
->convertToIdentity();
935 fixEdge
<KnownCellUse
>(node
->child1());
940 case GetFromArguments
: {
941 fixEdge
<KnownCellUse
>(node
->child1());
946 case PutToArguments
: {
947 fixEdge
<KnownCellUse
>(node
->child1());
948 speculateForBarrier(node
->child2());
956 fixEdge
<KnownCellUse
>(node
->child1());
960 case AllocatePropertyStorage
:
961 case ReallocatePropertyStorage
: {
962 fixEdge
<KnownCellUse
>(node
->child1());
968 if (!node
->child1()->shouldSpeculateCell())
971 // If we hadn't exited because of BadCache, BadIndexingType, or ExoticObjectMode, then
972 // leave this as a GetById.
973 if (!m_graph
.hasExitSite(node
->origin
.semantic
, BadCache
)
974 && !m_graph
.hasExitSite(node
->origin
.semantic
, BadIndexingType
)
975 && !m_graph
.hasExitSite(node
->origin
.semantic
, ExoticObjectMode
)) {
976 auto uid
= m_graph
.identifiers()[node
->identifierNumber()];
977 if (uid
== vm().propertyNames
->length
.impl()) {
978 attemptToMakeGetArrayLength(node
);
981 if (uid
== vm().propertyNames
->byteLength
.impl()) {
982 attemptToMakeGetTypedArrayByteLength(node
);
985 if (uid
== vm().propertyNames
->byteOffset
.impl()) {
986 attemptToMakeGetTypedArrayByteOffset(node
);
990 fixEdge
<CellUse
>(node
->child1());
996 case PutByIdDirect
: {
997 fixEdge
<CellUse
>(node
->child1());
998 speculateForBarrier(node
->child2());
1002 case GetExecutable
: {
1003 fixEdge
<FunctionUse
>(node
->child1());
1007 case CheckStructure
:
1009 case CheckHasInstance
:
1011 case GetButterfly
: {
1012 fixEdge
<CellUse
>(node
->child1());
1017 case ArrayifyToStructure
: {
1018 fixEdge
<CellUse
>(node
->child1());
1020 fixEdge
<Int32Use
>(node
->child2());
1025 case GetGetterSetterByOffset
: {
1026 if (!node
->child1()->hasStorageResult())
1027 fixEdge
<KnownCellUse
>(node
->child1());
1028 fixEdge
<KnownCellUse
>(node
->child2());
1032 case MultiGetByOffset
: {
1033 fixEdge
<CellUse
>(node
->child1());
1038 if (!node
->child1()->hasStorageResult())
1039 fixEdge
<KnownCellUse
>(node
->child1());
1040 fixEdge
<KnownCellUse
>(node
->child2());
1041 speculateForBarrier(node
->child3());
1045 case MultiPutByOffset
: {
1046 fixEdge
<CellUse
>(node
->child1());
1047 speculateForBarrier(node
->child2());
1052 if (!(node
->child1()->prediction() & ~SpecCell
))
1053 fixEdge
<CellUse
>(node
->child1());
1054 fixEdge
<CellUse
>(node
->child2());
1059 // FIXME: We should at some point have array profiling on op_in, in which
1060 // case we would be able to turn this into a kind of GetByVal.
1062 fixEdge
<CellUse
>(node
->child2());
1067 m_graph
.doToChildren(
1070 switch (edge
.useKind()) {
1072 if (edge
->shouldSpeculateInt32ForArithmetic())
1073 edge
.setUseKind(Int32Use
);
1078 observeUseKindOnEdge(edge
);
1084 // Phantoms are meaningless past Fixup. We recreate them on-demand in the backend.
1089 RELEASE_ASSERT(enableInt52());
1090 node
->convertToIdentity();
1091 fixEdge
<Int52RepUse
>(node
->child1());
1092 node
->setResult(NodeResultInt52
);
1096 case GetArrayLength
:
1099 case GetIndexedPropertyStorage
:
1100 case GetTypedArrayByteOffset
:
1102 case CheckTierUpInLoop
:
1103 case CheckTierUpAtReturn
:
1104 case CheckTierUpAndOSREnter
:
1105 case CheckTierUpWithNestedTriggerAndOSREnter
:
1106 case InvalidationPoint
:
1109 case ConstantStoragePointer
:
1116 case Identity
: // This should have been cleaned up.
1117 case BooleanToNumber
:
1118 case PhantomNewObject
:
1119 case PhantomNewFunction
:
1120 case PhantomCreateActivation
:
1121 case PhantomDirectArguments
:
1122 case PhantomClonedArguments
:
1123 case ForwardVarargs
:
1124 case GetMyArgumentByVal
:
1126 case CheckStructureImmediate
:
1127 case MaterializeNewObject
:
1128 case MaterializeCreateActivation
:
1133 // These are just nodes that we don't currently expect to see during fixup.
1134 // If we ever wanted to insert them prior to fixup, then we just have to create
1135 // fixup rules for them.
1136 DFG_CRASH(m_graph
, node
, "Unexpected node during fixup");
1139 case PutGlobalVar
: {
1140 fixEdge
<CellUse
>(node
->child1());
1141 speculateForBarrier(node
->child2());
1146 if (node
->child1()->shouldSpeculateString()) {
1147 m_insertionSet
.insertNode(
1148 m_indexInBlock
, SpecNone
, Check
, node
->origin
,
1149 Edge(node
->child1().node(), StringUse
));
1150 m_graph
.convertToConstant(node
, jsBoolean(true));
1151 observeUseKindOnNode
<StringUse
>(node
);
1156 if (node
->child1()->shouldSpeculateObject()) {
1157 m_insertionSet
.insertNode(
1158 m_indexInBlock
, SpecNone
, Check
, node
->origin
,
1159 Edge(node
->child1().node(), ObjectUse
));
1160 m_graph
.convertToConstant(node
, jsBoolean(true));
1161 observeUseKindOnNode
<ObjectUse
>(node
);
1165 case GetEnumerableLength
: {
1166 fixEdge
<CellUse
>(node
->child1());
1169 case HasGenericProperty
: {
1170 fixEdge
<CellUse
>(node
->child2());
1173 case HasStructureProperty
: {
1174 fixEdge
<StringUse
>(node
->child2());
1175 fixEdge
<KnownCellUse
>(node
->child3());
1178 case HasIndexedProperty
: {
1180 node
->arrayMode().refine(
1182 node
->child1()->prediction(),
1183 node
->child2()->prediction(),
1186 blessArrayOperation(node
->child1(), node
->child2(), node
->child3());
1187 fixEdge
<CellUse
>(node
->child1());
1188 fixEdge
<KnownInt32Use
>(node
->child2());
1191 case GetDirectPname
: {
1192 Edge
& base
= m_graph
.varArgChild(node
, 0);
1193 Edge
& property
= m_graph
.varArgChild(node
, 1);
1194 Edge
& index
= m_graph
.varArgChild(node
, 2);
1195 Edge
& enumerator
= m_graph
.varArgChild(node
, 3);
1196 fixEdge
<CellUse
>(base
);
1197 fixEdge
<KnownCellUse
>(property
);
1198 fixEdge
<KnownInt32Use
>(index
);
1199 fixEdge
<KnownCellUse
>(enumerator
);
1202 case GetPropertyEnumerator
: {
1203 fixEdge
<CellUse
>(node
->child1());
1206 case GetEnumeratorStructurePname
: {
1207 fixEdge
<KnownCellUse
>(node
->child1());
1208 fixEdge
<KnownInt32Use
>(node
->child2());
1211 case GetEnumeratorGenericPname
: {
1212 fixEdge
<KnownCellUse
>(node
->child1());
1213 fixEdge
<KnownInt32Use
>(node
->child2());
1216 case ToIndexString
: {
1217 fixEdge
<KnownInt32Use
>(node
->child1());
1221 // We want to insert type checks based on the instructionTypeSet of the TypeLocation, not the globalTypeSet.
1222 // Because the instructionTypeSet is contained in globalTypeSet, if we produce a type check for
1223 // type T for the instructionTypeSet, the global type set must also have information for type T.
1224 // So if it the type check succeeds for type T in the instructionTypeSet, a type check for type T
1225 // in the globalTypeSet would've also succeeded.
1226 // (The other direction does not hold in general).
1228 RefPtr
<TypeSet
> typeSet
= node
->typeLocation()->m_instructionTypeSet
;
1229 RuntimeTypeMask seenTypes
= typeSet
->seenTypes();
1230 if (typeSet
->doesTypeConformTo(TypeMachineInt
)) {
1231 if (node
->child1()->shouldSpeculateInt32())
1232 fixEdge
<Int32Use
>(node
->child1());
1234 fixEdge
<MachineIntUse
>(node
->child1());
1236 } else if (typeSet
->doesTypeConformTo(TypeNumber
| TypeMachineInt
)) {
1237 fixEdge
<NumberUse
>(node
->child1());
1239 } else if (typeSet
->doesTypeConformTo(TypeString
)) {
1240 fixEdge
<StringUse
>(node
->child1());
1242 } else if (typeSet
->doesTypeConformTo(TypeBoolean
)) {
1243 fixEdge
<BooleanUse
>(node
->child1());
1245 } else if (typeSet
->doesTypeConformTo(TypeUndefined
| TypeNull
) && (seenTypes
& TypeUndefined
) && (seenTypes
& TypeNull
)) {
1246 fixEdge
<OtherUse
>(node
->child1());
1248 } else if (typeSet
->doesTypeConformTo(TypeObject
)) {
1249 StructureSet set
= typeSet
->structureSet();
1250 if (!set
.isEmpty()) {
1251 fixEdge
<CellUse
>(node
->child1());
1252 node
->convertToCheckStructure(m_graph
.addStructureSet(set
));
1259 case CreateScopedArguments
:
1260 case CreateActivation
:
1262 fixEdge
<CellUse
>(node
->child1());
1266 #if !ASSERT_DISABLED
1267 // Have these no-op cases here to ensure that nobody forgets to add handlers for new opcodes.
1270 case DoubleConstant
:
1273 case GetArgumentCount
:
1276 case GetLocalUnlinked
:
1279 case VarInjectionWatchpoint
:
1283 case ConstructVarargs
:
1284 case CallForwardVarargs
:
1285 case ConstructForwardVarargs
:
1287 case ProfileControlFlow
:
1289 case NativeConstruct
:
1291 case NewArrayBuffer
:
1294 case ProfileWillCall
:
1295 case ProfileDidCall
:
1299 case IsObjectOrNull
:
1301 case CreateDirectArguments
:
1302 case CreateClonedArguments
:
1306 case ThrowReferenceError
:
1307 case CountExecution
:
1311 case CheckWatchdogTimer
:
1313 case ExtractOSREntryLocal
:
1327 template<UseKind useKind
>
1328 void createToString(Node
* node
, Edge
& edge
)
1330 edge
.setNode(m_insertionSet
.insertNode(
1331 m_indexInBlock
, SpecString
, ToString
, node
->origin
,
1332 Edge(edge
.node(), useKind
)));
1335 template<UseKind useKind
>
1336 void attemptToForceStringArrayModeByToStringConversion(ArrayMode
& arrayMode
, Node
* node
)
1338 ASSERT(arrayMode
== ArrayMode(Array::Generic
));
1340 if (!canOptimizeStringObjectAccess(node
->origin
.semantic
))
1343 createToString
<useKind
>(node
, node
->child1());
1344 arrayMode
= ArrayMode(Array::String
);
1347 template<UseKind useKind
>
1348 bool isStringObjectUse()
1351 case StringObjectUse
:
1352 case StringOrStringObjectUse
:
1359 template<UseKind useKind
>
1360 void convertStringAddUse(Node
* node
, Edge
& edge
)
1362 if (useKind
== StringUse
) {
1363 // This preserves the binaryUseKind() invariant ot ValueAdd: ValueAdd's
1364 // two edges will always have identical use kinds, which makes the
1365 // decision process much easier.
1366 observeUseKindOnNode
<StringUse
>(edge
.node());
1367 m_insertionSet
.insertNode(
1368 m_indexInBlock
, SpecNone
, Check
, node
->origin
,
1369 Edge(edge
.node(), StringUse
));
1370 edge
.setUseKind(KnownStringUse
);
1374 // FIXME: We ought to be able to have a ToPrimitiveToString node.
1376 observeUseKindOnNode
<useKind
>(edge
.node());
1377 createToString
<useKind
>(node
, edge
);
1380 void convertToMakeRope(Node
* node
)
1382 node
->setOpAndDefaultFlags(MakeRope
);
1383 fixupMakeRope(node
);
1386 void fixupMakeRope(Node
* node
)
1388 for (unsigned i
= 0; i
< AdjacencyList::Size
; ++i
) {
1389 Edge
& edge
= node
->children
.child(i
);
1392 edge
.setUseKind(KnownStringUse
);
1393 JSString
* string
= edge
->dynamicCastConstant
<JSString
*>();
1396 if (string
->length())
1399 // Don't allow the MakeRope to have zero children.
1400 if (!i
&& !node
->child2())
1403 node
->children
.removeEdge(i
--);
1406 if (!node
->child2()) {
1407 ASSERT(!node
->child3());
1408 node
->convertToIdentity();
1412 void fixupToPrimitive(Node
* node
)
1414 if (node
->child1()->shouldSpeculateInt32()) {
1415 fixEdge
<Int32Use
>(node
->child1());
1416 node
->convertToIdentity();
1420 if (node
->child1()->shouldSpeculateString()) {
1421 fixEdge
<StringUse
>(node
->child1());
1422 node
->convertToIdentity();
1426 if (node
->child1()->shouldSpeculateStringObject()
1427 && canOptimizeStringObjectAccess(node
->origin
.semantic
)) {
1428 fixEdge
<StringObjectUse
>(node
->child1());
1429 node
->convertToToString();
1433 if (node
->child1()->shouldSpeculateStringOrStringObject()
1434 && canOptimizeStringObjectAccess(node
->origin
.semantic
)) {
1435 fixEdge
<StringOrStringObjectUse
>(node
->child1());
1436 node
->convertToToString();
1441 void fixupToStringOrCallStringConstructor(Node
* node
)
1443 if (node
->child1()->shouldSpeculateString()) {
1444 fixEdge
<StringUse
>(node
->child1());
1445 node
->convertToIdentity();
1449 if (node
->child1()->shouldSpeculateStringObject()
1450 && canOptimizeStringObjectAccess(node
->origin
.semantic
)) {
1451 fixEdge
<StringObjectUse
>(node
->child1());
1455 if (node
->child1()->shouldSpeculateStringOrStringObject()
1456 && canOptimizeStringObjectAccess(node
->origin
.semantic
)) {
1457 fixEdge
<StringOrStringObjectUse
>(node
->child1());
1461 if (node
->child1()->shouldSpeculateCell()) {
1462 fixEdge
<CellUse
>(node
->child1());
1467 template<UseKind leftUseKind
>
1468 bool attemptToMakeFastStringAdd(Node
* node
, Edge
& left
, Edge
& right
)
1470 ASSERT(leftUseKind
== StringUse
|| leftUseKind
== StringObjectUse
|| leftUseKind
== StringOrStringObjectUse
);
1472 if (isStringObjectUse
<leftUseKind
>() && !canOptimizeStringObjectAccess(node
->origin
.semantic
))
1475 convertStringAddUse
<leftUseKind
>(node
, left
);
1477 if (right
->shouldSpeculateString())
1478 convertStringAddUse
<StringUse
>(node
, right
);
1479 else if (right
->shouldSpeculateStringObject() && canOptimizeStringObjectAccess(node
->origin
.semantic
))
1480 convertStringAddUse
<StringObjectUse
>(node
, right
);
1481 else if (right
->shouldSpeculateStringOrStringObject() && canOptimizeStringObjectAccess(node
->origin
.semantic
))
1482 convertStringAddUse
<StringOrStringObjectUse
>(node
, right
);
1484 // At this point we know that the other operand is something weird. The semantically correct
1485 // way of dealing with this is:
1487 // MakeRope(@left, ToString(ToPrimitive(@right)))
1489 // So that's what we emit. NB, we need to do all relevant type checks on @left before we do
1490 // anything to @right, since ToPrimitive may be effectful.
1492 Node
* toPrimitive
= m_insertionSet
.insertNode(
1493 m_indexInBlock
, resultOfToPrimitive(right
->prediction()), ToPrimitive
,
1494 node
->origin
, Edge(right
.node()));
1495 Node
* toString
= m_insertionSet
.insertNode(
1496 m_indexInBlock
, SpecString
, ToString
, node
->origin
, Edge(toPrimitive
));
1498 fixupToPrimitive(toPrimitive
);
1500 // Don't fix up ToString. ToString and ToPrimitive are originated from the same bytecode and
1501 // ToPrimitive may have an observable side effect. ToString should not be converted into Check
1502 // with speculative type check because OSR exit reproduce an observable side effect done in
1505 right
.setNode(toString
);
1508 convertToMakeRope(node
);
1512 bool isStringPrototypeMethodSane(
1513 JSObject
* stringPrototype
, Structure
* stringPrototypeStructure
, UniquedStringImpl
* uid
)
1515 unsigned attributesUnused
;
1516 PropertyOffset offset
=
1517 stringPrototypeStructure
->getConcurrently(uid
, attributesUnused
);
1518 if (!isValidOffset(offset
))
1521 JSValue value
= m_graph
.tryGetConstantProperty(
1522 stringPrototype
, stringPrototypeStructure
, offset
);
1526 JSFunction
* function
= jsDynamicCast
<JSFunction
*>(value
);
1530 if (function
->executable()->intrinsicFor(CodeForCall
) != StringPrototypeValueOfIntrinsic
)
1536 bool canOptimizeStringObjectAccess(const CodeOrigin
& codeOrigin
)
1538 if (m_graph
.hasExitSite(codeOrigin
, NotStringObject
))
1541 Structure
* stringObjectStructure
= m_graph
.globalObjectFor(codeOrigin
)->stringObjectStructure();
1542 ASSERT(stringObjectStructure
->storedPrototype().isObject());
1543 ASSERT(stringObjectStructure
->storedPrototype().asCell()->classInfo() == StringPrototype::info());
1545 JSObject
* stringPrototypeObject
= asObject(stringObjectStructure
->storedPrototype());
1546 Structure
* stringPrototypeStructure
= stringPrototypeObject
->structure();
1547 if (m_graph
.registerStructure(stringPrototypeStructure
) != StructureRegisteredAndWatched
)
1550 if (stringPrototypeStructure
->isDictionary())
1553 // We're being conservative here. We want DFG's ToString on StringObject to be
1554 // used in both numeric contexts (that would call valueOf()) and string contexts
1555 // (that would call toString()). We don't want the DFG to have to distinguish
1556 // between the two, just because that seems like it would get confusing. So we
1557 // just require both methods to be sane.
1558 if (!isStringPrototypeMethodSane(stringPrototypeObject
, stringPrototypeStructure
, vm().propertyNames
->valueOf
.impl()))
1560 if (!isStringPrototypeMethodSane(stringPrototypeObject
, stringPrototypeStructure
, vm().propertyNames
->toString
.impl()))
1566 void fixupGetAndSetLocalsInBlock(BasicBlock
* block
)
1570 ASSERT(block
->isReachable
);
1572 for (m_indexInBlock
= 0; m_indexInBlock
< block
->size(); ++m_indexInBlock
) {
1573 Node
* node
= m_currentNode
= block
->at(m_indexInBlock
);
1574 if (node
->op() != SetLocal
&& node
->op() != GetLocal
)
1577 VariableAccessData
* variable
= node
->variableAccessData();
1578 switch (node
->op()) {
1580 switch (variable
->flushFormat()) {
1582 node
->setResult(NodeResultDouble
);
1585 node
->setResult(NodeResultInt52
);
1593 switch (variable
->flushFormat()) {
1594 case FlushedJSValue
:
1597 fixEdge
<DoubleRepUse
>(node
->child1());
1600 fixEdge
<Int32Use
>(node
->child1());
1603 fixEdge
<Int52RepUse
>(node
->child1());
1606 fixEdge
<CellUse
>(node
->child1());
1608 case FlushedBoolean
:
1609 fixEdge
<BooleanUse
>(node
->child1());
1612 RELEASE_ASSERT_NOT_REACHED();
1618 RELEASE_ASSERT_NOT_REACHED();
1622 m_insertionSet
.execute(block
);
1625 Node
* checkArray(ArrayMode arrayMode
, const NodeOrigin
& origin
, Node
* array
, Node
* index
, bool (*storageCheck
)(const ArrayMode
&) = canCSEStorage
)
1627 ASSERT(arrayMode
.isSpecific());
1629 if (arrayMode
.type() == Array::String
) {
1630 m_insertionSet
.insertNode(
1631 m_indexInBlock
, SpecNone
, Check
, origin
, Edge(array
, StringUse
));
1633 // Note that we only need to be using a structure check if we opt for SaneChain, since
1634 // that needs to protect against JSArray's __proto__ being changed.
1635 Structure
* structure
= arrayMode
.originalArrayStructure(m_graph
, origin
.semantic
);
1637 Edge indexEdge
= index
? Edge(index
, Int32Use
) : Edge();
1639 if (arrayMode
.doesConversion()) {
1641 m_insertionSet
.insertNode(
1642 m_indexInBlock
, SpecNone
, ArrayifyToStructure
, origin
,
1643 OpInfo(structure
), OpInfo(arrayMode
.asWord()), Edge(array
, CellUse
), indexEdge
);
1645 m_insertionSet
.insertNode(
1646 m_indexInBlock
, SpecNone
, Arrayify
, origin
,
1647 OpInfo(arrayMode
.asWord()), Edge(array
, CellUse
), indexEdge
);
1651 m_insertionSet
.insertNode(
1652 m_indexInBlock
, SpecNone
, CheckStructure
, origin
,
1653 OpInfo(m_graph
.addStructureSet(structure
)), Edge(array
, CellUse
));
1655 m_insertionSet
.insertNode(
1656 m_indexInBlock
, SpecNone
, CheckArray
, origin
,
1657 OpInfo(arrayMode
.asWord()), Edge(array
, CellUse
));
1662 if (!storageCheck(arrayMode
))
1665 if (arrayMode
.usesButterfly()) {
1666 return m_insertionSet
.insertNode(
1667 m_indexInBlock
, SpecNone
, GetButterfly
, origin
, Edge(array
, CellUse
));
1670 return m_insertionSet
.insertNode(
1671 m_indexInBlock
, SpecNone
, GetIndexedPropertyStorage
, origin
,
1672 OpInfo(arrayMode
.asWord()), Edge(array
, KnownCellUse
));
1675 void blessArrayOperation(Edge base
, Edge index
, Edge
& storageChild
)
1677 Node
* node
= m_currentNode
;
1679 switch (node
->arrayMode().type()) {
1680 case Array::ForceExit
: {
1681 m_insertionSet
.insertNode(
1682 m_indexInBlock
, SpecNone
, ForceOSRExit
, node
->origin
);
1686 case Array::SelectUsingPredictions
:
1687 case Array::Unprofiled
:
1688 RELEASE_ASSERT_NOT_REACHED();
1691 case Array::Generic
:
1695 Node
* storage
= checkArray(node
->arrayMode(), node
->origin
, base
.node(), index
.node());
1699 storageChild
= Edge(storage
);
1704 bool alwaysUnboxSimplePrimitives()
1709 // Any boolean, int, or cell value is profitable to unbox on 32-bit because it
1715 template<UseKind useKind
>
1716 void observeUseKindOnNode(Node
* node
)
1718 if (useKind
== UntypedUse
)
1720 observeUseKindOnNode(node
, useKind
);
1723 void observeUseKindOnEdge(Edge edge
)
1725 observeUseKindOnNode(edge
.node(), edge
.useKind());
1728 void observeUseKindOnNode(Node
* node
, UseKind useKind
)
1730 if (node
->op() != GetLocal
)
1733 // FIXME: The way this uses alwaysUnboxSimplePrimitives() is suspicious.
1734 // https://bugs.webkit.org/show_bug.cgi?id=121518
1736 VariableAccessData
* variable
= node
->variableAccessData();
1739 if (alwaysUnboxSimplePrimitives()
1740 || isInt32Speculation(variable
->prediction()))
1741 m_profitabilityChanged
|= variable
->mergeIsProfitableToUnbox(true);
1746 case DoubleRepRealUse
:
1747 if (variable
->doubleFormatState() == UsingDoubleFormat
)
1748 m_profitabilityChanged
|= variable
->mergeIsProfitableToUnbox(true);
1751 if (alwaysUnboxSimplePrimitives()
1752 || isBooleanSpeculation(variable
->prediction()))
1753 m_profitabilityChanged
|= variable
->mergeIsProfitableToUnbox(true);
1756 if (isMachineIntSpeculation(variable
->prediction()))
1757 m_profitabilityChanged
|= variable
->mergeIsProfitableToUnbox(true);
1764 case KnownStringUse
:
1765 case StringObjectUse
:
1766 case StringOrStringObjectUse
:
1767 if (alwaysUnboxSimplePrimitives()
1768 || isCellSpeculation(variable
->prediction()))
1769 m_profitabilityChanged
|= variable
->mergeIsProfitableToUnbox(true);
1776 template<UseKind useKind
>
1777 void fixEdge(Edge
& edge
)
1779 observeUseKindOnNode
<useKind
>(edge
.node());
1780 edge
.setUseKind(useKind
);
1783 void speculateForBarrier(Edge value
)
1785 // Currently, the DFG won't take advantage of this speculation. But, we want to do it in
1786 // the DFG anyway because if such a speculation would be wrong, we want to know before
1787 // we do an expensive compile.
1789 if (value
->shouldSpeculateInt32()) {
1790 insertCheck
<Int32Use
>(m_indexInBlock
, value
.node());
1794 if (value
->shouldSpeculateBoolean()) {
1795 insertCheck
<BooleanUse
>(m_indexInBlock
, value
.node());
1799 if (value
->shouldSpeculateOther()) {
1800 insertCheck
<OtherUse
>(m_indexInBlock
, value
.node());
1804 if (value
->shouldSpeculateNumber()) {
1805 insertCheck
<NumberUse
>(m_indexInBlock
, value
.node());
1809 if (value
->shouldSpeculateNotCell()) {
1810 insertCheck
<NotCellUse
>(m_indexInBlock
, value
.node());
1815 template<UseKind useKind
>
1816 void insertCheck(unsigned indexInBlock
, Node
* node
)
1818 observeUseKindOnNode
<useKind
>(node
);
1819 m_insertionSet
.insertNode(
1820 indexInBlock
, SpecNone
, Check
, m_currentNode
->origin
, Edge(node
, useKind
));
1823 void fixIntConvertingEdge(Edge
& edge
)
1825 Node
* node
= edge
.node();
1826 if (node
->shouldSpeculateInt32OrBoolean()) {
1827 fixIntOrBooleanEdge(edge
);
1832 if (node
->shouldSpeculateMachineInt())
1833 useKind
= Int52RepUse
;
1834 else if (node
->shouldSpeculateNumber())
1835 useKind
= DoubleRepUse
;
1837 useKind
= NotCellUse
;
1838 Node
* newNode
= m_insertionSet
.insertNode(
1839 m_indexInBlock
, SpecInt32
, ValueToInt32
, m_currentNode
->origin
,
1840 Edge(node
, useKind
));
1841 observeUseKindOnNode(node
, useKind
);
1843 edge
= Edge(newNode
, KnownInt32Use
);
1846 void fixIntOrBooleanEdge(Edge
& edge
)
1848 Node
* node
= edge
.node();
1849 if (!node
->sawBooleans()) {
1850 fixEdge
<Int32Use
>(edge
);
1855 if (node
->shouldSpeculateBoolean())
1856 useKind
= BooleanUse
;
1858 useKind
= UntypedUse
;
1859 Node
* newNode
= m_insertionSet
.insertNode(
1860 m_indexInBlock
, SpecInt32
, BooleanToNumber
, m_currentNode
->origin
,
1861 Edge(node
, useKind
));
1862 observeUseKindOnNode(node
, useKind
);
1864 edge
= Edge(newNode
, Int32Use
);
1867 void fixDoubleOrBooleanEdge(Edge
& edge
)
1869 Node
* node
= edge
.node();
1870 if (!node
->sawBooleans()) {
1871 fixEdge
<DoubleRepUse
>(edge
);
1876 if (node
->shouldSpeculateBoolean())
1877 useKind
= BooleanUse
;
1879 useKind
= UntypedUse
;
1880 Node
* newNode
= m_insertionSet
.insertNode(
1881 m_indexInBlock
, SpecInt32
, BooleanToNumber
, m_currentNode
->origin
,
1882 Edge(node
, useKind
));
1883 observeUseKindOnNode(node
, useKind
);
1885 edge
= Edge(newNode
, DoubleRepUse
);
1888 void truncateConstantToInt32(Edge
& edge
)
1890 Node
* oldNode
= edge
.node();
1892 JSValue value
= oldNode
->asJSValue();
1893 if (value
.isInt32())
1896 value
= jsNumber(JSC::toInt32(value
.asNumber()));
1897 ASSERT(value
.isInt32());
1898 edge
.setNode(m_insertionSet
.insertNode(
1899 m_indexInBlock
, SpecInt32
, JSConstant
, m_currentNode
->origin
,
1900 OpInfo(m_graph
.freeze(value
))));
1903 void truncateConstantsIfNecessary(Node
* node
, AddSpeculationMode mode
)
1905 if (mode
!= SpeculateInt32AndTruncateConstants
)
1908 ASSERT(node
->child1()->hasConstant() || node
->child2()->hasConstant());
1909 if (node
->child1()->hasConstant())
1910 truncateConstantToInt32(node
->child1());
1912 truncateConstantToInt32(node
->child2());
1915 bool attemptToMakeIntegerAdd(Node
* node
)
1917 AddSpeculationMode mode
= m_graph
.addSpeculationMode(node
, FixupPass
);
1918 if (mode
!= DontSpeculateInt32
) {
1919 truncateConstantsIfNecessary(node
, mode
);
1920 fixIntOrBooleanEdge(node
->child1());
1921 fixIntOrBooleanEdge(node
->child2());
1922 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
1923 node
->setArithMode(Arith::Unchecked
);
1925 node
->setArithMode(Arith::CheckOverflow
);
1929 if (m_graph
.addShouldSpeculateMachineInt(node
)) {
1930 fixEdge
<Int52RepUse
>(node
->child1());
1931 fixEdge
<Int52RepUse
>(node
->child2());
1932 node
->setArithMode(Arith::CheckOverflow
);
1933 node
->setResult(NodeResultInt52
);
1940 bool attemptToMakeGetArrayLength(Node
* node
)
1942 if (!isInt32Speculation(node
->prediction()))
1944 CodeBlock
* profiledBlock
= m_graph
.baselineCodeBlockFor(node
->origin
.semantic
);
1945 ArrayProfile
* arrayProfile
=
1946 profiledBlock
->getArrayProfile(node
->origin
.semantic
.bytecodeIndex
);
1947 ArrayMode arrayMode
= ArrayMode(Array::SelectUsingPredictions
);
1949 ConcurrentJITLocker
locker(profiledBlock
->m_lock
);
1950 arrayProfile
->computeUpdatedPrediction(locker
, profiledBlock
);
1951 arrayMode
= ArrayMode::fromObserved(locker
, arrayProfile
, Array::Read
, false);
1952 if (arrayMode
.type() == Array::Unprofiled
) {
1953 // For normal array operations, it makes sense to treat Unprofiled
1954 // accesses as ForceExit and get more data rather than using
1955 // predictions and then possibly ending up with a Generic. But here,
1956 // we treat anything that is Unprofiled as Generic and keep the
1957 // GetById. I.e. ForceExit = Generic. So, there is no harm - and only
1958 // profit - from treating the Unprofiled case as
1959 // SelectUsingPredictions.
1960 arrayMode
= ArrayMode(Array::SelectUsingPredictions
);
1964 arrayMode
= arrayMode
.refine(
1965 m_graph
, node
, node
->child1()->prediction(), node
->prediction());
1967 if (arrayMode
.type() == Array::Generic
) {
1968 // Check if the input is something that we can't get array length for, but for which we
1969 // could insert some conversions in order to transform it into something that we can do it
1971 if (node
->child1()->shouldSpeculateStringObject())
1972 attemptToForceStringArrayModeByToStringConversion
<StringObjectUse
>(arrayMode
, node
);
1973 else if (node
->child1()->shouldSpeculateStringOrStringObject())
1974 attemptToForceStringArrayModeByToStringConversion
<StringOrStringObjectUse
>(arrayMode
, node
);
1977 if (!arrayMode
.supportsLength())
1980 convertToGetArrayLength(node
, arrayMode
);
1984 bool attemptToMakeGetTypedArrayByteLength(Node
* node
)
1986 if (!isInt32Speculation(node
->prediction()))
1989 TypedArrayType type
= typedArrayTypeFromSpeculation(node
->child1()->prediction());
1990 if (!isTypedView(type
))
1993 if (elementSize(type
) == 1) {
1994 convertToGetArrayLength(node
, ArrayMode(toArrayType(type
)));
1998 Node
* length
= prependGetArrayLength(
1999 node
->origin
, node
->child1().node(), ArrayMode(toArrayType(type
)));
2001 Node
* shiftAmount
= m_insertionSet
.insertNode(
2002 m_indexInBlock
, SpecInt32
, JSConstant
, node
->origin
,
2003 OpInfo(m_graph
.freeze(jsNumber(logElementSize(type
)))));
2005 // We can use a BitLShift here because typed arrays will never have a byteLength
2006 // that overflows int32.
2007 node
->setOp(BitLShift
);
2008 node
->clearFlags(NodeMustGenerate
);
2009 observeUseKindOnNode(length
, Int32Use
);
2010 observeUseKindOnNode(shiftAmount
, Int32Use
);
2011 node
->child1() = Edge(length
, Int32Use
);
2012 node
->child2() = Edge(shiftAmount
, Int32Use
);
2016 void convertToGetArrayLength(Node
* node
, ArrayMode arrayMode
)
2018 node
->setOp(GetArrayLength
);
2019 node
->clearFlags(NodeMustGenerate
);
2020 fixEdge
<KnownCellUse
>(node
->child1());
2021 node
->setArrayMode(arrayMode
);
2023 Node
* storage
= checkArray(arrayMode
, node
->origin
, node
->child1().node(), 0, lengthNeedsStorage
);
2027 node
->child2() = Edge(storage
);
2030 Node
* prependGetArrayLength(NodeOrigin origin
, Node
* child
, ArrayMode arrayMode
)
2032 Node
* storage
= checkArray(arrayMode
, origin
, child
, 0, lengthNeedsStorage
);
2033 return m_insertionSet
.insertNode(
2034 m_indexInBlock
, SpecInt32
, GetArrayLength
, origin
,
2035 OpInfo(arrayMode
.asWord()), Edge(child
, KnownCellUse
), Edge(storage
));
2038 bool attemptToMakeGetTypedArrayByteOffset(Node
* node
)
2040 if (!isInt32Speculation(node
->prediction()))
2043 TypedArrayType type
= typedArrayTypeFromSpeculation(node
->child1()->prediction());
2044 if (!isTypedView(type
))
2048 ArrayMode(toArrayType(type
)), node
->origin
, node
->child1().node(),
2049 0, neverNeedsStorage
);
2051 node
->setOp(GetTypedArrayByteOffset
);
2052 node
->clearFlags(NodeMustGenerate
);
2053 fixEdge
<KnownCellUse
>(node
->child1());
2057 void injectTypeConversionsInBlock(BasicBlock
* block
)
2061 ASSERT(block
->isReachable
);
2063 for (m_indexInBlock
= 0; m_indexInBlock
< block
->size(); ++m_indexInBlock
) {
2064 m_currentNode
= block
->at(m_indexInBlock
);
2065 tryToRelaxRepresentation(m_currentNode
);
2066 DFG_NODE_DO_TO_CHILDREN(m_graph
, m_currentNode
, injectTypeConversionsForEdge
);
2068 m_insertionSet
.execute(block
);
2071 void tryToRelaxRepresentation(Node
* node
)
2073 // Some operations may be able to operate more efficiently over looser representations.
2074 // Identify those here. This avoids inserting a redundant representation conversion.
2075 // Also, for some operations, like MovHint, this is a necessary optimization: inserting
2076 // an otherwise-dead conversion just for a MovHint would break OSR's understanding of
2079 switch (node
->op()) {
2082 DFG_NODE_DO_TO_CHILDREN(m_graph
, m_currentNode
, fixEdgeRepresentation
);
2086 if (node
->child1().useKind() == DoubleRepUse
2087 && !node
->child1()->hasDoubleResult()) {
2088 node
->child1().setUseKind(NumberUse
);
2098 void fixEdgeRepresentation(Node
*, Edge
& edge
)
2100 switch (edge
.useKind()) {
2102 case DoubleRepRealUse
:
2103 if (edge
->hasDoubleResult())
2106 if (edge
->hasInt52Result())
2107 edge
.setUseKind(Int52RepUse
);
2108 else if (edge
.useKind() == DoubleRepUse
)
2109 edge
.setUseKind(NumberUse
);
2113 // Nothing we can really do.
2118 if (edge
->hasDoubleResult())
2119 edge
.setUseKind(DoubleRepUse
);
2120 else if (edge
->hasInt52Result())
2121 edge
.setUseKind(Int52RepUse
);
2125 if (edge
->hasDoubleResult())
2126 edge
.setUseKind(DoubleRepRealUse
);
2127 else if (edge
->hasInt52Result())
2128 edge
.setUseKind(Int52RepUse
);
2136 void injectTypeConversionsForEdge(Node
* node
, Edge
& edge
)
2138 ASSERT(node
== m_currentNode
);
2139 Node
* result
= nullptr;
2141 switch (edge
.useKind()) {
2143 case DoubleRepRealUse
:
2144 case DoubleRepMachineIntUse
: {
2145 if (edge
->hasDoubleResult())
2148 if (edge
->isNumberConstant()) {
2149 result
= m_insertionSet
.insertNode(
2150 m_indexInBlock
, SpecBytecodeDouble
, DoubleConstant
, node
->origin
,
2151 OpInfo(m_graph
.freeze(jsDoubleNumber(edge
->asNumber()))));
2152 } else if (edge
->hasInt52Result()) {
2153 result
= m_insertionSet
.insertNode(
2154 m_indexInBlock
, SpecInt52AsDouble
, DoubleRep
, node
->origin
,
2155 Edge(edge
.node(), Int52RepUse
));
2158 if (edge
->shouldSpeculateDoubleReal())
2159 useKind
= RealNumberUse
;
2160 else if (edge
->shouldSpeculateNumber())
2161 useKind
= NumberUse
;
2163 useKind
= NotCellUse
;
2165 result
= m_insertionSet
.insertNode(
2166 m_indexInBlock
, SpecBytecodeDouble
, DoubleRep
, node
->origin
,
2167 Edge(edge
.node(), useKind
));
2170 edge
.setNode(result
);
2175 if (edge
->hasInt52Result())
2178 if (edge
->isMachineIntConstant()) {
2179 result
= m_insertionSet
.insertNode(
2180 m_indexInBlock
, SpecMachineInt
, Int52Constant
, node
->origin
,
2181 OpInfo(edge
->constant()));
2182 } else if (edge
->hasDoubleResult()) {
2183 result
= m_insertionSet
.insertNode(
2184 m_indexInBlock
, SpecMachineInt
, Int52Rep
, node
->origin
,
2185 Edge(edge
.node(), DoubleRepMachineIntUse
));
2186 } else if (edge
->shouldSpeculateInt32ForArithmetic()) {
2187 result
= m_insertionSet
.insertNode(
2188 m_indexInBlock
, SpecInt32
, Int52Rep
, node
->origin
,
2189 Edge(edge
.node(), Int32Use
));
2191 result
= m_insertionSet
.insertNode(
2192 m_indexInBlock
, SpecMachineInt
, Int52Rep
, node
->origin
,
2193 Edge(edge
.node(), MachineIntUse
));
2196 edge
.setNode(result
);
2201 if (!edge
->hasDoubleResult() && !edge
->hasInt52Result())
2204 if (edge
->hasDoubleResult()) {
2205 result
= m_insertionSet
.insertNode(
2206 m_indexInBlock
, SpecBytecodeDouble
, ValueRep
, node
->origin
,
2207 Edge(edge
.node(), DoubleRepUse
));
2209 result
= m_insertionSet
.insertNode(
2210 m_indexInBlock
, SpecInt32
| SpecInt52AsDouble
, ValueRep
, node
->origin
,
2211 Edge(edge
.node(), Int52RepUse
));
2214 edge
.setNode(result
);
// Basic block currently being processed by the per-block passes.
BasicBlock* m_block;
// Index of the current node within m_block; also the insertion point used with m_insertionSet.
unsigned m_indexInBlock;
// Node currently being fixed up; supplies origins for inserted nodes.
Node* m_currentNode;
// Batches node insertions so they can be executed once per block.
InsertionSet m_insertionSet;
// Set when a variable's unboxing profitability changes; presumably drives a fixpoint loop in run() — TODO confirm (run() is outside this chunk).
bool m_profitabilityChanged;
2226 bool performFixup(Graph
& graph
)
2228 SamplingRegion
samplingRegion("DFG Fixup Phase");
2229 return runPhase
<FixupPhase
>(graph
);
2232 } } // namespace JSC::DFG
2234 #endif // ENABLE(DFG_JIT)