2 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGFixupPhase.h"
32 #include "DFGInsertionSet.h"
34 #include "DFGPredictionPropagationPhase.h"
35 #include "DFGVariableAccessDataDump.h"
36 #include "JSCInlines.h"
38 namespace JSC
{ namespace DFG
{
40 class FixupPhase
: public Phase
{
42 FixupPhase(Graph
& graph
)
43 : Phase(graph
, "fixup")
44 , m_insertionSet(graph
)
50 ASSERT(m_graph
.m_fixpointState
== BeforeFixpoint
);
51 ASSERT(m_graph
.m_form
== ThreadedCPS
);
53 m_profitabilityChanged
= false;
54 for (BlockIndex blockIndex
= 0; blockIndex
< m_graph
.numBlocks(); ++blockIndex
)
55 fixupBlock(m_graph
.block(blockIndex
));
57 while (m_profitabilityChanged
) {
58 m_profitabilityChanged
= false;
60 for (unsigned i
= m_graph
.m_argumentPositions
.size(); i
--;)
61 m_graph
.m_argumentPositions
[i
].mergeArgumentUnboxingAwareness();
63 for (BlockIndex blockIndex
= 0; blockIndex
< m_graph
.numBlocks(); ++blockIndex
)
64 fixupGetAndSetLocalsInBlock(m_graph
.block(blockIndex
));
67 for (BlockIndex blockIndex
= 0; blockIndex
< m_graph
.numBlocks(); ++blockIndex
)
68 injectTypeConversionsInBlock(m_graph
.block(blockIndex
));
74 void fixupBlock(BasicBlock
* block
)
78 ASSERT(block
->isReachable
);
80 for (m_indexInBlock
= 0; m_indexInBlock
< block
->size(); ++m_indexInBlock
) {
81 m_currentNode
= block
->at(m_indexInBlock
);
82 addPhantomsIfNecessary();
83 fixupNode(m_currentNode
);
86 m_insertionSet
.execute(block
);
89 void fixupNode(Node
* node
)
91 NodeType op
= node
->op();
95 // This gets handled by fixupSetLocalsInBlock().
105 fixIntConvertingEdge(node
->child1());
106 fixIntConvertingEdge(node
->child2());
111 fixIntConvertingEdge(node
->child1());
112 fixIntConvertingEdge(node
->child2());
113 node
->setOp(ArithMul
);
114 node
->setArithMode(Arith::Unchecked
);
115 node
->child1().setUseKind(Int32Use
);
116 node
->child2().setUseKind(Int32Use
);
120 case UInt32ToNumber
: {
121 fixIntConvertingEdge(node
->child1());
122 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
123 node
->convertToIdentity();
124 else if (node
->canSpeculateInt32(FixupPass
))
125 node
->setArithMode(Arith::CheckOverflow
);
127 node
->setArithMode(Arith::DoOverflow
);
128 node
->setResult(NodeResultDouble
);
134 if (attemptToMakeIntegerAdd(node
)) {
135 node
->setOp(ArithAdd
);
136 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
139 if (Node::shouldSpeculateNumberOrBooleanExpectingDefined(node
->child1().node(), node
->child2().node())) {
140 fixDoubleOrBooleanEdge(node
->child1());
141 fixDoubleOrBooleanEdge(node
->child2());
142 node
->setOp(ArithAdd
);
143 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
144 node
->setResult(NodeResultDouble
);
148 // FIXME: Optimize for the case where one of the operands is the
149 // empty string. Also consider optimizing for the case where we don't
150 // believe either side is the emtpy string. Both of these things should
153 if (node
->child1()->shouldSpeculateString()
154 && attemptToMakeFastStringAdd
<StringUse
>(node
, node
->child1(), node
->child2()))
156 if (node
->child2()->shouldSpeculateString()
157 && attemptToMakeFastStringAdd
<StringUse
>(node
, node
->child2(), node
->child1()))
159 if (node
->child1()->shouldSpeculateStringObject()
160 && attemptToMakeFastStringAdd
<StringObjectUse
>(node
, node
->child1(), node
->child2()))
162 if (node
->child2()->shouldSpeculateStringObject()
163 && attemptToMakeFastStringAdd
<StringObjectUse
>(node
, node
->child2(), node
->child1()))
165 if (node
->child1()->shouldSpeculateStringOrStringObject()
166 && attemptToMakeFastStringAdd
<StringOrStringObjectUse
>(node
, node
->child1(), node
->child2()))
168 if (node
->child2()->shouldSpeculateStringOrStringObject()
169 && attemptToMakeFastStringAdd
<StringOrStringObjectUse
>(node
, node
->child2(), node
->child1()))
181 if (attemptToMakeIntegerAdd(node
))
183 fixDoubleOrBooleanEdge(node
->child1());
184 fixDoubleOrBooleanEdge(node
->child2());
185 node
->setResult(NodeResultDouble
);
190 if (m_graph
.negateShouldSpeculateInt32(node
, FixupPass
)) {
191 fixIntOrBooleanEdge(node
->child1());
192 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
193 node
->setArithMode(Arith::Unchecked
);
194 else if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
195 node
->setArithMode(Arith::CheckOverflow
);
197 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
200 if (m_graph
.negateShouldSpeculateMachineInt(node
, FixupPass
)) {
201 fixEdge
<Int52RepUse
>(node
->child1());
202 if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
203 node
->setArithMode(Arith::CheckOverflow
);
205 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
206 node
->setResult(NodeResultInt52
);
209 fixDoubleOrBooleanEdge(node
->child1());
210 node
->setResult(NodeResultDouble
);
215 if (m_graph
.mulShouldSpeculateInt32(node
, FixupPass
)) {
216 fixIntOrBooleanEdge(node
->child1());
217 fixIntOrBooleanEdge(node
->child2());
218 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
219 node
->setArithMode(Arith::Unchecked
);
220 else if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
221 node
->setArithMode(Arith::CheckOverflow
);
223 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
226 if (m_graph
.mulShouldSpeculateMachineInt(node
, FixupPass
)) {
227 fixEdge
<Int52RepUse
>(node
->child1());
228 fixEdge
<Int52RepUse
>(node
->child2());
229 if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
230 node
->setArithMode(Arith::CheckOverflow
);
232 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
233 node
->setResult(NodeResultInt52
);
236 fixDoubleOrBooleanEdge(node
->child1());
237 fixDoubleOrBooleanEdge(node
->child2());
238 node
->setResult(NodeResultDouble
);
244 if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node
->child1().node(), node
->child2().node())
245 && node
->canSpeculateInt32(FixupPass
)) {
246 if (optimizeForX86() || optimizeForARM64() || optimizeForARMv7s()) {
247 fixIntOrBooleanEdge(node
->child1());
248 fixIntOrBooleanEdge(node
->child2());
249 if (bytecodeCanTruncateInteger(node
->arithNodeFlags()))
250 node
->setArithMode(Arith::Unchecked
);
251 else if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
252 node
->setArithMode(Arith::CheckOverflow
);
254 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
258 // This will cause conversion nodes to be inserted later.
259 fixDoubleOrBooleanEdge(node
->child1());
260 fixDoubleOrBooleanEdge(node
->child2());
262 // But we have to make sure that everything is phantom'd until after the
263 // DoubleAsInt32 node, which occurs after the Div/Mod node that the conversions
264 // will be insered on.
265 addRequiredPhantom(node
->child1().node());
266 addRequiredPhantom(node
->child2().node());
268 // We don't need to do ref'ing on the children because we're stealing them from
269 // the original division.
270 Node
* newDivision
= m_insertionSet
.insertNode(
271 m_indexInBlock
, SpecBytecodeDouble
, *node
);
272 newDivision
->setResult(NodeResultDouble
);
274 node
->setOp(DoubleAsInt32
);
275 node
->children
.initialize(Edge(newDivision
, DoubleRepUse
), Edge(), Edge());
276 if (bytecodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
277 node
->setArithMode(Arith::CheckOverflow
);
279 node
->setArithMode(Arith::CheckOverflowAndNegativeZero
);
282 fixDoubleOrBooleanEdge(node
->child1());
283 fixDoubleOrBooleanEdge(node
->child2());
284 node
->setResult(NodeResultDouble
);
290 if (Node::shouldSpeculateInt32OrBooleanForArithmetic(node
->child1().node(), node
->child2().node())
291 && node
->canSpeculateInt32(FixupPass
)) {
292 fixIntOrBooleanEdge(node
->child1());
293 fixIntOrBooleanEdge(node
->child2());
296 fixDoubleOrBooleanEdge(node
->child1());
297 fixDoubleOrBooleanEdge(node
->child2());
298 node
->setResult(NodeResultDouble
);
303 if (node
->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
304 && node
->canSpeculateInt32(FixupPass
)) {
305 fixIntOrBooleanEdge(node
->child1());
308 fixDoubleOrBooleanEdge(node
->child1());
309 node
->setResult(NodeResultDouble
);
317 fixDoubleOrBooleanEdge(node
->child1());
318 node
->setResult(NodeResultDouble
);
323 if (node
->child1()->shouldSpeculateBoolean())
324 fixEdge
<BooleanUse
>(node
->child1());
325 else if (node
->child1()->shouldSpeculateObjectOrOther())
326 fixEdge
<ObjectOrOtherUse
>(node
->child1());
327 else if (node
->child1()->shouldSpeculateInt32OrBoolean())
328 fixIntOrBooleanEdge(node
->child1());
329 else if (node
->child1()->shouldSpeculateNumber())
330 fixEdge
<DoubleRepUse
>(node
->child1());
331 else if (node
->child1()->shouldSpeculateString())
332 fixEdge
<StringUse
>(node
->child1());
337 if (node
->child1()->shouldSpeculateString())
338 fixEdge
<StringUse
>(node
->child1());
339 else if (node
->child1()->shouldSpeculateCell())
340 fixEdge
<CellUse
>(node
->child1());
344 case CompareEqConstant
: {
352 case CompareGreaterEq
: {
353 if (node
->op() == CompareEq
354 && Node::shouldSpeculateBoolean(node
->child1().node(), node
->child2().node())) {
355 fixEdge
<BooleanUse
>(node
->child1());
356 fixEdge
<BooleanUse
>(node
->child2());
357 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
360 if (Node::shouldSpeculateInt32OrBoolean(node
->child1().node(), node
->child2().node())) {
361 fixIntOrBooleanEdge(node
->child1());
362 fixIntOrBooleanEdge(node
->child2());
363 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
367 && Node::shouldSpeculateMachineInt(node
->child1().node(), node
->child2().node())) {
368 fixEdge
<Int52RepUse
>(node
->child1());
369 fixEdge
<Int52RepUse
>(node
->child2());
370 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
373 if (Node::shouldSpeculateNumberOrBoolean(node
->child1().node(), node
->child2().node())) {
374 fixDoubleOrBooleanEdge(node
->child1());
375 fixDoubleOrBooleanEdge(node
->child2());
376 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
379 if (node
->op() != CompareEq
)
381 if (node
->child1()->shouldSpeculateStringIdent() && node
->child2()->shouldSpeculateStringIdent()) {
382 fixEdge
<StringIdentUse
>(node
->child1());
383 fixEdge
<StringIdentUse
>(node
->child2());
384 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
387 if (node
->child1()->shouldSpeculateString() && node
->child2()->shouldSpeculateString() && GPRInfo::numberOfRegisters
>= 7) {
388 fixEdge
<StringUse
>(node
->child1());
389 fixEdge
<StringUse
>(node
->child2());
390 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
393 if (node
->child1()->shouldSpeculateObject() && node
->child2()->shouldSpeculateObject()) {
394 fixEdge
<ObjectUse
>(node
->child1());
395 fixEdge
<ObjectUse
>(node
->child2());
396 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
399 if (node
->child1()->shouldSpeculateObject() && node
->child2()->shouldSpeculateObjectOrOther()) {
400 fixEdge
<ObjectUse
>(node
->child1());
401 fixEdge
<ObjectOrOtherUse
>(node
->child2());
402 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
405 if (node
->child1()->shouldSpeculateObjectOrOther() && node
->child2()->shouldSpeculateObject()) {
406 fixEdge
<ObjectOrOtherUse
>(node
->child1());
407 fixEdge
<ObjectUse
>(node
->child2());
408 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
414 case CompareStrictEq
: {
415 if (Node::shouldSpeculateBoolean(node
->child1().node(), node
->child2().node())) {
416 fixEdge
<BooleanUse
>(node
->child1());
417 fixEdge
<BooleanUse
>(node
->child2());
420 if (Node::shouldSpeculateInt32(node
->child1().node(), node
->child2().node())) {
421 fixEdge
<Int32Use
>(node
->child1());
422 fixEdge
<Int32Use
>(node
->child2());
426 && Node::shouldSpeculateMachineInt(node
->child1().node(), node
->child2().node())) {
427 fixEdge
<Int52RepUse
>(node
->child1());
428 fixEdge
<Int52RepUse
>(node
->child2());
431 if (Node::shouldSpeculateNumber(node
->child1().node(), node
->child2().node())) {
432 fixEdge
<DoubleRepUse
>(node
->child1());
433 fixEdge
<DoubleRepUse
>(node
->child2());
436 if (node
->child1()->shouldSpeculateStringIdent() && node
->child2()->shouldSpeculateStringIdent()) {
437 fixEdge
<StringIdentUse
>(node
->child1());
438 fixEdge
<StringIdentUse
>(node
->child2());
441 if (node
->child1()->shouldSpeculateString() && node
->child2()->shouldSpeculateString() && (GPRInfo::numberOfRegisters
>= 7 || isFTL(m_graph
.m_plan
.mode
))) {
442 fixEdge
<StringUse
>(node
->child1());
443 fixEdge
<StringUse
>(node
->child2());
446 if (node
->child1()->shouldSpeculateObject() && node
->child2()->shouldSpeculateObject()) {
447 fixEdge
<ObjectUse
>(node
->child1());
448 fixEdge
<ObjectUse
>(node
->child2());
451 if (node
->child1()->shouldSpeculateMisc()) {
452 fixEdge
<MiscUse
>(node
->child1());
455 if (node
->child2()->shouldSpeculateMisc()) {
456 fixEdge
<MiscUse
>(node
->child2());
459 if (node
->child1()->shouldSpeculateStringIdent()
460 && node
->child2()->shouldSpeculateNotStringVar()) {
461 fixEdge
<StringIdentUse
>(node
->child1());
462 fixEdge
<NotStringVarUse
>(node
->child2());
465 if (node
->child2()->shouldSpeculateStringIdent()
466 && node
->child1()->shouldSpeculateNotStringVar()) {
467 fixEdge
<StringIdentUse
>(node
->child2());
468 fixEdge
<NotStringVarUse
>(node
->child1());
471 if (node
->child1()->shouldSpeculateString() && (GPRInfo::numberOfRegisters
>= 8 || isFTL(m_graph
.m_plan
.mode
))) {
472 fixEdge
<StringUse
>(node
->child1());
475 if (node
->child2()->shouldSpeculateString() && (GPRInfo::numberOfRegisters
>= 8 || isFTL(m_graph
.m_plan
.mode
))) {
476 fixEdge
<StringUse
>(node
->child2());
482 case StringFromCharCode
:
483 fixEdge
<Int32Use
>(node
->child1());
487 case StringCharCodeAt
: {
488 // Currently we have no good way of refining these.
489 ASSERT(node
->arrayMode() == ArrayMode(Array::String
));
490 blessArrayOperation(node
->child1(), node
->child2(), node
->child3());
491 fixEdge
<KnownCellUse
>(node
->child1());
492 fixEdge
<Int32Use
>(node
->child2());
498 node
->arrayMode().refine(
500 node
->child1()->prediction(),
501 node
->child2()->prediction(),
502 SpecNone
, node
->flags()));
504 blessArrayOperation(node
->child1(), node
->child2(), node
->child3());
506 ArrayMode arrayMode
= node
->arrayMode();
507 switch (arrayMode
.type()) {
509 if (arrayMode
.arrayClass() == Array::OriginalArray
510 && arrayMode
.speculation() == Array::InBounds
511 && m_graph
.globalObjectFor(node
->origin
.semantic
)->arrayPrototypeChainIsSane()
512 && !(node
->flags() & NodeBytecodeUsesAsOther
))
513 node
->setArrayMode(arrayMode
.withSpeculation(Array::SaneChain
));
517 if ((node
->prediction() & ~SpecString
)
518 || m_graph
.hasExitSite(node
->origin
.semantic
, OutOfBounds
))
519 node
->setArrayMode(arrayMode
.withSpeculation(Array::OutOfBounds
));
526 arrayMode
= node
->arrayMode();
527 switch (arrayMode
.type()) {
528 case Array::SelectUsingPredictions
:
529 case Array::Unprofiled
:
530 case Array::Undecided
:
531 RELEASE_ASSERT_NOT_REACHED();
534 #if USE(JSVALUE32_64)
535 fixEdge
<CellUse
>(node
->child1()); // Speculating cell due to register pressure on 32-bit.
538 case Array::ForceExit
:
541 fixEdge
<KnownCellUse
>(node
->child1());
542 fixEdge
<Int32Use
>(node
->child2());
546 switch (arrayMode
.type()) {
548 if (!arrayMode
.isOutOfBounds())
549 node
->setResult(NodeResultDouble
);
552 case Array::Float32Array
:
553 case Array::Float64Array
:
554 node
->setResult(NodeResultDouble
);
557 case Array::Uint32Array
:
558 if (node
->shouldSpeculateInt32())
560 if (node
->shouldSpeculateMachineInt() && enableInt52())
561 node
->setResult(NodeResultInt52
);
563 node
->setResult(NodeResultDouble
);
575 case PutByValAlias
: {
576 Edge
& child1
= m_graph
.varArgChild(node
, 0);
577 Edge
& child2
= m_graph
.varArgChild(node
, 1);
578 Edge
& child3
= m_graph
.varArgChild(node
, 2);
581 node
->arrayMode().refine(
583 child1
->prediction(),
584 child2
->prediction(),
585 child3
->prediction()));
587 blessArrayOperation(child1
, child2
, m_graph
.varArgChild(node
, 3));
589 switch (node
->arrayMode().modeForPut().type()) {
590 case Array::SelectUsingPredictions
:
591 case Array::Unprofiled
:
592 case Array::Undecided
:
593 RELEASE_ASSERT_NOT_REACHED();
595 case Array::ForceExit
:
597 #if USE(JSVALUE32_64)
598 // Due to register pressure on 32-bit, we speculate cell and
599 // ignore the base-is-not-cell case entirely by letting the
600 // baseline JIT handle it.
601 fixEdge
<CellUse
>(child1
);
605 fixEdge
<KnownCellUse
>(child1
);
606 fixEdge
<Int32Use
>(child2
);
607 fixEdge
<Int32Use
>(child3
);
610 fixEdge
<KnownCellUse
>(child1
);
611 fixEdge
<Int32Use
>(child2
);
612 fixEdge
<DoubleRepRealUse
>(child3
);
614 case Array::Int8Array
:
615 case Array::Int16Array
:
616 case Array::Int32Array
:
617 case Array::Uint8Array
:
618 case Array::Uint8ClampedArray
:
619 case Array::Uint16Array
:
620 case Array::Uint32Array
:
621 fixEdge
<KnownCellUse
>(child1
);
622 fixEdge
<Int32Use
>(child2
);
623 if (child3
->shouldSpeculateInt32())
624 fixIntOrBooleanEdge(child3
);
625 else if (child3
->shouldSpeculateMachineInt())
626 fixEdge
<Int52RepUse
>(child3
);
628 fixDoubleOrBooleanEdge(child3
);
630 case Array::Float32Array
:
631 case Array::Float64Array
:
632 fixEdge
<KnownCellUse
>(child1
);
633 fixEdge
<Int32Use
>(child2
);
634 fixDoubleOrBooleanEdge(child3
);
636 case Array::Contiguous
:
637 case Array::ArrayStorage
:
638 case Array::SlowPutArrayStorage
:
639 case Array::Arguments
:
640 fixEdge
<KnownCellUse
>(child1
);
641 fixEdge
<Int32Use
>(child2
);
642 insertStoreBarrier(m_indexInBlock
, child1
);
645 fixEdge
<KnownCellUse
>(child1
);
646 fixEdge
<Int32Use
>(child2
);
653 // May need to refine the array mode in case the value prediction contravenes
654 // the array prediction. For example, we may have evidence showing that the
655 // array is in Int32 mode, but the value we're storing is likely to be a double.
656 // Then we should turn this into a conversion to Double array followed by the
657 // push. On the other hand, we absolutely don't want to refine based on the
658 // base prediction. If it has non-cell garbage in it, then we want that to be
659 // ignored. That's because ArrayPush can't handle any array modes that aren't
660 // array-related - so if refine() turned this into a "Generic" ArrayPush then
661 // that would break things.
663 node
->arrayMode().refine(
665 node
->child1()->prediction() & SpecCell
,
667 node
->child2()->prediction()));
668 blessArrayOperation(node
->child1(), Edge(), node
->child3());
669 fixEdge
<KnownCellUse
>(node
->child1());
671 switch (node
->arrayMode().type()) {
673 fixEdge
<Int32Use
>(node
->child2());
676 fixEdge
<DoubleRepRealUse
>(node
->child2());
678 case Array::Contiguous
:
679 case Array::ArrayStorage
:
680 insertStoreBarrier(m_indexInBlock
, node
->child1());
689 blessArrayOperation(node
->child1(), Edge(), node
->child2());
690 fixEdge
<KnownCellUse
>(node
->child1());
696 fixEdge
<CellUse
>(node
->child1());
697 fixEdge
<CellUse
>(node
->child2());
702 if (node
->child1()->shouldSpeculateBoolean())
703 fixEdge
<BooleanUse
>(node
->child1());
704 else if (node
->child1()->shouldSpeculateObjectOrOther())
705 fixEdge
<ObjectOrOtherUse
>(node
->child1());
706 // FIXME: We should just be able to do shouldSpeculateInt32OrBoolean() and
707 // shouldSpeculateNumberOrBoolean() here, but we can't because then the Branch
708 // could speculate on the result of a non-speculative conversion node.
709 // https://bugs.webkit.org/show_bug.cgi?id=126778
710 else if (node
->child1()->shouldSpeculateInt32())
711 fixEdge
<Int32Use
>(node
->child1());
712 else if (node
->child1()->shouldSpeculateNumber())
713 fixEdge
<DoubleRepUse
>(node
->child1());
715 Node
* logicalNot
= node
->child1().node();
716 if (logicalNot
->op() == LogicalNot
) {
718 // Make sure that OSR exit can't observe the LogicalNot. If it can,
719 // then we must compute it and cannot peephole around it.
722 for (unsigned i
= m_indexInBlock
; i
--;) {
723 Node
* candidate
= m_block
->at(i
);
724 if (candidate
== logicalNot
) {
728 if (candidate
->canExit()) {
734 ASSERT_UNUSED(found
, found
);
737 Edge newChildEdge
= logicalNot
->child1();
738 if (newChildEdge
->hasBooleanResult()) {
739 node
->children
.setChild1(newChildEdge
);
741 BranchData
* data
= node
->branchData();
742 std::swap(data
->taken
, data
->notTaken
);
750 SwitchData
* data
= node
->switchData();
751 switch (data
->kind
) {
753 if (node
->child1()->shouldSpeculateInt32())
754 fixEdge
<Int32Use
>(node
->child1());
757 if (node
->child1()->shouldSpeculateString())
758 fixEdge
<StringUse
>(node
->child1());
761 if (node
->child1()->shouldSpeculateStringIdent())
762 fixEdge
<StringIdentUse
>(node
->child1());
763 else if (node
->child1()->shouldSpeculateString())
764 fixEdge
<StringUse
>(node
->child1());
771 fixupToPrimitive(node
);
780 case NewStringObject
: {
781 fixEdge
<KnownStringUse
>(node
->child1());
786 for (unsigned i
= m_graph
.varArgNumChildren(node
); i
--;) {
787 node
->setIndexingType(
788 leastUpperBoundOfIndexingTypeAndType(
789 node
->indexingType(), m_graph
.varArgChild(node
, i
)->prediction()));
791 switch (node
->indexingType()) {
792 case ALL_BLANK_INDEXING_TYPES
:
795 case ALL_UNDECIDED_INDEXING_TYPES
:
796 if (node
->numChildren()) {
797 // This will only happen if the children have no type predictions. We
798 // would have already exited by now, but insert a forced exit just to
800 m_insertionSet
.insertNode(
801 m_indexInBlock
, SpecNone
, ForceOSRExit
, node
->origin
);
804 case ALL_INT32_INDEXING_TYPES
:
805 for (unsigned operandIndex
= 0; operandIndex
< node
->numChildren(); ++operandIndex
)
806 fixEdge
<Int32Use
>(m_graph
.m_varArgChildren
[node
->firstChild() + operandIndex
]);
808 case ALL_DOUBLE_INDEXING_TYPES
:
809 for (unsigned operandIndex
= 0; operandIndex
< node
->numChildren(); ++operandIndex
)
810 fixEdge
<DoubleRepRealUse
>(m_graph
.m_varArgChildren
[node
->firstChild() + operandIndex
]);
812 case ALL_CONTIGUOUS_INDEXING_TYPES
:
813 case ALL_ARRAY_STORAGE_INDEXING_TYPES
:
822 case NewTypedArray
: {
823 if (node
->child1()->shouldSpeculateInt32()) {
824 fixEdge
<Int32Use
>(node
->child1());
825 node
->clearFlags(NodeMustGenerate
| NodeClobbersWorld
);
831 case NewArrayWithSize
: {
832 fixEdge
<Int32Use
>(node
->child1());
837 ECMAMode ecmaMode
= m_graph
.executableFor(node
->origin
.semantic
)->isStrictMode() ? StrictMode
: NotStrictMode
;
839 if (node
->child1()->shouldSpeculateOther()) {
840 if (ecmaMode
== StrictMode
) {
841 fixEdge
<OtherUse
>(node
->child1());
842 node
->convertToIdentity();
846 m_insertionSet
.insertNode(
847 m_indexInBlock
, SpecNone
, Phantom
, node
->origin
,
848 Edge(node
->child1().node(), OtherUse
));
849 observeUseKindOnNode
<OtherUse
>(node
->child1().node());
850 node
->convertToWeakConstant(m_graph
.globalThisObjectFor(node
->origin
.semantic
));
854 if (isFinalObjectSpeculation(node
->child1()->prediction())) {
855 fixEdge
<FinalObjectUse
>(node
->child1());
856 node
->convertToIdentity();
863 case GetMyArgumentByVal
:
864 case GetMyArgumentByValSafe
: {
865 fixEdge
<Int32Use
>(node
->child1());
870 fixEdge
<KnownCellUse
>(node
->child1());
871 insertStoreBarrier(m_indexInBlock
, node
->child1());
875 case PutClosureVar
: {
876 fixEdge
<KnownCellUse
>(node
->child1());
877 insertStoreBarrier(m_indexInBlock
, node
->child1());
881 case GetClosureRegisters
:
885 fixEdge
<KnownCellUse
>(node
->child1());
889 case AllocatePropertyStorage
:
890 case ReallocatePropertyStorage
: {
891 fixEdge
<KnownCellUse
>(node
->child1());
892 insertStoreBarrier(m_indexInBlock
+ 1, node
->child1());
898 if (!node
->child1()->shouldSpeculateCell())
900 StringImpl
* impl
= m_graph
.identifiers()[node
->identifierNumber()];
901 if (impl
== vm().propertyNames
->length
.impl()) {
902 attemptToMakeGetArrayLength(node
);
905 if (impl
== vm().propertyNames
->byteLength
.impl()) {
906 attemptToMakeGetTypedArrayByteLength(node
);
909 if (impl
== vm().propertyNames
->byteOffset
.impl()) {
910 attemptToMakeGetTypedArrayByteOffset(node
);
913 fixEdge
<CellUse
>(node
->child1());
919 case PutByIdDirect
: {
920 fixEdge
<CellUse
>(node
->child1());
921 insertStoreBarrier(m_indexInBlock
, node
->child1());
925 case CheckExecutable
:
927 case StructureTransitionWatchpoint
:
929 case CheckHasInstance
:
932 fixEdge
<CellUse
>(node
->child1());
937 case ArrayifyToStructure
: {
938 fixEdge
<CellUse
>(node
->child1());
940 fixEdge
<Int32Use
>(node
->child2());
945 if (!node
->child1()->hasStorageResult())
946 fixEdge
<KnownCellUse
>(node
->child1());
947 fixEdge
<KnownCellUse
>(node
->child2());
951 case MultiGetByOffset
: {
952 fixEdge
<CellUse
>(node
->child1());
957 if (!node
->child1()->hasStorageResult())
958 fixEdge
<KnownCellUse
>(node
->child1());
959 fixEdge
<KnownCellUse
>(node
->child2());
960 insertStoreBarrier(m_indexInBlock
, node
->child2());
964 case MultiPutByOffset
: {
965 fixEdge
<CellUse
>(node
->child1());
966 insertStoreBarrier(m_indexInBlock
, node
->child1());
971 if (!(node
->child1()->prediction() & ~SpecCell
))
972 fixEdge
<CellUse
>(node
->child1());
973 fixEdge
<CellUse
>(node
->child2());
978 // FIXME: We should at some point have array profiling on op_in, in which
979 // case we would be able to turn this into a kind of GetByVal.
981 fixEdge
<CellUse
>(node
->child2());
987 switch (node
->child1().useKind()) {
989 if (node
->child1()->shouldSpeculateInt32ForArithmetic())
990 node
->child1().setUseKind(Int32Use
);
995 observeUseKindOnEdge(node
->child1());
1000 RELEASE_ASSERT(enableInt52());
1001 node
->convertToIdentity();
1002 fixEdge
<Int52RepUse
>(node
->child1());
1003 node
->setResult(NodeResultInt52
);
1007 case GetArrayLength
:
1011 case PhantomPutStructure
:
1012 case GetIndexedPropertyStorage
:
1013 case GetTypedArrayByteOffset
:
1015 case CheckTierUpInLoop
:
1016 case CheckTierUpAtReturn
:
1017 case CheckTierUpAndOSREnter
:
1018 case InvalidationPoint
:
1021 case ConstantStoragePointer
:
1024 case HardPhantom
: // HardPhantom would be trivial to handle but anyway we assert that we won't see it here yet.
1028 case DoubleConstant
:
1030 case Identity
: // This should have been cleaned up.
1031 case BooleanToNumber
:
1032 // These are just nodes that we don't currently expect to see during fixup.
1033 // If we ever wanted to insert them prior to fixup, then we just have to create
1034 // fixup rules for them.
1035 RELEASE_ASSERT_NOT_REACHED();
1038 case PutGlobalVar
: {
1039 Node
* globalObjectNode
= m_insertionSet
.insertNode(
1040 m_indexInBlock
, SpecNone
, WeakJSConstant
, node
->origin
,
1041 OpInfo(m_graph
.globalObjectFor(node
->origin
.semantic
)));
1042 Node
* barrierNode
= m_graph
.addNode(
1043 SpecNone
, StoreBarrier
, m_currentNode
->origin
,
1044 Edge(globalObjectNode
, KnownCellUse
));
1045 m_insertionSet
.insert(m_indexInBlock
, barrierNode
);
1049 case TearOffActivation
: {
1050 Node
* barrierNode
= m_graph
.addNode(
1051 SpecNone
, StoreBarrierWithNullCheck
, m_currentNode
->origin
,
1052 Edge(node
->child1().node(), UntypedUse
));
1053 m_insertionSet
.insert(m_indexInBlock
, barrierNode
);
1058 if (node
->child1()->shouldSpeculateString()) {
1059 m_insertionSet
.insertNode(
1060 m_indexInBlock
, SpecNone
, Phantom
, node
->origin
,
1061 Edge(node
->child1().node(), StringUse
));
1062 m_graph
.convertToConstant(node
, jsBoolean(true));
1063 observeUseKindOnNode
<StringUse
>(node
);
1067 #if !ASSERT_DISABLED
1068 // Have these no-op cases here to ensure that nobody forgets to add handlers for new opcodes.
1071 case WeakJSConstant
:
1076 case GetLocalUnlinked
:
1081 case VariableWatchpoint
:
1082 case VarInjectionWatchpoint
:
1083 case AllocationProfileWatchpoint
:
1087 case NewArrayBuffer
:
1090 case ProfileWillCall
:
1091 case ProfileDidCall
:
1097 case CreateActivation
:
1098 case CreateArguments
:
1099 case PhantomArguments
:
1100 case TearOffArguments
:
1101 case GetMyArgumentsLength
:
1102 case GetMyArgumentsLengthSafe
:
1103 case CheckArgumentsNotCreated
:
1105 case NewFunctionNoCheck
:
1106 case NewFunctionExpression
:
1110 case ThrowReferenceError
:
1111 case CountExecution
:
1113 case CheckWatchdogTimer
:
1115 case ExtractOSREntryLocal
:
1118 case StoreBarrierWithNullCheck
:
1119 case FunctionReentryWatchpoint
:
1120 case TypedArrayWatchpoint
:
1131 template<UseKind useKind
>
1132 void createToString(Node
* node
, Edge
& edge
)
1134 edge
.setNode(m_insertionSet
.insertNode(
1135 m_indexInBlock
, SpecString
, ToString
, node
->origin
,
1136 Edge(edge
.node(), useKind
)));
1139 template<UseKind useKind
>
1140 void attemptToForceStringArrayModeByToStringConversion(ArrayMode
& arrayMode
, Node
* node
)
1142 ASSERT(arrayMode
== ArrayMode(Array::Generic
));
1144 if (!canOptimizeStringObjectAccess(node
->origin
.semantic
))
1147 createToString
<useKind
>(node
, node
->child1());
1148 arrayMode
= ArrayMode(Array::String
);
1151 template<UseKind useKind
>
1152 bool isStringObjectUse()
1155 case StringObjectUse
:
1156 case StringOrStringObjectUse
:
1163 template<UseKind useKind
>
1164 void convertStringAddUse(Node
* node
, Edge
& edge
)
1166 if (useKind
== StringUse
) {
1167 // This preserves the binaryUseKind() invariant ot ValueAdd: ValueAdd's
1168 // two edges will always have identical use kinds, which makes the
1169 // decision process much easier.
1170 observeUseKindOnNode
<StringUse
>(edge
.node());
1171 m_insertionSet
.insertNode(
1172 m_indexInBlock
, SpecNone
, Phantom
, node
->origin
,
1173 Edge(edge
.node(), StringUse
));
1174 edge
.setUseKind(KnownStringUse
);
1178 // FIXME: We ought to be able to have a ToPrimitiveToString node.
1180 observeUseKindOnNode
<useKind
>(edge
.node());
1181 createToString
<useKind
>(node
, edge
);
1184 void convertToMakeRope(Node
* node
)
1186 node
->setOpAndDefaultFlags(MakeRope
);
1187 fixupMakeRope(node
);
1190 void fixupMakeRope(Node
* node
)
1192 for (unsigned i
= 0; i
< AdjacencyList::Size
; ++i
) {
1193 Edge
& edge
= node
->children
.child(i
);
1196 edge
.setUseKind(KnownStringUse
);
1197 if (!m_graph
.isConstant(edge
.node()))
1199 JSString
* string
= jsCast
<JSString
*>(m_graph
.valueOfJSConstant(edge
.node()).asCell());
1200 if (string
->length())
1203 // Don't allow the MakeRope to have zero children.
1204 if (!i
&& !node
->child2())
1207 node
->children
.removeEdge(i
--);
1210 if (!node
->child2()) {
1211 ASSERT(!node
->child3());
1212 node
->convertToIdentity();
// Strength-reduces a ToPrimitive based on the child's predicted type:
// values that are already primitive (int32, string) make it an Identity;
// StringObject-ish inputs become a ToString when the prototype chain is
// sane. Otherwise the node is left as a generic ToPrimitive.
void fixupToPrimitive(Node* node)
{
    if (node->child1()->shouldSpeculateInt32()) {
        fixEdge<Int32Use>(node->child1());
        node->convertToIdentity();
        return;
    }
    
    if (node->child1()->shouldSpeculateString()) {
        fixEdge<StringUse>(node->child1());
        node->convertToIdentity();
        return;
    }
    
    if (node->child1()->shouldSpeculateStringObject()
        && canOptimizeStringObjectAccess(node->origin.semantic)) {
        fixEdge<StringObjectUse>(node->child1());
        node->convertToToString();
        return;
    }
    
    if (node->child1()->shouldSpeculateStringOrStringObject()
        && canOptimizeStringObjectAccess(node->origin.semantic)) {
        fixEdge<StringOrStringObjectUse>(node->child1());
        node->convertToToString();
        return;
    }
}
// Picks the cheapest speculation for a ToString based on the child's
// prediction: an actual string makes the node an Identity; StringObject
// kinds keep the ToString but with a typed edge; any other cell at least
// avoids the generic (value) path.
void fixupToString(Node* node)
{
    if (node->child1()->shouldSpeculateString()) {
        fixEdge<StringUse>(node->child1());
        node->convertToIdentity();
        return;
    }
    
    if (node->child1()->shouldSpeculateStringObject()
        && canOptimizeStringObjectAccess(node->origin.semantic)) {
        fixEdge<StringObjectUse>(node->child1());
        return;
    }
    
    if (node->child1()->shouldSpeculateStringOrStringObject()
        && canOptimizeStringObjectAccess(node->origin.semantic)) {
        fixEdge<StringOrStringObjectUse>(node->child1());
        return;
    }
    
    if (node->child1()->shouldSpeculateCell()) {
        fixEdge<CellUse>(node->child1());
        return;
    }
}
// Tries to turn a ValueAdd whose left operand is string-like (per
// leftUseKind) into a MakeRope. Returns false if StringObject access can't
// be optimized for a StringObject-ish left operand. The ordering of checks
// below is semantically significant: all type checks on @left must be
// emitted before anything effectful happens to @right.
template<UseKind leftUseKind>
bool attemptToMakeFastStringAdd(Node* node, Edge& left, Edge& right)
{
    // Remember the original operands so we can keep them live across the
    // checks we are about to insert.
    Node* originalLeft = left.node();
    Node* originalRight = right.node();
    
    ASSERT(leftUseKind == StringUse || leftUseKind == StringObjectUse || leftUseKind == StringOrStringObjectUse);
    
    if (isStringObjectUse<leftUseKind>() && !canOptimizeStringObjectAccess(node->origin.semantic))
        return false;
    
    convertStringAddUse<leftUseKind>(node, left);
    
    if (right->shouldSpeculateString())
        convertStringAddUse<StringUse>(node, right);
    else if (right->shouldSpeculateStringObject() && canOptimizeStringObjectAccess(node->origin.semantic))
        convertStringAddUse<StringObjectUse>(node, right);
    else if (right->shouldSpeculateStringOrStringObject() && canOptimizeStringObjectAccess(node->origin.semantic))
        convertStringAddUse<StringOrStringObjectUse>(node, right);
    else {
        // At this point we know that the other operand is something weird. The semantically correct
        // way of dealing with this is:
        //
        // MakeRope(@left, ToString(ToPrimitive(@right)))
        //
        // So that's what we emit. NB, we need to do all relevant type checks on @left before we do
        // anything to @right, since ToPrimitive may be effectful.
        
        Node* toPrimitive = m_insertionSet.insertNode(
            m_indexInBlock, resultOfToPrimitive(right->prediction()), ToPrimitive,
            node->origin, Edge(right.node()));
        Node* toString = m_insertionSet.insertNode(
            m_indexInBlock, SpecString, ToString, node->origin, Edge(toPrimitive));
        
        fixupToPrimitive(toPrimitive);
        fixupToString(toString);
        
        right.setNode(toString);
    }
    
    // We're doing checks up there, so we need to make sure that the
    // *original* inputs to the addition are live up to here.
    m_insertionSet.insertNode(
        m_indexInBlock, SpecNone, Phantom, node->origin,
        Edge(originalLeft), Edge(originalRight));
    
    convertToMakeRope(node);
    return true;
}
1321 bool isStringPrototypeMethodSane(Structure
* stringPrototypeStructure
, StringImpl
* uid
)
1323 unsigned attributesUnused
;
1324 JSCell
* specificValue
;
1325 PropertyOffset offset
= stringPrototypeStructure
->getConcurrently(
1326 vm(), uid
, attributesUnused
, specificValue
);
1327 if (!isValidOffset(offset
))
1333 if (!specificValue
->inherits(JSFunction::info()))
1336 JSFunction
* function
= jsCast
<JSFunction
*>(specificValue
);
1337 if (function
->executable()->intrinsicFor(CodeForCall
) != StringPrototypeValueOfIntrinsic
)
// Decides whether it is safe to treat a StringObject as its underlying
// string at this code origin: no prior NotStringObject exits, the
// String.prototype structure is watchable and non-dictionary, and both
// valueOf and toString are still the original intrinsic.
bool canOptimizeStringObjectAccess(const CodeOrigin& codeOrigin)
{
    if (m_graph.hasExitSite(codeOrigin, NotStringObject))
        return false;
    
    Structure* stringObjectStructure = m_graph.globalObjectFor(codeOrigin)->stringObjectStructure();
    ASSERT(stringObjectStructure->storedPrototype().isObject());
    ASSERT(stringObjectStructure->storedPrototype().asCell()->classInfo() == StringPrototype::info());
    
    JSObject* stringPrototypeObject = asObject(stringObjectStructure->storedPrototype());
    Structure* stringPrototypeStructure = stringPrototypeObject->structure();
    // A watchpoint on the prototype's transitions lets us assume the methods
    // below cannot change without invalidating this code.
    if (!m_graph.watchpoints().isStillValid(stringPrototypeStructure->transitionWatchpointSet()))
        return false;
    
    if (stringPrototypeStructure->isDictionary())
        return false;
    
    // We're being conservative here. We want DFG's ToString on StringObject to be
    // used in both numeric contexts (that would call valueOf()) and string contexts
    // (that would call toString()). We don't want the DFG to have to distinguish
    // between the two, just because that seems like it would get confusing. So we
    // just require both methods to be sane.
    if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->valueOf.impl()))
        return false;
    if (!isStringPrototypeMethodSane(stringPrototypeStructure, vm().propertyNames->toString.impl()))
        return false;
    
    return true;
}
// Second pass over a block: gives each GetLocal the result representation
// implied by its variable's flush format, and types each SetLocal's child
// edge to match. NOTE(review): the leading null-check/assignment lines were
// lost in extraction and are reconstructed — confirm against upstream.
void fixupGetAndSetLocalsInBlock(BasicBlock* block)
{
    if (!block)
        return;
    ASSERT(block->isReachable);
    m_block = block;
    for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
        Node* node = m_currentNode = block->at(m_indexInBlock);
        if (node->op() != SetLocal && node->op() != GetLocal)
            continue;
        
        VariableAccessData* variable = node->variableAccessData();
        switch (node->op()) {
        case GetLocal:
            // A GetLocal of an unboxed-double or Int52 local produces that
            // representation directly.
            switch (variable->flushFormat()) {
            case FlushedDouble:
                node->setResult(NodeResultDouble);
                break;
            case FlushedInt52:
                node->setResult(NodeResultInt52);
                break;
            default:
                break;
            }
            break;
            
        case SetLocal:
            switch (variable->flushFormat()) {
            case FlushedJSValue:
                break;
            case FlushedDouble:
                fixEdge<DoubleRepUse>(node->child1());
                break;
            case FlushedInt32:
                fixEdge<Int32Use>(node->child1());
                break;
            case FlushedInt52:
                fixEdge<Int52RepUse>(node->child1());
                break;
            case FlushedCell:
                fixEdge<CellUse>(node->child1());
                break;
            case FlushedBoolean:
                fixEdge<BooleanUse>(node->child1());
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    
    m_insertionSet.execute(block);
}
// Emits the checks required by a specific ArrayMode before 'array' may be
// accessed: a string speculation for Array::String, an Arrayify[ToStructure]
// when the mode converts, or a CheckStructure/CheckArray otherwise. Returns
// the storage node (GetButterfly or GetIndexedPropertyStorage) if the mode
// needs one per 'storageCheck', or null.
Node* checkArray(ArrayMode arrayMode, const NodeOrigin& origin, Node* array, Node* index, bool (*storageCheck)(const ArrayMode&) = canCSEStorage)
{
    ASSERT(arrayMode.isSpecific());
    
    if (arrayMode.type() == Array::String) {
        // Strings need only a type speculation, expressed as a Phantom.
        m_insertionSet.insertNode(
            m_indexInBlock, SpecNone, Phantom, origin, Edge(array, StringUse));
    } else {
        Structure* structure = arrayMode.originalArrayStructure(m_graph, origin.semantic);
        
        Edge indexEdge = index ? Edge(index, Int32Use) : Edge();
        
        if (arrayMode.doesConversion()) {
            // The mode may convert the object's indexing type; pin to the
            // original structure when one is known.
            if (structure) {
                m_insertionSet.insertNode(
                    m_indexInBlock, SpecNone, ArrayifyToStructure, origin,
                    OpInfo(structure), OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge);
            } else {
                m_insertionSet.insertNode(
                    m_indexInBlock, SpecNone, Arrayify, origin,
                    OpInfo(arrayMode.asWord()), Edge(array, CellUse), indexEdge);
            }
        } else {
            if (structure) {
                m_insertionSet.insertNode(
                    m_indexInBlock, SpecNone, CheckStructure, origin,
                    OpInfo(m_graph.addStructureSet(structure)), Edge(array, CellUse));
            } else {
                m_insertionSet.insertNode(
                    m_indexInBlock, SpecNone, CheckArray, origin,
                    OpInfo(arrayMode.asWord()), Edge(array, CellUse));
            }
        }
    }
    
    if (!storageCheck(arrayMode))
        return 0;
    
    if (arrayMode.usesButterfly()) {
        return m_insertionSet.insertNode(
            m_indexInBlock, SpecNone, GetButterfly, origin, Edge(array, CellUse));
    }
    
    return m_insertionSet.insertNode(
        m_indexInBlock, SpecNone, GetIndexedPropertyStorage, origin,
        OpInfo(arrayMode.asWord()), Edge(array, KnownCellUse));
}
// Finishes fixup of the current array-accessing node: inserts the checks
// for its ArrayMode via checkArray() and wires the resulting storage node
// into 'storageChild'. ForceExit modes get an OSR exit; Generic gets
// nothing; SelectUsingPredictions/Unprofiled must already be resolved.
void blessArrayOperation(Edge base, Edge index, Edge& storageChild)
{
    Node* node = m_currentNode;
    
    switch (node->arrayMode().type()) {
    case Array::ForceExit: {
        m_insertionSet.insertNode(
            m_indexInBlock, SpecNone, ForceOSRExit, node->origin);
        return;
    }
        
    case Array::SelectUsingPredictions:
    case Array::Unprofiled:
        RELEASE_ASSERT_NOT_REACHED();
        return;
        
    case Array::Generic:
        return;
        
    default: {
        Node* storage = checkArray(node->arrayMode(), node->origin, base.node(), index.node());
        if (!storage)
            return;
        
        storageChild = Edge(storage);
        return;
    } }
}
// NOTE(review): the body of this function was lost in extraction; the
// conditional-compilation form below is reconstructed — confirm upstream.
bool alwaysUnboxSimplePrimitives()
{
#if USE(JSVALUE64)
    return false;
#else
    // Any boolean, int, or cell value is profitable to unbox on 32-bit because it
    // avoids shuffling the tag/payload pair.
    return true;
#endif
}
1520 template<UseKind useKind
>
1521 void observeUseKindOnNode(Node
* node
)
1523 if (useKind
== UntypedUse
)
1525 observeUseKindOnNode(node
, useKind
);
1528 void observeUseKindOnEdge(Edge edge
)
1530 observeUseKindOnNode(edge
.node(), edge
.useKind());
// For GetLocal nodes, records that an unboxed representation of the local
// would be profitable given how this use consumes it. Any change flips
// m_profitabilityChanged so run() iterates to a fixpoint.
// NOTE(review): several case labels were lost in extraction and are
// reconstructed from the visible branch bodies — confirm upstream.
void observeUseKindOnNode(Node* node, UseKind useKind)
{
    if (node->op() != GetLocal)
        return;
    
    // FIXME: The way this uses alwaysUnboxSimplePrimitives() is suspicious.
    // https://bugs.webkit.org/show_bug.cgi?id=121518
    
    VariableAccessData* variable = node->variableAccessData();
    switch (useKind) {
    case Int32Use:
        if (alwaysUnboxSimplePrimitives()
            || isInt32Speculation(variable->prediction()))
            m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
        break;
    case NumberUse:
    case DoubleRepUse:
    case DoubleRepRealUse:
        if (variable->doubleFormatState() == UsingDoubleFormat)
            m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
        break;
    case BooleanUse:
        if (alwaysUnboxSimplePrimitives()
            || isBooleanSpeculation(variable->prediction()))
            m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
        break;
    case Int52RepUse:
        if (isMachineIntSpeculation(variable->prediction()))
            m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
        break;
    case CellUse:
    case KnownCellUse:
    case ObjectUse:
    case StringUse:
    case KnownStringUse:
    case StringObjectUse:
    case StringOrStringObjectUse:
        if (alwaysUnboxSimplePrimitives()
            || isCellSpeculation(variable->prediction()))
            m_profitabilityChanged |= variable->mergeIsProfitableToUnbox(true);
        break;
    default:
        break;
    }
}
// Sets 'edge' to the given use kind, first recording unboxing
// profitability for the node it points at.
template<UseKind useKind>
void fixEdge(Edge& edge)
{
    observeUseKindOnNode<useKind>(edge.node());
    edge.setUseKind(useKind);
}
1586 void insertStoreBarrier(unsigned indexInBlock
, Edge child1
)
1588 Node
* barrierNode
= m_graph
.addNode(SpecNone
, StoreBarrier
, m_currentNode
->origin
, child1
);
1589 m_insertionSet
.insert(indexInBlock
, barrierNode
);
// Coerces 'edge' to an int32-producing edge. If the child already looks
// like int32/boolean, a direct fix suffices; otherwise a ValueToInt32 is
// inserted, speculating the strongest kind the child's prediction allows.
void fixIntConvertingEdge(Edge& edge)
{
    Node* node = edge.node();
    if (node->shouldSpeculateInt32OrBoolean()) {
        fixIntOrBooleanEdge(edge);
        return;
    }
    
    UseKind useKind;
    if (node->shouldSpeculateMachineInt())
        useKind = Int52RepUse;
    else if (node->shouldSpeculateNumber())
        useKind = DoubleRepUse;
    else
        useKind = NotCellUse;
    Node* newNode = m_insertionSet.insertNode(
        m_indexInBlock, SpecInt32, ValueToInt32, m_currentNode->origin,
        Edge(node, useKind));
    observeUseKindOnNode(node, useKind);
    
    // The conversion's output is known int32; keep the original child alive
    // past the checks we just inserted.
    edge = Edge(newNode, KnownInt32Use);
    addRequiredPhantom(node);
}
// Coerces 'edge' to int32 where the child may produce booleans. If the
// child never produced a boolean, a plain Int32Use fix is enough; otherwise
// a BooleanToNumber conversion is inserted.
void fixIntOrBooleanEdge(Edge& edge)
{
    Node* node = edge.node();
    if (!node->sawBooleans()) {
        fixEdge<Int32Use>(edge);
        return;
    }
    
    UseKind useKind;
    if (node->shouldSpeculateBoolean())
        useKind = BooleanUse;
    else
        useKind = UntypedUse;
    Node* newNode = m_insertionSet.insertNode(
        m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin,
        Edge(node, useKind));
    observeUseKindOnNode(node, useKind);
    
    edge = Edge(newNode, Int32Use);
    addRequiredPhantom(node);
}
// Double-typed analogue of fixIntOrBooleanEdge(): if the child may produce
// booleans, route it through BooleanToNumber first, then consume the result
// as a double representation.
void fixDoubleOrBooleanEdge(Edge& edge)
{
    Node* node = edge.node();
    if (!node->sawBooleans()) {
        fixEdge<DoubleRepUse>(edge);
        return;
    }
    
    UseKind useKind;
    if (node->shouldSpeculateBoolean())
        useKind = BooleanUse;
    else
        useKind = UntypedUse;
    Node* newNode = m_insertionSet.insertNode(
        m_indexInBlock, SpecInt32, BooleanToNumber, m_currentNode->origin,
        Edge(node, useKind));
    observeUseKindOnNode(node, useKind);
    
    // The DoubleRepUse edge will get its representation conversion in the
    // later injectTypeConversions pass.
    edge = Edge(newNode, DoubleRepUse);
    addRequiredPhantom(node);
}
// Replaces a non-int32 numeric constant child with a new JSConstant holding
// its toInt32() truncation, registering the constant (and its write
// barrier) in the code block if it is not already present.
// NOTE(review): two argument lines of initializeLazyWriteBarrierForConstant
// were lost in extraction and are reconstructed — confirm upstream.
void truncateConstantToInt32(Edge& edge)
{
    Node* oldNode = edge.node();
    
    ASSERT(oldNode->hasConstant());
    JSValue value = m_graph.valueOfJSConstant(oldNode);
    if (value.isInt32())
        return;
    
    value = jsNumber(JSC::toInt32(value.asNumber()));
    ASSERT(value.isInt32());
    unsigned constantRegister;
    if (!codeBlock()->findConstant(value, constantRegister)) {
        constantRegister = codeBlock()->addConstantLazily();
        initializeLazyWriteBarrierForConstant(
            m_graph.m_plan.writeBarriers,
            codeBlock()->constants()[constantRegister],
            codeBlock(),
            constantRegister,
            codeBlock()->ownerExecutable(),
            value);
    }
    edge.setNode(m_insertionSet.insertNode(
        m_indexInBlock, SpecInt32, JSConstant, m_currentNode->origin,
        OpInfo(constantRegister)));
}
1687 void truncateConstantsIfNecessary(Node
* node
, AddSpeculationMode mode
)
1689 if (mode
!= SpeculateInt32AndTruncateConstants
)
1692 ASSERT(node
->child1()->hasConstant() || node
->child2()->hasConstant());
1693 if (node
->child1()->hasConstant())
1694 truncateConstantToInt32(node
->child1());
1696 truncateConstantToInt32(node
->child2());
// Tries to make the add speculate on integers: first as int32 (with
// overflow checking unless the bytecode tolerates truncation), then as
// Int52. Returns false if neither speculation is warranted.
bool attemptToMakeIntegerAdd(Node* node)
{
    AddSpeculationMode mode = m_graph.addSpeculationMode(node, FixupPass);
    if (mode != DontSpeculateInt32) {
        truncateConstantsIfNecessary(node, mode);
        fixIntOrBooleanEdge(node->child1());
        fixIntOrBooleanEdge(node->child2());
        if (bytecodeCanTruncateInteger(node->arithNodeFlags()))
            node->setArithMode(Arith::Unchecked);
        else
            node->setArithMode(Arith::CheckOverflow);
        return true;
    }
    
    if (m_graph.addShouldSpeculateMachineInt(node)) {
        fixEdge<Int52RepUse>(node->child1());
        fixEdge<Int52RepUse>(node->child2());
        node->setArithMode(Arith::CheckOverflow);
        node->setResult(NodeResultInt52);
        return true;
    }
    
    return false;
}
// Tries to convert a GetById of "length" into a GetArrayLength, consulting
// the baseline array profile to pick an ArrayMode. Returns false (keeping
// the GetById) when the prediction is not int32 or the refined mode does
// not support length access.
bool attemptToMakeGetArrayLength(Node* node)
{
    if (!isInt32Speculation(node->prediction()))
        return false;
    CodeBlock* profiledBlock = m_graph.baselineCodeBlockFor(node->origin.semantic);
    ArrayProfile* arrayProfile = 
        profiledBlock->getArrayProfile(node->origin.semantic.bytecodeIndex);
    ArrayMode arrayMode = ArrayMode(Array::SelectUsingPredictions);
    if (arrayProfile) {
        // Profile reads must hold the baseline code block's lock since this
        // can run concurrently with the main thread.
        ConcurrentJITLocker locker(profiledBlock->m_lock);
        arrayProfile->computeUpdatedPrediction(locker, profiledBlock);
        arrayMode = ArrayMode::fromObserved(locker, arrayProfile, Array::Read, false);
        if (arrayMode.type() == Array::Unprofiled) {
            // For normal array operations, it makes sense to treat Unprofiled
            // accesses as ForceExit and get more data rather than using
            // predictions and then possibly ending up with a Generic. But here,
            // we treat anything that is Unprofiled as Generic and keep the
            // GetById. I.e. ForceExit = Generic. So, there is no harm - and only
            // profit - from treating the Unprofiled case as
            // SelectUsingPredictions.
            arrayMode = ArrayMode(Array::SelectUsingPredictions);
        }
    }
    
    arrayMode = arrayMode.refine(
        m_graph, node, node->child1()->prediction(), node->prediction());
    
    if (arrayMode.type() == Array::Generic) {
        // Check if the input is something that we can't get array length for, but for which we
        // could insert some conversions in order to transform it into something that we can do it
        // anyway.
        if (node->child1()->shouldSpeculateStringObject())
            attemptToForceStringArrayModeByToStringConversion<StringObjectUse>(arrayMode, node);
        else if (node->child1()->shouldSpeculateStringOrStringObject())
            attemptToForceStringArrayModeByToStringConversion<StringOrStringObjectUse>(arrayMode, node);
    }
    
    if (!arrayMode.supportsLength())
        return false;
    
    convertToGetArrayLength(node, arrayMode);
    return true;
}
// Tries to convert a GetById of "byteLength" on a typed array into either a
// plain GetArrayLength (element size 1) or GetArrayLength << log2(element
// size), expressed as a BitLShift over a prepended length node.
bool attemptToMakeGetTypedArrayByteLength(Node* node)
{
    if (!isInt32Speculation(node->prediction()))
        return false;
    
    TypedArrayType type = typedArrayTypeFromSpeculation(node->child1()->prediction());
    if (!isTypedView(type))
        return false;
    
    if (elementSize(type) == 1) {
        convertToGetArrayLength(node, ArrayMode(toArrayType(type)));
        return true;
    }
    
    Node* length = prependGetArrayLength(
        node->origin, node->child1().node(), ArrayMode(toArrayType(type)));
    
    Node* shiftAmount = m_insertionSet.insertNode(
        m_indexInBlock, SpecInt32, JSConstant, node->origin,
        OpInfo(m_graph.constantRegisterForConstant(jsNumber(logElementSize(type)))));
    
    // We can use a BitLShift here because typed arrays will never have a byteLength
    // that overflows int32.
    node->setOp(BitLShift);
    node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
    observeUseKindOnNode(length, Int32Use);
    observeUseKindOnNode(shiftAmount, Int32Use);
    node->child1() = Edge(length, Int32Use);
    node->child2() = Edge(shiftAmount, Int32Use);
    return true;
}
// Repurposes the node as a GetArrayLength for the given mode, inserting the
// required array checks and wiring the storage node (if the mode needs
// storage to read the length) into child2.
void convertToGetArrayLength(Node* node, ArrayMode arrayMode)
{
    node->setOp(GetArrayLength);
    node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
    fixEdge<KnownCellUse>(node->child1());
    node->setArrayMode(arrayMode);
    
    Node* storage = checkArray(arrayMode, node->origin, node->child1().node(), 0, lengthNeedsStorage);
    if (!storage)
        return;
    
    node->child2() = Edge(storage);
}
// Inserts, before the current node, the checks plus a GetArrayLength for
// 'child' under the given mode, and returns the length node.
Node* prependGetArrayLength(NodeOrigin origin, Node* child, ArrayMode arrayMode)
{
    Node* storage = checkArray(arrayMode, origin, child, 0, lengthNeedsStorage);
    return m_insertionSet.insertNode(
        m_indexInBlock, SpecInt32, GetArrayLength, origin,
        OpInfo(arrayMode.asWord()), Edge(child, KnownCellUse), Edge(storage));
}
// Tries to convert a GetById of "byteOffset" on a typed array into a
// GetTypedArrayByteOffset, inserting the array checks (no storage needed).
bool attemptToMakeGetTypedArrayByteOffset(Node* node)
{
    if (!isInt32Speculation(node->prediction()))
        return false;
    
    TypedArrayType type = typedArrayTypeFromSpeculation(node->child1()->prediction());
    if (!isTypedView(type))
        return false;
    
    checkArray(
        ArrayMode(toArrayType(type)), node->origin, node->child1().node(),
        0, neverNeedsStorage);
    
    node->setOp(GetTypedArrayByteOffset);
    node->clearFlags(NodeMustGenerate | NodeClobbersWorld);
    fixEdge<KnownCellUse>(node->child1());
    return true;
}
// Final pass over a block: for each node, flush pending keep-alive
// Phantoms, relax representations where a conversion would be redundant,
// then inject Double/Int52/Value representation-conversion nodes on every
// edge that needs one. NOTE(review): the leading null-check lines were lost
// in extraction and are reconstructed — confirm upstream.
void injectTypeConversionsInBlock(BasicBlock* block)
{
    if (!block)
        return;
    ASSERT(block->isReachable);
    m_block = block;
    for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
        m_currentNode = block->at(m_indexInBlock);
        addPhantomsIfNecessary();
        tryToRelaxRepresentation(m_currentNode);
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_currentNode, injectTypeConversionsForEdge);
    }
    clearPhantomsAtEnd();
    m_insertionSet.execute(block);
}
// NOTE(review): the case labels of this switch were lost in extraction and
// are reconstructed — confirm upstream before relying on them.
void tryToRelaxRepresentation(Node* node)
{
    // Some operations may be able to operate more efficiently over looser representations.
    // Identify those here. This avoids inserting a redundant representation conversion.
    // Also, for some operations, like MovHint, this is a necessary optimization: inserting
    // an otherwise-dead conversion just for a MovHint would break OSR's understanding of
    // the IR.
    
    switch (node->op()) {
    case MovHint:
    case Check:
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_currentNode, fixEdgeRepresentation);
        break;
        
    case ValueToInt32:
        // ValueToInt32 can consume a boxed number directly; a DoubleRep edge
        // whose child does not actually produce a double would otherwise
        // force an unnecessary conversion.
        if (node->child1().useKind() == DoubleRepUse
            && !node->child1()->hasDoubleResult()) {
            node->child1().setUseKind(NumberUse);
            break;
        }
        break;
        
    default:
        break;
    }
}
// Rewrites an edge's use kind to match the representation its child
// actually produces, for consumers (like MovHint) that can accept any
// representation — avoiding a conversion node entirely.
void fixEdgeRepresentation(Node*, Edge& edge)
{
    switch (edge.useKind()) {
    case DoubleRepUse:
    case DoubleRepRealUse:
        if (edge->hasDoubleResult())
            break;
        
        if (edge->hasInt52Result())
            edge.setUseKind(Int52RepUse);
        else if (edge.useKind() == DoubleRepUse)
            edge.setUseKind(NumberUse);
        break;
        
    case Int52RepUse:
        // Nothing we can really do.
        break;
        
    case UntypedUse:
    case NumberUse:
        if (edge->hasDoubleResult())
            edge.setUseKind(DoubleRepUse);
        else if (edge->hasInt52Result())
            edge.setUseKind(Int52RepUse);
        break;
        
    default:
        break;
    }
}
// Inserts an explicit representation-conversion node on 'edge' when the
// representation the edge demands (double, Int52, or boxed JSValue) differs
// from what the child produces. The original child is kept alive via a
// deferred Phantom (addRequiredPhantom).
void injectTypeConversionsForEdge(Node* node, Edge& edge)
{
    ASSERT(node == m_currentNode);
    Node* result = nullptr;
    
    switch (edge.useKind()) {
    case DoubleRepUse:
    case DoubleRepRealUse:
    case DoubleRepMachineIntUse: {
        if (edge->hasDoubleResult())
            break;
        
        addRequiredPhantom(edge.node());

        if (edge->op() == JSConstant && m_graph.isNumberConstant(edge.node())) {
            // Fold the conversion: re-emit the constant directly as a
            // double constant.
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecBytecodeDouble, DoubleConstant, node->origin,
                OpInfo(m_graph.constantRegisterForConstant(
                    jsDoubleNumber(m_graph.valueOfNumberConstant(edge.node())))));
        } else if (edge->hasInt52Result()) {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecInt52AsDouble, DoubleRep, node->origin,
                Edge(edge.node(), Int52RepUse));
        } else {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecBytecodeDouble, DoubleRep, node->origin,
                Edge(edge.node(), NumberUse));
        }

        edge.setNode(result);
        break;
    }
        
    case Int52RepUse: {
        if (edge->hasInt52Result())
            break;
        
        addRequiredPhantom(edge.node());

        if (edge->op() == JSConstant && m_graph.isMachineIntConstant(edge.node())) {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecMachineInt, Int52Constant, node->origin,
                OpInfo(edge->constantNumber()));
        } else if (edge->hasDoubleResult()) {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecMachineInt, Int52Rep, node->origin,
                Edge(edge.node(), DoubleRepMachineIntUse));
        } else if (edge->shouldSpeculateInt32ForArithmetic()) {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecInt32, Int52Rep, node->origin,
                Edge(edge.node(), Int32Use));
        } else {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecMachineInt, Int52Rep, node->origin,
                Edge(edge.node(), MachineIntUse));
        }

        edge.setNode(result);
        break;
    }

    default: {
        // The edge wants a boxed JSValue; rebox double or Int52 results.
        if (!edge->hasDoubleResult() && !edge->hasInt52Result())
            break;
        
        addRequiredPhantom(edge.node());
        
        if (edge->hasDoubleResult()) {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecBytecodeDouble, ValueRep, node->origin,
                Edge(edge.node(), DoubleRepUse));
        } else {
            result = m_insertionSet.insertNode(
                m_indexInBlock, SpecInt32 | SpecInt52AsDouble, ValueRep, node->origin,
                Edge(edge.node(), Int52RepUse));
        }

        edge.setNode(result);
        break;
    } }
}
// Queues a node that must be kept alive by a Phantom inserted just after
// the current node (see addPhantomsIfNecessary()).
void addRequiredPhantom(Node* node)
{
    m_requiredPhantoms.append(node);
}
// Emits a Phantom, at the current insertion point, for every node queued by
// addRequiredPhantom(), then empties the queue. Iterates backwards over the
// queue (insertion order of the Phantoms is not semantically significant).
void addPhantomsIfNecessary()
{
    if (m_requiredPhantoms.isEmpty())
        return;
    
    for (unsigned i = m_requiredPhantoms.size(); i--;) {
        Node* node = m_requiredPhantoms[i];
        m_insertionSet.insertNode(
            m_indexInBlock, SpecNone, Phantom, m_currentNode->origin,
            node->defaultEdge());
    }
    
    m_requiredPhantoms.resize(0);
}
// Discards queued Phantoms at the end of a block without emitting them.
void clearPhantomsAtEnd()
{
    // Terminal nodes don't need post-phantoms, and inserting them would violate
    // the current requirement that a terminal is the last thing in a block. We
    // should eventually change that requirement. Currently we get around this by
    // ensuring that all terminals accept just one input, and if that input is a
    // conversion node then no further speculations will be performed. See
    // references to the bug, below, for places where we have to have hacks to
    // work around this.
    // FIXME: Get rid of this by allowing Phantoms after terminals.
    // https://bugs.webkit.org/show_bug.cgi?id=126778
    
    m_requiredPhantoms.resize(0);
}
BasicBlock* m_block; // Block currently being processed.
unsigned m_indexInBlock; // Index of m_currentNode in m_block; insertion point for new nodes.
Node* m_currentNode; // Node currently being fixed up.
InsertionSet m_insertionSet; // Batches node insertions; executed once per block.
bool m_profitabilityChanged; // Set when a variable's unboxing profitability changes; drives run()'s fixpoint loop.
Vector<Node*, 3> m_requiredPhantoms; // Nodes needing a keep-alive Phantom after the current node.
// Phase entry point: runs FixupPhase over the graph. Returns whether the
// phase reported a change.
bool performFixup(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Fixup Phase");
    return runPhase<FixupPhase>(graph);
}
2048 } } // namespace JSC::DFG
2050 #endif // ENABLE(DFG_JIT)