2 * Copyright (C) 2011 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 namespace JSC
{ namespace DFG
{
// NOTE(review): this chunk is a corrupted line-numbered listing — the leading
// integers fused into each line are original source line numbers, statements
// are wrapped mid-expression, and gaps in the numbering (e.g. 49 -> 52,
// 104 -> 106) indicate dropped lines (likely braces/else/break/return
// statements). The code below is preserved byte-for-byte; only comments added.
//
// fillSpeculateIntInternal<strict>: materializes the value of nodeIndex into a
// GPR, speculating it is an int32. Dispatches on the value's current
// GenerationInfo format; emits a speculationCheck (branchPtr Below
// tagTypeNumberRegister) when the format does not already prove int32, and
// reports the resulting format through returnFormat. The template-parameter
// line (presumably `template<bool strict>`) is missing from this listing —
// TODO confirm against the original file.
GPRReg
SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex
, DataFormat
& returnFormat
)
36 Node
& node
= m_jit
.graph()[nodeIndex
];
37 VirtualRegister virtualRegister
= node
.virtualRegister();
38 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
40 switch (info
.registerFormat()) {
// Value is not currently in a register: allocate one and fill it either from
// a constant or from its spill slot.
41 case DataFormatNone
: {
42 GPRReg gpr
= allocate();
44 if (node
.isConstant()) {
45 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
46 if (isInt32Constant(nodeIndex
)) {
47 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex
)), gpr
);
48 info
.fillInteger(gpr
);
49 returnFormat
= DataFormatInteger
;
// NOTE(review): numbering jumps 49 -> 52; a `return gpr;` and an `else`/
// terminate path appear to be missing here.
52 m_jit
.move(constantAsJSValueAsImmPtr(nodeIndex
), gpr
);
54 DataFormat spillFormat
= info
.spillFormat();
55 ASSERT(spillFormat
& DataFormatJS
);
57 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
59 if (spillFormat
== DataFormatJSInteger
) {
60 // If we know this was spilled as an integer we can fill without checking.
62 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
63 info
.fillInteger(gpr
);
64 returnFormat
= DataFormatInteger
;
// Spilled as a generic JSValue: reload the full pointer-sized value.
67 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
68 info
.fillJSValue(gpr
, DataFormatJSInteger
);
69 returnFormat
= DataFormatJSInteger
;
72 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
75 // Fill as JSValue, and fall through.
76 info
.fillJSValue(gpr
, DataFormatJSInteger
);
81 // Check the value is an integer.
82 GPRReg gpr
= info
.gpr();
// On this (JSVALUE64-style) encoding, a value below tagTypeNumberRegister is
// not an int32 — bail out of speculation if so.
84 speculationCheck(m_jit
.branchPtr(MacroAssembler::Below
, gpr
, GPRInfo::tagTypeNumberRegister
));
85 info
.fillJSValue(gpr
, DataFormatJSInteger
);
86 // If !strict we're done, return.
88 returnFormat
= DataFormatJSInteger
;
91 // else fall through & handle as DataFormatJSInteger.
95 case DataFormatJSInteger
: {
96 // In a strict fill we need to strip off the value tag.
98 GPRReg gpr
= info
.gpr();
100 // If the register has already been locked we need to take a copy.
101 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
102 if (m_gprs
.isLocked(gpr
))
// NOTE(review): the allocation of `result` (original lines ~103-105) is
// missing from this listing; `result` below is otherwise undeclared.
106 info
.fillInteger(gpr
);
109 m_jit
.zeroExtend32ToPtr(gpr
, result
);
110 returnFormat
= DataFormatInteger
;
// Non-strict path: return the tagged JSInteger as-is.
114 GPRReg gpr
= info
.gpr();
116 returnFormat
= DataFormatJSInteger
;
120 case DataFormatInteger
: {
121 GPRReg gpr
= info
.gpr();
123 returnFormat
= DataFormatInteger
;
// A double/cell can never satisfy the int32 speculation: abandon speculative
// compilation for this code path entirely.
127 case DataFormatDouble
:
129 case DataFormatJSDouble
:
130 case DataFormatJSCell
: {
131 terminateSpeculativeExecution();
132 returnFormat
= DataFormatInteger
;
137 ASSERT_NOT_REACHED();
138 return InvalidGPRReg
;
// NOTE(review): corrupted line-numbered listing — leading integers are
// original line numbers, statements are wrapped mid-expression, and numbering
// gaps (142 fused out, 145, 151, 153, 159, 161-162 missing) indicate dropped
// lines (the m_check(check) initializer, braces, else clauses). Code preserved
// byte-for-byte; only comments added.
//
// SpeculationCheck: snapshots the register-allocation state at the point a
// speculation check is emitted, so the non-speculative path can recover. For
// every allocated GPR it records the node and format currently held there; for
// every allocated FPR it records the node (format asserted to be
// DataFormatDouble). Unallocated registers are recorded as NoNode.
141 SpeculationCheck::SpeculationCheck(MacroAssembler::Jump check
, SpeculativeJIT
* jit
, unsigned recoveryIndex
)
143 , m_nodeIndex(jit
->m_compileIndex
)
144 , m_recoveryIndex(recoveryIndex
)
// Record the node/format pair for each general-purpose register in use.
146 for (gpr_iterator iter
= jit
->m_gprs
.begin(); iter
!= jit
->m_gprs
.end(); ++iter
) {
147 if (iter
.name() != InvalidVirtualRegister
) {
148 GenerationInfo
& info
= jit
->m_generationInfo
[iter
.name()];
149 m_gprInfo
[iter
.index()].nodeIndex
= info
.nodeIndex();
150 m_gprInfo
[iter
.index()].format
= info
.registerFormat();
// (else branch) register not bound to a node at this point.
152 m_gprInfo
[iter
.index()].nodeIndex
= NoNode
;
// Record the node for each floating-point register in use.
154 for (fpr_iterator iter
= jit
->m_fprs
.begin(); iter
!= jit
->m_fprs
.end(); ++iter
) {
155 if (iter
.name() != InvalidVirtualRegister
) {
156 GenerationInfo
& info
= jit
->m_generationInfo
[iter
.name()];
157 ASSERT(info
.registerFormat() == DataFormatDouble
);
158 m_fprInfo
[iter
.index()] = info
.nodeIndex();
160 m_fprInfo
[iter
.index()] = NoNode
;
// Non-strict int32 fill: thin wrapper forwarding to
// fillSpeculateIntInternal<false>. (Corrupted listing: braces around the body
// were dropped; code preserved byte-for-byte, comments only added.)
164 GPRReg
SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex
, DataFormat
& returnFormat
)
166 return fillSpeculateIntInternal
<false>(nodeIndex
, returnFormat
);
// Strict int32 fill: forwards to fillSpeculateIntInternal<true> and asserts
// the strict fill always yields DataFormatInteger.
// NOTE(review): numbering jumps 173 -> 177; the trailing `return result;` and
// the closing brace were dropped by the corruption of this listing. Code
// preserved byte-for-byte; comments only added.
169 GPRReg
SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex
)
171 DataFormat mustBeDataFormatInteger
;
172 GPRReg result
= fillSpeculateIntInternal
<true>(nodeIndex
, mustBeDataFormatInteger
);
173 ASSERT(mustBeDataFormatInteger
== DataFormatInteger
);
// NOTE(review): corrupted line-numbered listing — leading integers are
// original line numbers, statements wrapped mid-expression, numbering gaps
// (e.g. 192 -> 195, 204 -> 209, 210 -> 216) mark dropped lines (returns,
// braces, case labels). Code preserved byte-for-byte; comments only added.
//
// fillSpeculateCell: materializes nodeIndex's value into a GPR, speculating it
// is a JSCell. Emits a speculationCheck (branchTestPtr NonZero against
// tagMaskRegister — a cell has no tag bits set on this encoding) whenever the
// current format does not already prove cell; terminates speculation outright
// for formats that can never be a cell (integers/doubles, non-cell constants).
177 GPRReg
SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex
)
179 Node
& node
= m_jit
.graph()[nodeIndex
];
180 VirtualRegister virtualRegister
= node
.virtualRegister();
181 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
183 switch (info
.registerFormat()) {
// Not in a register: fill from constant or spill slot.
184 case DataFormatNone
: {
185 GPRReg gpr
= allocate();
187 if (node
.isConstant()) {
188 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
189 JSValue jsValue
= constantAsJSValue(nodeIndex
);
190 if (jsValue
.isCell()) {
191 m_jit
.move(MacroAssembler::TrustedImmPtr(jsValue
.asCell()), gpr
);
192 info
.fillJSValue(gpr
, DataFormatJSCell
);
// A non-cell constant can never satisfy the cell speculation.
195 terminateSpeculativeExecution();
198 ASSERT(info
.spillFormat() & DataFormatJS
);
199 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
200 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
// Only emit the cell check if the spill format does not already prove cell.
202 if (info
.spillFormat() != DataFormatJSCell
)
203 speculationCheck(m_jit
.branchTestPtr(MacroAssembler::NonZero
, gpr
, GPRInfo::tagMaskRegister
));
204 info
.fillJSValue(gpr
, DataFormatJSCell
);
// Already known to be a cell: no check needed.
209 case DataFormatJSCell
: {
210 GPRReg gpr
= info
.gpr();
// (Presumably the DataFormatJS case — its label was dropped by the
// corruption, numbering jumps 210 -> 216.) Check the tag bits, then mark the
// value as a known cell.
216 GPRReg gpr
= info
.gpr();
218 speculationCheck(m_jit
.branchTestPtr(MacroAssembler::NonZero
, gpr
, GPRInfo::tagMaskRegister
));
219 info
.fillJSValue(gpr
, DataFormatJSCell
);
// Numeric formats can never be cells: abandon speculative compilation.
223 case DataFormatJSInteger
:
224 case DataFormatInteger
:
225 case DataFormatJSDouble
:
226 case DataFormatDouble
: {
227 terminateSpeculativeExecution();
232 ASSERT_NOT_REACHED();
233 return InvalidGPRReg
;
// NOTE(review): corrupted line-numbered listing — leading integers are
// original line numbers, statements wrapped mid-expression, numbering gaps
// (e.g. 246 -> 252, 257 -> 259) mark dropped lines (the taken/notTaken swap
// completion, an int32 `imm` declaration, else/braces). Code preserved
// byte-for-byte; comments only added.
//
// compilePeepHoleBranch: emits a fused integer compare-and-branch for `node`
// against the immediately following Branch node. Computes the taken/notTaken
// block targets from the branch node's bytecode offsets, inverts the condition
// when the taken block is the fall-through successor, special-cases an int32-
// constant on either side, and finally jumps to notTaken unless it falls
// through.
236 void SpeculativeJIT::compilePeepHoleBranch(Node
& node
, JITCompiler::RelationalCondition condition
)
238 Node
& branchNode
= m_jit
.graph()[m_compileIndex
+ 1];
239 BlockIndex taken
= m_jit
.graph().blockIndexForBytecodeOffset(branchNode
.takenBytecodeOffset());
240 BlockIndex notTaken
= m_jit
.graph().blockIndexForBytecodeOffset(branchNode
.notTakenBytecodeOffset());
242 // The branch instruction will branch to the taken block.
243 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
244 if (taken
== (m_block
+ 1)) {
245 condition
= JITCompiler::invert(condition
);
246 BlockIndex tmp
= taken
;
// NOTE(review): the swap completion (taken = notTaken; notTaken = tmp;) and
// the declaration of `imm` are missing here (numbering jumps 246 -> 252).
252 if (isJSConstantWithInt32Value(node
.child1
, imm
)) {
253 SpeculateIntegerOperand
op2(this, node
.child2
);
254 addBranch(m_jit
.branch32(condition
, JITCompiler::Imm32(imm
), op2
.gpr()), taken
);
255 } else if (isJSConstantWithInt32Value(node
.child2
, imm
)) {
256 SpeculateIntegerOperand
op1(this, node
.child1
);
257 addBranch(m_jit
.branch32(condition
, op1
.gpr(), JITCompiler::Imm32(imm
)), taken
);
// General case: both operands filled as speculated integers.
259 SpeculateIntegerOperand
op1(this, node
.child1
);
260 SpeculateIntegerOperand
op2(this, node
.child2
);
261 addBranch(m_jit
.branch32(condition
, op1
.gpr(), op2
.gpr()), taken
);
264 // Check for fall through, otherwise we need to jump.
265 if (notTaken
!= (m_block
+ 1))
266 addBranch(m_jit
.jump(), notTaken
);
// NOTE(review): corrupted line-numbered listing — leading integers are
// original line numbers, statements are wrapped mid-expression, and the many
// gaps in the numbering mark dropped lines. In particular most `case` labels,
// `break;` statements and closing braces of this large per-opcode switch are
// missing; the surviving fragments are grouped below by the opcode they
// belong to where a case label survived. Code preserved byte-for-byte;
// comments only added.
//
// compile(Node&): emits speculative machine code for a single DFG node,
// dispatching on node.op. Each case fills its operands (speculating on type
// where needed), emits the operation plus any speculationCheck bail-outs, and
// publishes the result register via integerResult/jsValueResult/cellResult/
// noResult.
269 void SpeculativeJIT::compile(Node
& node
)
271 NodeType op
= node
.op
;
// (JSConstant case — label dropped.)
277 initConstantInfo(m_compileIndex
);
// (GetLocal case — label dropped.) Load the local either as a raw int32
// payload or as a full JSValue, depending on the variable's prediction.
281 GPRTemporary
result(this);
282 PredictedType prediction
= m_jit
.graph().getPrediction(node
.local());
283 if (prediction
== PredictInt32
) {
284 m_jit
.load32(JITCompiler::payloadFor(node
.local()), result
.gpr());
286 // Like integerResult, but don't useChildren - our children are phi nodes,
287 // and don't represent values within this dataflow with virtual registers.
288 VirtualRegister virtualRegister
= node
.virtualRegister();
289 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
290 m_generationInfo
[virtualRegister
].initInteger(m_compileIndex
, node
.refCount(), result
.gpr());
292 m_jit
.loadPtr(JITCompiler::addressFor(node
.local()), result
.gpr());
294 // Like jsValueResult, but don't useChildren - our children are phi nodes,
295 // and don't represent values within this dataflow with virtual registers.
296 VirtualRegister virtualRegister
= node
.virtualRegister();
297 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
298 m_generationInfo
[virtualRegister
].initJSValue(m_compileIndex
, node
.refCount(), result
.gpr(), (prediction
== PredictArray
) ? DataFormatJSCell
: DataFormatJS
);
// (SetLocal case — label dropped.) Store back to the local, picking the
// store width/speculation from the variable's prediction.
304 switch (m_jit
.graph().getPrediction(node
.local())) {
306 SpeculateIntegerOperand
value(this, node
.child1
);
307 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
.local()));
308 noResult(m_compileIndex
);
312 SpeculateCellOperand
cell(this, node
.child1
);
313 m_jit
.storePtr(cell
.gpr(), JITCompiler::addressFor(node
.local()));
314 noResult(m_compileIndex
);
319 JSValueOperand
value(this, node
.child1
);
320 m_jit
.storePtr(value
.gpr(), JITCompiler::addressFor(node
.local()));
321 noResult(m_compileIndex
);
// (BitAnd/BitOr/BitXor cases — labels dropped.) Commutative bit ops with an
// int32-constant fast path on either operand.
331 if (isInt32Constant(node
.child1
)) {
332 SpeculateIntegerOperand
op2(this, node
.child2
);
333 GPRTemporary
result(this, op2
);
335 bitOp(op
, valueOfInt32Constant(node
.child1
), op2
.gpr(), result
.gpr());
337 integerResult(result
.gpr(), m_compileIndex
);
338 } else if (isInt32Constant(node
.child2
)) {
339 SpeculateIntegerOperand
op1(this, node
.child1
);
340 GPRTemporary
result(this, op1
);
342 bitOp(op
, valueOfInt32Constant(node
.child2
), op1
.gpr(), result
.gpr());
344 integerResult(result
.gpr(), m_compileIndex
);
346 SpeculateIntegerOperand
op1(this, node
.child1
);
347 SpeculateIntegerOperand
op2(this, node
.child2
);
348 GPRTemporary
result(this, op1
, op2
);
350 GPRReg reg1
= op1
.gpr();
351 GPRReg reg2
= op2
.gpr();
352 bitOp(op
, reg1
, reg2
, result
.gpr());
354 integerResult(result
.gpr(), m_compileIndex
);
// (Shift cases — labels dropped.) Shift amount masked to 0x1f as per JS
// semantics when constant.
361 if (isInt32Constant(node
.child2
)) {
362 SpeculateIntegerOperand
op1(this, node
.child1
);
363 GPRTemporary
result(this, op1
);
365 shiftOp(op
, op1
.gpr(), valueOfInt32Constant(node
.child2
) & 0x1f, result
.gpr());
367 integerResult(result
.gpr(), m_compileIndex
);
369 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
370 SpeculateIntegerOperand
op1(this, node
.child1
);
371 SpeculateIntegerOperand
op2(this, node
.child2
);
372 GPRTemporary
result(this, op1
);
374 GPRReg reg1
= op1
.gpr();
375 GPRReg reg2
= op2
.gpr();
376 shiftOp(op
, reg1
, reg2
, result
.gpr());
378 integerResult(result
.gpr(), m_compileIndex
);
// UInt32ToNumber: speculate the uint32 fits in a (non-negative) int32.
382 case UInt32ToNumber
: {
383 IntegerOperand
op1(this, node
.child1
);
384 GPRTemporary
result(this, op1
);
386 // Test the operand is positive.
387 speculationCheck(m_jit
.branch32(MacroAssembler::LessThan
, op1
.gpr(), TrustedImm32(0)));
389 m_jit
.move(op1
.gpr(), result
.gpr());
390 integerResult(result
.gpr(), m_compileIndex
, op1
.format());
// NumberToInt32 / Int32ToNumber / ValueToInt32(?) / ValueToNumber: under
// int32 speculation these all reduce to a register move.
394 case NumberToInt32
: {
395 SpeculateIntegerOperand
op1(this, node
.child1
);
396 GPRTemporary
result(this, op1
);
397 m_jit
.move(op1
.gpr(), result
.gpr());
398 integerResult(result
.gpr(), m_compileIndex
, op1
.format());
402 case Int32ToNumber
: {
403 SpeculateIntegerOperand
op1(this, node
.child1
);
404 GPRTemporary
result(this, op1
);
405 m_jit
.move(op1
.gpr(), result
.gpr());
406 integerResult(result
.gpr(), m_compileIndex
, op1
.format());
411 SpeculateIntegerOperand
op1(this, node
.child1
);
412 GPRTemporary
result(this, op1
);
413 m_jit
.move(op1
.gpr(), result
.gpr());
414 integerResult(result
.gpr(), m_compileIndex
, op1
.format());
418 case ValueToNumber
: {
419 SpeculateIntegerOperand
op1(this, node
.child1
);
420 GPRTemporary
result(this, op1
);
421 m_jit
.move(op1
.gpr(), result
.gpr());
422 integerResult(result
.gpr(), m_compileIndex
, op1
.format());
// (ValueAdd/ArithAdd case — label dropped.) Overflow-checked add with
// constant fast paths; when the result register aliases an input, record a
// SpeculationRecovery so the bail-out path can undo the add.
429 if (isDoubleConstantWithInt32Value(node
.child1
, imm1
)) {
430 SpeculateIntegerOperand
op2(this, node
.child2
);
431 GPRTemporary
result(this);
433 speculationCheck(m_jit
.branchAdd32(MacroAssembler::Overflow
, op2
.gpr(), Imm32(imm1
), result
.gpr()));
435 integerResult(result
.gpr(), m_compileIndex
);
440 if (isDoubleConstantWithInt32Value(node
.child2
, imm2
)) {
441 SpeculateIntegerOperand
op1(this, node
.child1
);
442 GPRTemporary
result(this);
444 speculationCheck(m_jit
.branchAdd32(MacroAssembler::Overflow
, op1
.gpr(), Imm32(imm2
), result
.gpr()));
446 integerResult(result
.gpr(), m_compileIndex
);
450 SpeculateIntegerOperand
op1(this, node
.child1
);
451 SpeculateIntegerOperand
op2(this, node
.child2
);
452 GPRTemporary
result(this, op1
, op2
);
454 GPRReg gpr1
= op1
.gpr();
455 GPRReg gpr2
= op2
.gpr();
456 GPRReg gprResult
= result
.gpr();
457 MacroAssembler::Jump check
= m_jit
.branchAdd32(MacroAssembler::Overflow
, gpr1
, gpr2
, gprResult
);
459 if (gpr1
== gprResult
)
460 speculationCheck(check
, SpeculationRecovery(SpeculativeAdd
, gprResult
, gpr2
));
461 else if (gpr2
== gprResult
)
462 speculationCheck(check
, SpeculationRecovery(SpeculativeAdd
, gprResult
, gpr1
));
464 speculationCheck(check
);
466 integerResult(gprResult
, m_compileIndex
);
// (ArithSub case — label dropped.) Overflow-checked subtract.
472 if (isDoubleConstantWithInt32Value(node
.child2
, imm2
)) {
473 SpeculateIntegerOperand
op1(this, node
.child1
);
474 GPRTemporary
result(this);
476 speculationCheck(m_jit
.branchSub32(MacroAssembler::Overflow
, op1
.gpr(), Imm32(imm2
), result
.gpr()));
478 integerResult(result
.gpr(), m_compileIndex
);
482 SpeculateIntegerOperand
op1(this, node
.child1
);
483 SpeculateIntegerOperand
op2(this, node
.child2
);
484 GPRTemporary
result(this);
486 speculationCheck(m_jit
.branchSub32(MacroAssembler::Overflow
, op1
.gpr(), op2
.gpr(), result
.gpr()));
488 integerResult(result
.gpr(), m_compileIndex
);
// (ArithMul case — label dropped.) Overflow-checked multiply; a zero result
// with a negative operand would be -0, which int32 cannot represent, so bail.
493 SpeculateIntegerOperand
op1(this, node
.child1
);
494 SpeculateIntegerOperand
op2(this, node
.child2
);
495 GPRTemporary
result(this);
497 GPRReg reg1
= op1
.gpr();
498 GPRReg reg2
= op2
.gpr();
499 speculationCheck(m_jit
.branchMul32(MacroAssembler::Overflow
, reg1
, reg2
, result
.gpr()));
501 MacroAssembler::Jump resultNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, result
.gpr());
502 speculationCheck(m_jit
.branch32(MacroAssembler::LessThan
, reg1
, TrustedImm32(0)));
503 speculationCheck(m_jit
.branch32(MacroAssembler::LessThan
, reg2
, TrustedImm32(0)));
504 resultNonZero
.link(&m_jit
);
506 integerResult(result
.gpr(), m_compileIndex
);
// (ArithDiv/ArithMod cases — labels dropped.) Not implemented speculatively
// here: speculation is terminated unconditionally.
511 SpeculateIntegerOperand
op1(this, node
.child1
);
512 SpeculateIntegerOperand
op2(this, node
.child2
);
513 GPRTemporary
result(this, op1
, op2
);
517 terminateSpeculativeExecution();
519 integerResult(result
.gpr(), m_compileIndex
);
524 SpeculateIntegerOperand
op1(this, node
.child1
);
525 SpeculateIntegerOperand
op2(this, node
.child2
);
526 GPRTemporary
result(this, op1
, op2
);
530 terminateSpeculativeExecution();
532 integerResult(result
.gpr(), m_compileIndex
);
// (LogicalNot case — label dropped.) XOR the boolean tag off, verify only
// the low bit remains (i.e. the value really was a boolean), XOR the tag back
// with the bit flipped.
537 JSValueOperand
value(this, node
.child1
);
538 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
540 m_jit
.move(value
.gpr(), result
.gpr());
541 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
542 speculationCheck(m_jit
.branchTestPtr(JITCompiler::NonZero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
543 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue
)), result
.gpr());
545 // If we add a DataFormatBool, we should use it here.
546 jsValueResult(result
.gpr(), m_compileIndex
);
// (CompareLess case — label dropped.)
551 // Fused compare & branch.
552 if (detectPeepHoleBranch()) {
553 // detectPeepHoleBranch currently only permits the branch to be the very next node,
554 // so can be no intervening nodes to also reference the compare.
555 ASSERT(node
.adjustedRefCount() == 1);
557 compilePeepHoleBranch(node
, JITCompiler::LessThan
);
565 // Normal case, not fused to branch.
566 SpeculateIntegerOperand
op1(this, node
.child1
);
567 SpeculateIntegerOperand
op2(this, node
.child2
);
568 GPRTemporary
result(this, op1
, op2
);
570 m_jit
.compare32(JITCompiler::LessThan
, op1
.gpr(), op2
.gpr(), result
.gpr());
572 // If we add a DataFormatBool, we should use it here.
573 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
574 jsValueResult(result
.gpr(), m_compileIndex
);
578 case CompareLessEq
: {
579 // Fused compare & branch.
580 if (detectPeepHoleBranch()) {
581 // detectPeepHoleBranch currently only permits the branch to be the very next node,
582 // so can be no intervening nodes to also reference the compare.
583 ASSERT(node
.adjustedRefCount() == 1);
585 compilePeepHoleBranch(node
, JITCompiler::LessThanOrEqual
);
593 // Normal case, not fused to branch.
594 SpeculateIntegerOperand
op1(this, node
.child1
);
595 SpeculateIntegerOperand
op2(this, node
.child2
);
596 GPRTemporary
result(this, op1
, op2
);
598 m_jit
.compare32(JITCompiler::LessThanOrEqual
, op1
.gpr(), op2
.gpr(), result
.gpr());
600 // If we add a DataFormatBool, we should use it here.
601 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
602 jsValueResult(result
.gpr(), m_compileIndex
);
// (CompareEq case — label dropped.)
607 SpeculateIntegerOperand
op1(this, node
.child1
);
608 SpeculateIntegerOperand
op2(this, node
.child2
);
609 GPRTemporary
result(this, op1
, op2
);
611 m_jit
.compare32(JITCompiler::Equal
, op1
.gpr(), op2
.gpr(), result
.gpr());
613 // If we add a DataFormatBool, we should use it here.
614 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
615 jsValueResult(result
.gpr(), m_compileIndex
);
619 case CompareStrictEq
: {
620 SpeculateIntegerOperand
op1(this, node
.child1
);
621 SpeculateIntegerOperand
op2(this, node
.child2
);
622 GPRTemporary
result(this, op1
, op2
);
624 m_jit
.compare32(JITCompiler::Equal
, op1
.gpr(), op2
.gpr(), result
.gpr());
626 // If we add a DataFormatBool, we should use it here.
627 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
628 jsValueResult(result
.gpr(), m_compileIndex
);
// (GetByVal case — label dropped.) child3 aliases a prior GetByVal result
// when non-NoNode, letting us skip the array access entirely.
633 NodeIndex alias
= node
.child3
;
634 if (alias
!= NoNode
) {
635 // FIXME: result should be able to reuse child1, child2. Should have an 'UnusedOperand' type.
636 JSValueOperand
aliasedValue(this, node
.child3
);
637 GPRTemporary
result(this, aliasedValue
);
638 m_jit
.move(aliasedValue
.gpr(), result
.gpr());
639 jsValueResult(result
.gpr(), m_compileIndex
);
643 SpeculateCellOperand
base(this, node
.child1
);
644 SpeculateStrictInt32Operand
property(this, node
.child2
);
645 GPRTemporary
storage(this);
647 GPRReg baseReg
= base
.gpr();
648 GPRReg propertyReg
= property
.gpr();
649 GPRReg storageReg
= storage
.gpr();
651 // Get the array storage. We haven't yet checked this is a JSArray, so this is only safe if
652 // an access with offset JSArray::storageOffset() is valid for all JSCells!
653 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSArray::storageOffset()), storageReg
);
655 // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
656 // If we have predicted the base to be type array, we can skip the check.
657 Node
& baseNode
= m_jit
.graph()[node
.child1
];
658 if (baseNode
.op
!= GetLocal
|| m_jit
.graph().getPrediction(baseNode
.local()) != PredictArray
)
659 speculationCheck(m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseReg
), MacroAssembler::TrustedImmPtr(m_jit
.globalData()->jsArrayVPtr
)));
660 speculationCheck(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(baseReg
, JSArray::vectorLengthOffset())));
662 // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache
663 // the storage pointer - especially if there happens to be another register free right now. If we do so,
664 // then we'll need to allocate a new temporary for result.
665 GPRTemporary
& result
= storage
;
666 m_jit
.loadPtr(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), result
.gpr());
667 speculationCheck(m_jit
.branchTestPtr(MacroAssembler::Zero
, result
.gpr()));
669 jsValueResult(result
.gpr(), m_compileIndex
);
// (PutByVal case — label dropped.) Checked array store with hole handling
// and length update.
674 SpeculateCellOperand
base(this, node
.child1
);
675 SpeculateStrictInt32Operand
property(this, node
.child2
);
676 JSValueOperand
value(this, node
.child3
);
677 GPRTemporary
storage(this);
679 // Map base, property & value into registers, allocate a register for storage.
680 GPRReg baseReg
= base
.gpr();
681 GPRReg propertyReg
= property
.gpr();
682 GPRReg valueReg
= value
.gpr();
683 GPRReg storageReg
= storage
.gpr();
685 // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
686 // If we have predicted the base to be type array, we can skip the check.
687 Node
& baseNode
= m_jit
.graph()[node
.child1
];
688 if (baseNode
.op
!= GetLocal
|| m_jit
.graph().getPrediction(baseNode
.local()) != PredictArray
)
689 speculationCheck(m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseReg
), MacroAssembler::TrustedImmPtr(m_jit
.globalData()->jsArrayVPtr
)));
690 speculationCheck(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(baseReg
, JSArray::vectorLengthOffset())));
692 // Get the array storage.
693 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSArray::storageOffset()), storageReg
);
695 // Check if we're writing to a hole; if so increment m_numValuesInVector.
696 MacroAssembler::Jump notHoleValue
= m_jit
.branchTestPtr(MacroAssembler::NonZero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
697 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
699 // If we're writing to a hole we might be growing the array;
700 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
// New length is index + 1; propertyReg is restored afterwards since the
// operand register must keep its original value.
701 m_jit
.add32(TrustedImm32(1), propertyReg
);
702 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
703 m_jit
.sub32(TrustedImm32(1), propertyReg
);
705 lengthDoesNotNeedUpdate
.link(&m_jit
);
706 notHoleValue
.link(&m_jit
);
708 // Store the value to the array.
709 m_jit
.storePtr(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
711 noResult(m_compileIndex
);
// PutByValAlias: bounds/type checks already done by a dominating access, so
// store directly.
715 case PutByValAlias
: {
716 SpeculateCellOperand
base(this, node
.child1
);
717 SpeculateStrictInt32Operand
property(this, node
.child2
);
718 JSValueOperand
value(this, node
.child3
);
719 GPRTemporary
storage(this, base
); // storage may overwrite base.
721 // Get the array storage.
722 GPRReg storageReg
= storage
.gpr();
723 m_jit
.loadPtr(MacroAssembler::Address(base
.gpr(), JSArray::storageOffset()), storageReg
);
725 // Map property & value into registers.
726 GPRReg propertyReg
= property
.gpr();
727 GPRReg valueReg
= value
.gpr();
729 // Store the value to the array.
730 m_jit
.storePtr(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
732 noResult(m_compileIndex
);
// (Jump case — label dropped.) Unconditional jump, elided when it falls
// through to the next block.
737 BlockIndex taken
= m_jit
.graph().blockIndexForBytecodeOffset(node
.takenBytecodeOffset());
738 if (taken
!= (m_block
+ 1))
739 addBranch(m_jit
.jump(), taken
);
740 noResult(m_compileIndex
);
// (Branch case — label dropped.) Branch on a JSValue: 0 and false go to
// notTaken, any other int32 and true go to taken; any other value fails
// speculation.
745 JSValueOperand
value(this, node
.child1
);
746 GPRReg valueReg
= value
.gpr();
748 BlockIndex taken
= m_jit
.graph().blockIndexForBytecodeOffset(node
.takenBytecodeOffset());
749 BlockIndex notTaken
= m_jit
.graph().blockIndexForBytecodeOffset(node
.notTakenBytecodeOffset());
752 addBranch(m_jit
.branchPtr(MacroAssembler::Equal
, valueReg
, MacroAssembler::ImmPtr(JSValue::encode(jsNumber(0)))), notTaken
);
753 MacroAssembler::Jump isNonZeroInteger
= m_jit
.branchPtr(MacroAssembler::AboveOrEqual
, valueReg
, GPRInfo::tagTypeNumberRegister
);
756 addBranch(m_jit
.branchPtr(MacroAssembler::Equal
, valueReg
, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false)))), notTaken
);
757 speculationCheck(m_jit
.branchPtr(MacroAssembler::NotEqual
, valueReg
, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true)))));
759 if (taken
== (m_block
+ 1))
760 isNonZeroInteger
.link(&m_jit
);
762 addBranch(isNonZeroInteger
, taken
);
763 addBranch(m_jit
.jump(), taken
);
766 noResult(m_compileIndex
);
// (Return case — label dropped.) Move the result into returnValueGPR,
// restore caller frame and return address, then return.
771 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT1
);
772 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
773 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
775 #if DFG_SUCCESS_STATS
776 static SamplingCounter
counter("SpeculativeJIT");
777 m_jit
.emitCount(counter
);
780 // Return the result in returnValueGPR.
781 JSValueOperand
op1(this, node
.child1
);
782 m_jit
.move(op1
.gpr(), GPRInfo::returnValueGPR
);
784 // Grab the return address.
785 m_jit
.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC
, GPRInfo::regT1
);
786 // Restore our caller's "r".
787 m_jit
.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame
, GPRInfo::callFrameRegister
);
789 m_jit
.restoreReturnAddressBeforeReturn(GPRInfo::regT1
);
792 noResult(m_compileIndex
);
// (ConvertThis case — label dropped.) Speculate `this` needs no conversion
// by checking the NeedsThisConversion structure flag.
797 SpeculateCellOperand
thisValue(this, node
.child1
);
798 GPRTemporary
temp(this);
800 m_jit
.loadPtr(JITCompiler::Address(thisValue
.gpr(), JSCell::structureOffset()), temp
.gpr());
801 speculationCheck(m_jit
.branchTest8(JITCompiler::NonZero
, JITCompiler::Address(temp
.gpr(), Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(NeedsThisConversion
)));
803 cellResult(thisValue
.gpr(), m_compileIndex
);
// (GetById case — label dropped.) Implemented as a slow-path call.
808 JSValueOperand
base(this, node
.child1
);
809 GPRReg baseGPR
= base
.gpr();
812 GPRResult
result(this);
813 callOperation(operationGetById
, result
.gpr(), baseGPR
, identifier(node
.identifierNumber()));
814 jsValueResult(result
.gpr(), m_compileIndex
);
// (PutById case — label dropped.) Slow-path call, strict/non-strict variant
// chosen from the code block.
819 JSValueOperand
base(this, node
.child1
);
820 JSValueOperand
value(this, node
.child2
);
821 GPRReg valueGPR
= value
.gpr();
822 GPRReg baseGPR
= base
.gpr();
825 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByIdStrict
: operationPutByIdNonStrict
, valueGPR
, baseGPR
, identifier(node
.identifierNumber()));
826 noResult(m_compileIndex
);
830 case PutByIdDirect
: {
831 JSValueOperand
base(this, node
.child1
);
832 JSValueOperand
value(this, node
.child2
);
833 GPRReg valueGPR
= value
.gpr();
834 GPRReg baseGPR
= base
.gpr();
837 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByIdDirectStrict
: operationPutByIdDirectNonStrict
, valueGPR
, baseGPR
, identifier(node
.identifierNumber()));
838 noResult(m_compileIndex
);
// (GetGlobalVar case — label dropped.) Load through the global object's
// register array.
843 GPRTemporary
result(this);
845 JSVariableObject
* globalObject
= m_jit
.codeBlock()->globalObject();
846 m_jit
.loadPtr(globalObject
->addressOfRegisters(), result
.gpr());
847 m_jit
.loadPtr(JITCompiler::addressForGlobalVar(result
.gpr(), node
.varNumber()), result
.gpr());
849 jsValueResult(result
.gpr(), m_compileIndex
);
// (PutGlobalVar case — label dropped.)
854 JSValueOperand
value(this, node
.child1
);
855 GPRTemporary
temp(this);
857 JSVariableObject
* globalObject
= m_jit
.codeBlock()->globalObject();
858 m_jit
.loadPtr(globalObject
->addressOfRegisters(), temp
.gpr());
859 m_jit
.storePtr(value
.gpr(), JITCompiler::addressForGlobalVar(temp
.gpr(), node
.varNumber()));
861 noResult(m_compileIndex
);
// Default / unsupported opcode.
866 ASSERT_NOT_REACHED();
// Post-switch: a must-generate node with a result presumably needs its use
// recorded (the statement body, original line ~870, is missing).
869 if (node
.hasResult() && node
.mustGenerate())
// NOTE(review): corrupted line-numbered listing — leading integers are
// original line numbers, statements wrapped mid-expression, numbering gaps
// (877 -> 881, 883 -> 886, after 889) mark dropped lines (debug-break code,
// `continue;`, #endif lines, the loop body tail). Code preserved
// byte-for-byte; comments only added.
//
// compile(BasicBlock&): records the block's entry label, then emits code for
// each node in [block.begin, block.end) via compile(Node&), skipping nodes
// that shouldGenerate() rejects. Optional per-block/per-node debug hooks are
// conditionally compiled in.
873 void SpeculativeJIT::compile(BasicBlock
& block
)
875 ASSERT(m_compileIndex
== block
.begin
);
876 m_blockHeads
[m_block
] = m_jit
.label();
877 #if DFG_JIT_BREAK_ON_EVERY_BLOCK
881 for (; m_compileIndex
< block
.end
; ++m_compileIndex
) {
882 Node
& node
= m_jit
.graph()[m_compileIndex
];
883 if (!node
.shouldGenerate())
886 #if DFG_DEBUG_VERBOSE
887 fprintf(stderr
, "SpeculativeJIT generating Node @%d at JIT offset 0x%x\n", (int)m_compileIndex
, m_jit
.debugOffset());
889 #if DFG_JIT_BREAK_ON_EVERY_NODE
900 // If we are making type predictions about our arguments then
901 // we need to check that they are correct on function entry.
// NOTE(review): corrupted line-numbered listing — leading integers are
// original line numbers, statements wrapped mid-expression, numbering gaps
// (907 -> 909, 909 -> 913, after 916) mark dropped lines (case labels for the
// prediction switch, break statements, closing braces). Code preserved
// byte-for-byte; comments only added.
//
// checkArgumentTypes: emitted at function entry (asserted by !m_compileIndex).
// For each parameter, emits a speculation check matching its predicted type:
// an int32 prediction checks the value is >= tagTypeNumberRegister; an array
// prediction (presumably — its case label is missing) checks the value is a
// cell and its vptr is jsArrayVPtr.
902 void SpeculativeJIT::checkArgumentTypes()
904 ASSERT(!m_compileIndex
);
905 for (int i
= 0; i
< m_jit
.codeBlock()->m_numParameters
; ++i
) {
906 VirtualRegister virtualRegister
= (VirtualRegister
)(m_jit
.codeBlock()->thisRegister() + i
);
907 switch (m_jit
.graph().getPrediction(virtualRegister
)) {
// (PredictInt32 case — label dropped.) Values below tagTypeNumber are not
// int32s.
909 speculationCheck(m_jit
.branchPtr(MacroAssembler::Below
, JITCompiler::addressFor(virtualRegister
), GPRInfo::tagTypeNumberRegister
));
// (PredictArray case — label dropped.) Must be a cell (no tag bits) whose
// vptr is the JSArray vptr.
913 GPRTemporary
temp(this);
914 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), temp
.gpr());
915 speculationCheck(m_jit
.branchTestPtr(MacroAssembler::NonZero
, temp
.gpr(), GPRInfo::tagMaskRegister
));
916 speculationCheck(m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(temp
.gpr()), MacroAssembler::TrustedImmPtr(m_jit
.globalData()->jsArrayVPtr
)));
926 // For any vars that we will be treating as numeric, write 0 to
927 // the var on entry. Throughout the block we will only read/write
928 // to the payload, by writing the tag now we prevent the GC from
929 // misinterpreting values as pointers.
// NOTE(review): corrupted line-numbered listing — braces and the line after
// 935 were dropped. Code preserved byte-for-byte; comments only added.
//
// initializeVariableTypes: at function entry (asserted by !m_compileIndex),
// writes the tagTypeNumber pattern into every local predicted to be int32, so
// that (per the comment preceding this function in the file) payload-only
// writes later cannot be misread by the GC as pointers.
930 void SpeculativeJIT::initializeVariableTypes()
932 ASSERT(!m_compileIndex
);
933 for (int var
= 0; var
< m_jit
.codeBlock()->m_numVars
; ++var
) {
934 if (m_jit
.graph().getPrediction(var
) == PredictInt32
)
935 m_jit
.storePtr(GPRInfo::tagTypeNumberRegister
, JITCompiler::addressFor((VirtualRegister
)var
));
// NOTE(review): corrupted line-numbered listing — the function's braces,
// loop-body tail and `return` statement (original lines after 946) were
// dropped. Code preserved byte-for-byte; comments only added.
//
// compile(): top-level driver. Emits argument-type checks and variable-type
// initialization, then compiles every basic block in graph order via
// compile(BasicBlock&). Returns a bool (success flag — the return statement
// is missing from this listing; TODO confirm against the original file).
939 bool SpeculativeJIT::compile()
941 checkArgumentTypes();
942 initializeVariableTypes();
944 ASSERT(!m_compileIndex
);
945 for (m_block
= 0; m_block
< m_jit
.graph().m_blocks
.size(); ++m_block
) {
946 compile(*m_jit
.graph().m_blocks
[m_block
]);
954 } } // namespace JSC::DFG