/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "DFGSpeculativeJIT.h"
31 namespace JSC
{ namespace DFG
{
35 GPRReg
SpeculativeJIT::fillInteger(NodeIndex nodeIndex
, DataFormat
& returnFormat
)
37 Node
& node
= at(nodeIndex
);
38 VirtualRegister virtualRegister
= node
.virtualRegister();
39 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
41 if (info
.registerFormat() == DataFormatNone
) {
42 GPRReg gpr
= allocate();
44 if (node
.hasConstant()) {
45 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
46 if (isInt32Constant(nodeIndex
)) {
47 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex
)), gpr
);
48 info
.fillInteger(gpr
);
49 returnFormat
= DataFormatInteger
;
52 if (isNumberConstant(nodeIndex
)) {
53 JSValue jsValue
= jsNumber(valueOfNumberConstant(nodeIndex
));
54 m_jit
.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue
)), gpr
);
56 ASSERT(isJSConstant(nodeIndex
));
57 JSValue jsValue
= valueOfJSConstant(nodeIndex
);
58 m_jit
.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue
)), gpr
);
60 } else if (info
.spillFormat() == DataFormatInteger
) {
61 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
62 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
63 // Tag it, since fillInteger() is used when we want a boxed integer.
64 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, gpr
);
66 ASSERT(info
.spillFormat() == DataFormatJS
|| info
.spillFormat() == DataFormatJSInteger
);
67 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
68 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
71 // Since we statically know that we're filling an integer, and values
72 // in the RegisterFile are boxed, this must be DataFormatJSInteger.
73 // We will check this with a jitAssert below.
74 info
.fillJSValue(gpr
, DataFormatJSInteger
);
78 switch (info
.registerFormat()) {
80 // Should have filled, above.
81 case DataFormatJSDouble
:
82 case DataFormatDouble
:
85 case DataFormatJSCell
:
86 case DataFormatBoolean
:
87 case DataFormatJSBoolean
:
88 case DataFormatStorage
:
89 // Should only be calling this function if we know this operand to be integer.
92 case DataFormatJSInteger
: {
93 GPRReg gpr
= info
.gpr();
95 m_jit
.jitAssertIsJSInt32(gpr
);
96 returnFormat
= DataFormatJSInteger
;
100 case DataFormatInteger
: {
101 GPRReg gpr
= info
.gpr();
103 m_jit
.jitAssertIsInt32(gpr
);
104 returnFormat
= DataFormatInteger
;
109 ASSERT_NOT_REACHED();
110 return InvalidGPRReg
;
113 FPRReg
SpeculativeJIT::fillDouble(NodeIndex nodeIndex
)
115 Node
& node
= at(nodeIndex
);
116 VirtualRegister virtualRegister
= node
.virtualRegister();
117 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
119 if (info
.registerFormat() == DataFormatNone
) {
120 if (node
.hasConstant()) {
121 GPRReg gpr
= allocate();
123 if (isInt32Constant(nodeIndex
)) {
124 // FIXME: should not be reachable?
125 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex
)), gpr
);
126 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
127 info
.fillInteger(gpr
);
129 } else if (isNumberConstant(nodeIndex
)) {
130 FPRReg fpr
= fprAllocate();
131 m_jit
.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex
)))), gpr
);
132 m_jit
.movePtrToDouble(gpr
, fpr
);
135 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
136 info
.fillDouble(fpr
);
139 // FIXME: should not be reachable?
140 ASSERT(isJSConstant(nodeIndex
));
141 JSValue jsValue
= valueOfJSConstant(nodeIndex
);
142 m_jit
.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue
)), gpr
);
143 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
144 info
.fillJSValue(gpr
, DataFormatJS
);
148 DataFormat spillFormat
= info
.spillFormat();
149 switch (spillFormat
) {
150 case DataFormatDouble
: {
151 FPRReg fpr
= fprAllocate();
152 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
153 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
154 info
.fillDouble(fpr
);
158 case DataFormatInteger
: {
159 GPRReg gpr
= allocate();
161 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
162 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
163 info
.fillInteger(gpr
);
169 GPRReg gpr
= allocate();
171 ASSERT(spillFormat
& DataFormatJS
);
172 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
173 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
174 info
.fillJSValue(gpr
, spillFormat
);
181 switch (info
.registerFormat()) {
183 // Should have filled, above.
185 case DataFormatJSCell
:
186 case DataFormatBoolean
:
187 case DataFormatJSBoolean
:
188 case DataFormatStorage
:
189 // Should only be calling this function if we know this operand to be numeric.
190 ASSERT_NOT_REACHED();
193 GPRReg jsValueGpr
= info
.gpr();
194 m_gprs
.lock(jsValueGpr
);
195 FPRReg fpr
= fprAllocate();
196 GPRReg tempGpr
= allocate(); // FIXME: can we skip this allocation on the last use of the virtual register?
198 JITCompiler::Jump isInteger
= m_jit
.branchPtr(MacroAssembler::AboveOrEqual
, jsValueGpr
, GPRInfo::tagTypeNumberRegister
);
200 m_jit
.jitAssertIsJSDouble(jsValueGpr
);
202 // First, if we get here we have a double encoded as a JSValue
203 m_jit
.move(jsValueGpr
, tempGpr
);
204 unboxDouble(tempGpr
, fpr
);
205 JITCompiler::Jump hasUnboxedDouble
= m_jit
.jump();
207 // Finally, handle integers.
208 isInteger
.link(&m_jit
);
209 m_jit
.convertInt32ToDouble(jsValueGpr
, fpr
);
210 hasUnboxedDouble
.link(&m_jit
);
212 m_gprs
.release(jsValueGpr
);
213 m_gprs
.unlock(jsValueGpr
);
214 m_gprs
.unlock(tempGpr
);
215 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
216 info
.fillDouble(fpr
);
221 case DataFormatJSInteger
:
222 case DataFormatInteger
: {
223 FPRReg fpr
= fprAllocate();
224 GPRReg gpr
= info
.gpr();
226 m_jit
.convertInt32ToDouble(gpr
, fpr
);
232 case DataFormatJSDouble
: {
233 GPRReg gpr
= info
.gpr();
234 FPRReg fpr
= fprAllocate();
235 if (m_gprs
.isLocked(gpr
)) {
236 // Make sure we don't trample gpr if it is in use.
237 GPRReg temp
= allocate();
238 m_jit
.move(gpr
, temp
);
239 unboxDouble(temp
, fpr
);
242 unboxDouble(gpr
, fpr
);
245 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
247 info
.fillDouble(fpr
);
251 case DataFormatDouble
: {
252 FPRReg fpr
= info
.fpr();
258 ASSERT_NOT_REACHED();
259 return InvalidFPRReg
;
262 GPRReg
SpeculativeJIT::fillJSValue(NodeIndex nodeIndex
)
264 Node
& node
= at(nodeIndex
);
265 VirtualRegister virtualRegister
= node
.virtualRegister();
266 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
268 switch (info
.registerFormat()) {
269 case DataFormatNone
: {
270 GPRReg gpr
= allocate();
272 if (node
.hasConstant()) {
273 if (isInt32Constant(nodeIndex
)) {
274 info
.fillJSValue(gpr
, DataFormatJSInteger
);
275 JSValue jsValue
= jsNumber(valueOfInt32Constant(nodeIndex
));
276 m_jit
.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue
)), gpr
);
277 } else if (isNumberConstant(nodeIndex
)) {
278 info
.fillJSValue(gpr
, DataFormatJSDouble
);
279 JSValue
jsValue(JSValue::EncodeAsDouble
, valueOfNumberConstant(nodeIndex
));
280 m_jit
.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue
)), gpr
);
282 ASSERT(isJSConstant(nodeIndex
));
283 JSValue jsValue
= valueOfJSConstant(nodeIndex
);
284 m_jit
.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue
)), gpr
);
285 info
.fillJSValue(gpr
, DataFormatJS
);
288 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
290 DataFormat spillFormat
= info
.spillFormat();
291 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
292 if (spillFormat
== DataFormatInteger
) {
293 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
294 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, gpr
);
295 spillFormat
= DataFormatJSInteger
;
297 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
298 if (spillFormat
== DataFormatDouble
) {
299 // Need to box the double, since we want a JSValue.
300 m_jit
.subPtr(GPRInfo::tagTypeNumberRegister
, gpr
);
301 spillFormat
= DataFormatJSDouble
;
303 ASSERT(spillFormat
& DataFormatJS
);
305 info
.fillJSValue(gpr
, spillFormat
);
310 case DataFormatInteger
: {
311 GPRReg gpr
= info
.gpr();
312 // If the register has already been locked we need to take a copy.
313 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
314 if (m_gprs
.isLocked(gpr
)) {
315 GPRReg result
= allocate();
316 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, gpr
, result
);
320 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, gpr
);
321 info
.fillJSValue(gpr
, DataFormatJSInteger
);
325 case DataFormatDouble
: {
326 FPRReg fpr
= info
.fpr();
327 GPRReg gpr
= boxDouble(fpr
);
330 info
.fillJSValue(gpr
, DataFormatJSDouble
);
332 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderJS
);
338 // No retag required on JSVALUE64!
340 case DataFormatJSInteger
:
341 case DataFormatJSDouble
:
342 case DataFormatJSCell
:
343 case DataFormatJSBoolean
: {
344 GPRReg gpr
= info
.gpr();
349 case DataFormatBoolean
:
350 case DataFormatStorage
:
351 // this type currently never occurs
352 ASSERT_NOT_REACHED();
355 ASSERT_NOT_REACHED();
356 return InvalidGPRReg
;
359 void SpeculativeJIT::nonSpeculativeValueToNumber(Node
& node
)
361 if (isKnownNumeric(node
.child1().index())) {
362 JSValueOperand
op1(this, node
.child1());
363 GPRTemporary
result(this, op1
);
364 m_jit
.move(op1
.gpr(), result
.gpr());
365 jsValueResult(result
.gpr(), m_compileIndex
);
369 JSValueOperand
op1(this, node
.child1());
370 GPRTemporary
result(this);
372 ASSERT(!isInt32Constant(node
.child1().index()));
373 ASSERT(!isNumberConstant(node
.child1().index()));
375 GPRReg jsValueGpr
= op1
.gpr();
376 GPRReg gpr
= result
.gpr();
379 JITCompiler::Jump isInteger
= m_jit
.branchPtr(MacroAssembler::AboveOrEqual
, jsValueGpr
, GPRInfo::tagTypeNumberRegister
);
380 JITCompiler::Jump nonNumeric
= m_jit
.branchTestPtr(MacroAssembler::Zero
, jsValueGpr
, GPRInfo::tagTypeNumberRegister
);
382 // First, if we get here we have a double encoded as a JSValue
383 m_jit
.move(jsValueGpr
, gpr
);
384 JITCompiler::Jump hasUnboxedDouble
= m_jit
.jump();
386 // Next handle cells (& other JS immediates)
387 nonNumeric
.link(&m_jit
);
388 silentSpillAllRegisters(gpr
);
389 callOperation(dfgConvertJSValueToNumber
, FPRInfo::returnValueFPR
, jsValueGpr
);
390 boxDouble(FPRInfo::returnValueFPR
, gpr
);
391 silentFillAllRegisters(gpr
);
392 JITCompiler::Jump hasCalledToNumber
= m_jit
.jump();
394 // Finally, handle integers.
395 isInteger
.link(&m_jit
);
396 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, jsValueGpr
, gpr
);
397 hasUnboxedDouble
.link(&m_jit
);
398 hasCalledToNumber
.link(&m_jit
);
400 jsValueResult(result
.gpr(), m_compileIndex
, UseChildrenCalledExplicitly
);
403 void SpeculativeJIT::nonSpeculativeValueToInt32(Node
& node
)
405 ASSERT(!isInt32Constant(node
.child1().index()));
407 if (isKnownInteger(node
.child1().index())) {
408 IntegerOperand
op1(this, node
.child1());
409 GPRTemporary
result(this, op1
);
410 m_jit
.zeroExtend32ToPtr(op1
.gpr(), result
.gpr());
411 integerResult(result
.gpr(), m_compileIndex
);
415 GenerationInfo
& childInfo
= m_generationInfo
[at(node
.child1()).virtualRegister()];
416 if (childInfo
.isJSDouble()) {
417 DoubleOperand
op1(this, node
.child1());
418 GPRTemporary
result(this);
419 FPRReg fpr
= op1
.fpr();
420 GPRReg gpr
= result
.gpr();
422 JITCompiler::Jump truncatedToInteger
= m_jit
.branchTruncateDoubleToInt32(fpr
, gpr
, JITCompiler::BranchIfTruncateSuccessful
);
424 silentSpillAllRegisters(gpr
);
425 callOperation(toInt32
, gpr
, fpr
);
426 silentFillAllRegisters(gpr
);
428 truncatedToInteger
.link(&m_jit
);
429 integerResult(gpr
, m_compileIndex
, UseChildrenCalledExplicitly
);
433 JSValueOperand
op1(this, node
.child1());
434 GPRTemporary
result(this, op1
);
435 GPRReg jsValueGpr
= op1
.gpr();
436 GPRReg resultGPR
= result
.gpr();
439 JITCompiler::Jump isInteger
= m_jit
.branchPtr(MacroAssembler::AboveOrEqual
, jsValueGpr
, GPRInfo::tagTypeNumberRegister
);
441 // First handle non-integers
442 silentSpillAllRegisters(resultGPR
);
443 callOperation(dfgConvertJSValueToInt32
, resultGPR
, jsValueGpr
);
444 silentFillAllRegisters(resultGPR
);
445 JITCompiler::Jump hasCalledToInt32
= m_jit
.jump();
447 // Then handle integers.
448 isInteger
.link(&m_jit
);
449 m_jit
.zeroExtend32ToPtr(jsValueGpr
, resultGPR
);
450 hasCalledToInt32
.link(&m_jit
);
451 integerResult(resultGPR
, m_compileIndex
, UseChildrenCalledExplicitly
);
454 void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node
& node
)
456 IntegerOperand
op1(this, node
.child1());
457 FPRTemporary
boxer(this);
458 GPRTemporary
result(this, op1
);
460 JITCompiler::Jump positive
= m_jit
.branch32(MacroAssembler::GreaterThanOrEqual
, op1
.gpr(), TrustedImm32(0));
462 m_jit
.convertInt32ToDouble(op1
.gpr(), boxer
.fpr());
463 m_jit
.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32
), boxer
.fpr());
465 boxDouble(boxer
.fpr(), result
.gpr());
467 JITCompiler::Jump done
= m_jit
.jump();
469 positive
.link(&m_jit
);
471 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, op1
.gpr(), result
.gpr());
475 jsValueResult(result
.gpr(), m_compileIndex
);
478 JITCompiler::Call
SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg resultGPR
, GPRReg scratchGPR
, unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
480 JITCompiler::DataLabelPtr structureToCompare
;
481 JITCompiler::PatchableJump structureCheck
= m_jit
.patchableBranchPtrWithPatch(JITCompiler::NotEqual
, JITCompiler::Address(baseGPR
, JSCell::structureOffset()), structureToCompare
, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
483 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::offsetOfPropertyStorage()), resultGPR
);
484 JITCompiler::DataLabelCompact loadWithPatch
= m_jit
.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR
, 0), resultGPR
);
486 JITCompiler::Jump done
= m_jit
.jump();
488 structureCheck
.m_jump
.link(&m_jit
);
490 if (slowPathTarget
.isSet())
491 slowPathTarget
.link(&m_jit
);
493 JITCompiler::Label slowCase
= m_jit
.label();
495 if (spillMode
== NeedToSpill
)
496 silentSpillAllRegisters(resultGPR
);
497 JITCompiler::Call functionCall
= callOperation(operationGetByIdOptimize
, resultGPR
, baseGPR
, identifier(identifierNumber
));
498 if (spillMode
== NeedToSpill
)
499 silentFillAllRegisters(resultGPR
);
503 JITCompiler::Label doneLabel
= m_jit
.label();
505 m_jit
.addPropertyAccess(PropertyAccessRecord(codeOrigin
, structureToCompare
, functionCall
, structureCheck
, loadWithPatch
, slowCase
, doneLabel
, safeCast
<int8_t>(baseGPR
), safeCast
<int8_t>(resultGPR
), safeCast
<int8_t>(scratchGPR
), spillMode
== NeedToSpill
? PropertyAccessRecord::RegistersInUse
: PropertyAccessRecord::RegistersFlushed
));
507 if (scratchGPR
!= resultGPR
&& scratchGPR
!= InvalidGPRReg
&& spillMode
== NeedToSpill
)
513 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg valueGPR
, Edge valueUse
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind putKind
, JITCompiler::Jump slowPathTarget
)
516 JITCompiler::DataLabelPtr structureToCompare
;
517 JITCompiler::PatchableJump structureCheck
= m_jit
.patchableBranchPtrWithPatch(JITCompiler::NotEqual
, JITCompiler::Address(baseGPR
, JSCell::structureOffset()), structureToCompare
, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
519 writeBarrier(baseGPR
, valueGPR
, valueUse
, WriteBarrierForPropertyAccess
, scratchGPR
);
521 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::offsetOfPropertyStorage()), scratchGPR
);
522 JITCompiler::DataLabel32 storeWithPatch
= m_jit
.storePtrWithAddressOffsetPatch(valueGPR
, JITCompiler::Address(scratchGPR
, 0));
524 JITCompiler::Jump done
= m_jit
.jump();
526 structureCheck
.m_jump
.link(&m_jit
);
528 if (slowPathTarget
.isSet())
529 slowPathTarget
.link(&m_jit
);
531 JITCompiler::Label slowCase
= m_jit
.label();
533 silentSpillAllRegisters(InvalidGPRReg
);
534 V_DFGOperation_EJCI optimizedCall
;
535 if (m_jit
.strictModeFor(at(m_compileIndex
).codeOrigin
)) {
536 if (putKind
== Direct
)
537 optimizedCall
= operationPutByIdDirectStrictOptimize
;
539 optimizedCall
= operationPutByIdStrictOptimize
;
541 if (putKind
== Direct
)
542 optimizedCall
= operationPutByIdDirectNonStrictOptimize
;
544 optimizedCall
= operationPutByIdNonStrictOptimize
;
546 JITCompiler::Call functionCall
= callOperation(optimizedCall
, valueGPR
, baseGPR
, identifier(identifierNumber
));
547 silentFillAllRegisters(InvalidGPRReg
);
550 JITCompiler::Label doneLabel
= m_jit
.label();
552 m_jit
.addPropertyAccess(PropertyAccessRecord(codeOrigin
, structureToCompare
, functionCall
, structureCheck
, JITCompiler::DataLabelCompact(storeWithPatch
.label()), slowCase
, doneLabel
, safeCast
<int8_t>(baseGPR
), safeCast
<int8_t>(valueGPR
), safeCast
<int8_t>(scratchGPR
)));
555 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
)
557 JSValueOperand
arg(this, operand
);
558 GPRReg argGPR
= arg
.gpr();
560 GPRTemporary
result(this, arg
);
561 GPRReg resultGPR
= result
.gpr();
563 JITCompiler::Jump notCell
;
565 if (!isKnownCell(operand
.index()))
566 notCell
= m_jit
.branchTestPtr(MacroAssembler::NonZero
, argGPR
, GPRInfo::tagMaskRegister
);
568 m_jit
.loadPtr(JITCompiler::Address(argGPR
, JSCell::structureOffset()), resultGPR
);
569 m_jit
.test8(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, JITCompiler::Address(resultGPR
, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined
), resultGPR
);
571 if (!isKnownCell(operand
.index())) {
572 JITCompiler::Jump done
= m_jit
.jump();
574 notCell
.link(&m_jit
);
576 m_jit
.move(argGPR
, resultGPR
);
577 m_jit
.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
578 m_jit
.comparePtr(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm32(ValueNull
), resultGPR
);
583 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
584 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
);
587 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand
, NodeIndex branchNodeIndex
, bool invert
)
589 Node
& branchNode
= at(branchNodeIndex
);
590 BlockIndex taken
= branchNode
.takenBlockIndex();
591 BlockIndex notTaken
= branchNode
.notTakenBlockIndex();
593 if (taken
== (m_block
+ 1)) {
595 BlockIndex tmp
= taken
;
600 JSValueOperand
arg(this, operand
);
601 GPRReg argGPR
= arg
.gpr();
603 GPRTemporary
result(this, arg
);
604 GPRReg resultGPR
= result
.gpr();
606 JITCompiler::Jump notCell
;
608 if (!isKnownCell(operand
.index()))
609 notCell
= m_jit
.branchTestPtr(MacroAssembler::NonZero
, argGPR
, GPRInfo::tagMaskRegister
);
611 m_jit
.loadPtr(JITCompiler::Address(argGPR
, JSCell::structureOffset()), resultGPR
);
612 branchTest8(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, JITCompiler::Address(resultGPR
, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined
), taken
);
614 if (!isKnownCell(operand
.index())) {
615 jump(notTaken
, ForceJump
);
617 notCell
.link(&m_jit
);
619 m_jit
.move(argGPR
, resultGPR
);
620 m_jit
.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
621 branchPtr(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull
)), taken
);
627 bool SpeculativeJIT::nonSpeculativeCompareNull(Node
& node
, Edge operand
, bool invert
)
629 unsigned branchIndexInBlock
= detectPeepHoleBranch();
630 if (branchIndexInBlock
!= UINT_MAX
) {
631 NodeIndex branchNodeIndex
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
633 ASSERT(node
.adjustedRefCount() == 1);
635 nonSpeculativePeepholeBranchNull(operand
, branchNodeIndex
, invert
);
639 m_indexInBlock
= branchIndexInBlock
;
640 m_compileIndex
= branchNodeIndex
;
645 nonSpeculativeNonPeepholeCompareNull(operand
, invert
);
650 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node
& node
, NodeIndex branchNodeIndex
, MacroAssembler::RelationalCondition cond
, S_DFGOperation_EJJ helperFunction
)
652 Node
& branchNode
= at(branchNodeIndex
);
653 BlockIndex taken
= branchNode
.takenBlockIndex();
654 BlockIndex notTaken
= branchNode
.notTakenBlockIndex();
656 JITCompiler::ResultCondition callResultCondition
= JITCompiler::NonZero
;
658 // The branch instruction will branch to the taken block.
659 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
660 if (taken
== (m_block
+ 1)) {
661 cond
= JITCompiler::invert(cond
);
662 callResultCondition
= JITCompiler::Zero
;
663 BlockIndex tmp
= taken
;
668 JSValueOperand
arg1(this, node
.child1());
669 JSValueOperand
arg2(this, node
.child2());
670 GPRReg arg1GPR
= arg1
.gpr();
671 GPRReg arg2GPR
= arg2
.gpr();
673 JITCompiler::JumpList slowPath
;
675 if (isKnownNotInteger(node
.child1().index()) || isKnownNotInteger(node
.child2().index())) {
676 GPRResult
result(this);
677 GPRReg resultGPR
= result
.gpr();
683 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
685 branchTest32(callResultCondition
, resultGPR
, taken
);
687 GPRTemporary
result(this, arg2
);
688 GPRReg resultGPR
= result
.gpr();
693 if (!isKnownInteger(node
.child1().index()))
694 slowPath
.append(m_jit
.branchPtr(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
695 if (!isKnownInteger(node
.child2().index()))
696 slowPath
.append(m_jit
.branchPtr(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
698 branch32(cond
, arg1GPR
, arg2GPR
, taken
);
700 if (!isKnownInteger(node
.child1().index()) || !isKnownInteger(node
.child2().index())) {
701 jump(notTaken
, ForceJump
);
703 slowPath
.link(&m_jit
);
705 silentSpillAllRegisters(resultGPR
);
706 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
707 silentFillAllRegisters(resultGPR
);
709 branchTest32(callResultCondition
, resultGPR
, taken
);
715 m_indexInBlock
= m_jit
.graph().m_blocks
[m_block
]->size() - 1;
716 m_compileIndex
= branchNodeIndex
;
719 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node
& node
, MacroAssembler::RelationalCondition cond
, S_DFGOperation_EJJ helperFunction
)
721 JSValueOperand
arg1(this, node
.child1());
722 JSValueOperand
arg2(this, node
.child2());
723 GPRReg arg1GPR
= arg1
.gpr();
724 GPRReg arg2GPR
= arg2
.gpr();
726 JITCompiler::JumpList slowPath
;
728 if (isKnownNotInteger(node
.child1().index()) || isKnownNotInteger(node
.child2().index())) {
729 GPRResult
result(this);
730 GPRReg resultGPR
= result
.gpr();
736 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
738 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
739 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
741 GPRTemporary
result(this, arg2
);
742 GPRReg resultGPR
= result
.gpr();
747 if (!isKnownInteger(node
.child1().index()))
748 slowPath
.append(m_jit
.branchPtr(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
749 if (!isKnownInteger(node
.child2().index()))
750 slowPath
.append(m_jit
.branchPtr(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
752 m_jit
.compare32(cond
, arg1GPR
, arg2GPR
, resultGPR
);
754 if (!isKnownInteger(node
.child1().index()) || !isKnownInteger(node
.child2().index())) {
755 JITCompiler::Jump haveResult
= m_jit
.jump();
757 slowPath
.link(&m_jit
);
759 silentSpillAllRegisters(resultGPR
);
760 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
761 silentFillAllRegisters(resultGPR
);
763 m_jit
.andPtr(TrustedImm32(1), resultGPR
);
765 haveResult
.link(&m_jit
);
768 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
770 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
774 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node
& node
, NodeIndex branchNodeIndex
, bool invert
)
776 Node
& branchNode
= at(branchNodeIndex
);
777 BlockIndex taken
= branchNode
.takenBlockIndex();
778 BlockIndex notTaken
= branchNode
.notTakenBlockIndex();
780 // The branch instruction will branch to the taken block.
781 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
782 if (taken
== (m_block
+ 1)) {
784 BlockIndex tmp
= taken
;
789 JSValueOperand
arg1(this, node
.child1());
790 JSValueOperand
arg2(this, node
.child2());
791 GPRReg arg1GPR
= arg1
.gpr();
792 GPRReg arg2GPR
= arg2
.gpr();
794 GPRTemporary
result(this);
795 GPRReg resultGPR
= result
.gpr();
800 if (isKnownCell(node
.child1().index()) && isKnownCell(node
.child2().index())) {
801 // see if we get lucky: if the arguments are cells and they reference the same
802 // cell, then they must be strictly equal.
803 branchPtr(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
805 silentSpillAllRegisters(resultGPR
);
806 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
807 silentFillAllRegisters(resultGPR
);
809 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
811 m_jit
.orPtr(arg1GPR
, arg2GPR
, resultGPR
);
813 JITCompiler::Jump twoCellsCase
= m_jit
.branchTestPtr(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
815 JITCompiler::Jump leftOK
= m_jit
.branchPtr(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
816 JITCompiler::Jump leftDouble
= m_jit
.branchTestPtr(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
818 JITCompiler::Jump rightOK
= m_jit
.branchPtr(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
819 JITCompiler::Jump rightDouble
= m_jit
.branchTestPtr(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
820 rightOK
.link(&m_jit
);
822 branchPtr(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, taken
);
823 jump(notTaken
, ForceJump
);
825 twoCellsCase
.link(&m_jit
);
826 branchPtr(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
828 leftDouble
.link(&m_jit
);
829 rightDouble
.link(&m_jit
);
831 silentSpillAllRegisters(resultGPR
);
832 callOperation(operationCompareStrictEq
, resultGPR
, arg1GPR
, arg2GPR
);
833 silentFillAllRegisters(resultGPR
);
835 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
841 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node
& node
, bool invert
)
843 JSValueOperand
arg1(this, node
.child1());
844 JSValueOperand
arg2(this, node
.child2());
845 GPRReg arg1GPR
= arg1
.gpr();
846 GPRReg arg2GPR
= arg2
.gpr();
848 GPRTemporary
result(this);
849 GPRReg resultGPR
= result
.gpr();
854 if (isKnownCell(node
.child1().index()) && isKnownCell(node
.child2().index())) {
855 // see if we get lucky: if the arguments are cells and they reference the same
856 // cell, then they must be strictly equal.
857 JITCompiler::Jump notEqualCase
= m_jit
.branchPtr(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
);
859 m_jit
.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
861 JITCompiler::Jump done
= m_jit
.jump();
863 notEqualCase
.link(&m_jit
);
865 silentSpillAllRegisters(resultGPR
);
866 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
867 silentFillAllRegisters(resultGPR
);
869 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultGPR
);
870 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
874 m_jit
.orPtr(arg1GPR
, arg2GPR
, resultGPR
);
876 JITCompiler::Jump twoCellsCase
= m_jit
.branchTestPtr(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
878 JITCompiler::Jump leftOK
= m_jit
.branchPtr(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
879 JITCompiler::Jump leftDouble
= m_jit
.branchTestPtr(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
881 JITCompiler::Jump rightOK
= m_jit
.branchPtr(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
882 JITCompiler::Jump rightDouble
= m_jit
.branchTestPtr(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
883 rightOK
.link(&m_jit
);
885 m_jit
.comparePtr(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, resultGPR
);
887 JITCompiler::Jump done1
= m_jit
.jump();
889 twoCellsCase
.link(&m_jit
);
890 JITCompiler::Jump notEqualCase
= m_jit
.branchPtr(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
);
892 m_jit
.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
894 JITCompiler::Jump done2
= m_jit
.jump();
896 leftDouble
.link(&m_jit
);
897 rightDouble
.link(&m_jit
);
898 notEqualCase
.link(&m_jit
);
900 silentSpillAllRegisters(resultGPR
);
901 callOperation(operationCompareStrictEq
, resultGPR
, arg1GPR
, arg2GPR
);
902 silentFillAllRegisters(resultGPR
);
904 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultGPR
);
908 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
913 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
916 void SpeculativeJIT::emitCall(Node
& node
)
918 P_DFGOperation_E slowCallFunction
;
920 if (node
.op() == Call
)
921 slowCallFunction
= operationLinkCall
;
923 ASSERT(node
.op() == Construct
);
924 slowCallFunction
= operationLinkConstruct
;
927 // For constructors, the this argument is not passed but we have to make space
929 int dummyThisArgument
= node
.op() == Call
? 0 : 1;
931 CallLinkInfo::CallType callType
= node
.op() == Call
? CallLinkInfo::Call
: CallLinkInfo::Construct
;
933 Edge calleeEdge
= m_jit
.graph().m_varArgChildren
[node
.firstChild()];
934 JSValueOperand
callee(this, calleeEdge
);
935 GPRReg calleeGPR
= callee
.gpr();
938 // The call instruction's first child is either the function (normal call) or the
939 // receiver (method call). subsequent children are the arguments.
940 int numPassedArgs
= node
.numChildren() - 1;
942 m_jit
.store32(MacroAssembler::TrustedImm32(numPassedArgs
+ dummyThisArgument
), callFramePayloadSlot(RegisterFile::ArgumentCount
));
943 m_jit
.storePtr(GPRInfo::callFrameRegister
, callFrameSlot(RegisterFile::CallerFrame
));
944 m_jit
.storePtr(calleeGPR
, callFrameSlot(RegisterFile::Callee
));
946 for (int i
= 0; i
< numPassedArgs
; i
++) {
947 Edge argEdge
= m_jit
.graph().m_varArgChildren
[node
.firstChild() + 1 + i
];
948 JSValueOperand
arg(this, argEdge
);
949 GPRReg argGPR
= arg
.gpr();
952 m_jit
.storePtr(argGPR
, argumentSlot(i
+ dummyThisArgument
));
957 GPRResult
result(this);
958 GPRReg resultGPR
= result
.gpr();
960 JITCompiler::DataLabelPtr targetToCheck
;
961 JITCompiler::Jump slowPath
;
963 slowPath
= m_jit
.branchPtrWithPatch(MacroAssembler::NotEqual
, calleeGPR
, targetToCheck
, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue())));
964 m_jit
.loadPtr(MacroAssembler::Address(calleeGPR
, OBJECT_OFFSETOF(JSFunction
, m_scopeChain
)), resultGPR
);
965 m_jit
.storePtr(resultGPR
, callFrameSlot(RegisterFile::ScopeChain
));
967 m_jit
.addPtr(TrustedImm32(m_jit
.codeBlock()->m_numCalleeRegisters
* sizeof(Register
)), GPRInfo::callFrameRegister
);
969 CodeOrigin codeOrigin
= at(m_compileIndex
).codeOrigin
;
970 CallBeginToken token
= m_jit
.beginCall();
971 JITCompiler::Call fastCall
= m_jit
.nearCall();
972 m_jit
.notifyCall(fastCall
, codeOrigin
, token
);
974 JITCompiler::Jump done
= m_jit
.jump();
976 slowPath
.link(&m_jit
);
978 m_jit
.addPtr(TrustedImm32(m_jit
.codeBlock()->m_numCalleeRegisters
* sizeof(Register
)), GPRInfo::callFrameRegister
, GPRInfo::argumentGPR0
);
979 token
= m_jit
.beginCall();
980 JITCompiler::Call slowCall
= m_jit
.appendCall(slowCallFunction
);
981 m_jit
.addFastExceptionCheck(slowCall
, codeOrigin
, token
);
982 m_jit
.addPtr(TrustedImm32(m_jit
.codeBlock()->m_numCalleeRegisters
* sizeof(Register
)), GPRInfo::callFrameRegister
);
983 token
= m_jit
.beginCall();
984 JITCompiler::Call theCall
= m_jit
.call(GPRInfo::returnValueGPR
);
985 m_jit
.notifyCall(theCall
, codeOrigin
, token
);
989 m_jit
.move(GPRInfo::returnValueGPR
, resultGPR
);
991 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJS
, UseChildrenCalledExplicitly
);
993 m_jit
.addJSCall(fastCall
, slowCall
, targetToCheck
, callType
, at(m_compileIndex
).codeOrigin
);
996 template<bool strict
>
997 GPRReg
SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex
, DataFormat
& returnFormat
)
999 #if DFG_ENABLE(DEBUG_VERBOSE)
1000 dataLog("SpecInt@%d ", nodeIndex
);
1002 Node
& node
= at(nodeIndex
);
1003 VirtualRegister virtualRegister
= node
.virtualRegister();
1004 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1006 switch (info
.registerFormat()) {
1007 case DataFormatNone
: {
1008 if ((node
.hasConstant() && !isInt32Constant(nodeIndex
)) || info
.spillFormat() == DataFormatDouble
) {
1009 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1010 returnFormat
= DataFormatInteger
;
1014 GPRReg gpr
= allocate();
1016 if (node
.hasConstant()) {
1017 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1018 ASSERT(isInt32Constant(nodeIndex
));
1019 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex
)), gpr
);
1020 info
.fillInteger(gpr
);
1021 returnFormat
= DataFormatInteger
;
1025 DataFormat spillFormat
= info
.spillFormat();
1027 ASSERT((spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInteger
);
1029 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1031 if (spillFormat
== DataFormatJSInteger
|| spillFormat
== DataFormatInteger
) {
1032 // If we know this was spilled as an integer we can fill without checking.
1034 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
1035 info
.fillInteger(gpr
);
1036 returnFormat
= DataFormatInteger
;
1039 if (spillFormat
== DataFormatInteger
) {
1040 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
1041 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, gpr
);
1043 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
1044 info
.fillJSValue(gpr
, DataFormatJSInteger
);
1045 returnFormat
= DataFormatJSInteger
;
1048 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
1050 // Fill as JSValue, and fall through.
1051 info
.fillJSValue(gpr
, DataFormatJSInteger
);
1055 case DataFormatJS
: {
1056 // Check the value is an integer.
1057 GPRReg gpr
= info
.gpr();
1059 speculationCheck(BadType
, JSValueRegs(gpr
), nodeIndex
, m_jit
.branchPtr(MacroAssembler::Below
, gpr
, GPRInfo::tagTypeNumberRegister
));
1060 info
.fillJSValue(gpr
, DataFormatJSInteger
);
1061 // If !strict we're done, return.
1063 returnFormat
= DataFormatJSInteger
;
1066 // else fall through & handle as DataFormatJSInteger.
1070 case DataFormatJSInteger
: {
1071 // In a strict fill we need to strip off the value tag.
1073 GPRReg gpr
= info
.gpr();
1075 // If the register has already been locked we need to take a copy.
1076 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
1077 if (m_gprs
.isLocked(gpr
))
1078 result
= allocate();
1081 info
.fillInteger(gpr
);
1084 m_jit
.zeroExtend32ToPtr(gpr
, result
);
1085 returnFormat
= DataFormatInteger
;
1089 GPRReg gpr
= info
.gpr();
1091 returnFormat
= DataFormatJSInteger
;
1095 case DataFormatInteger
: {
1096 GPRReg gpr
= info
.gpr();
1098 returnFormat
= DataFormatInteger
;
1102 case DataFormatDouble
:
1103 case DataFormatJSDouble
: {
1104 if (node
.hasConstant() && isInt32Constant(nodeIndex
)) {
1105 GPRReg gpr
= allocate();
1106 ASSERT(isInt32Constant(nodeIndex
));
1107 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex
)), gpr
);
1108 returnFormat
= DataFormatInteger
;
1112 case DataFormatCell
:
1113 case DataFormatBoolean
:
1114 case DataFormatJSCell
:
1115 case DataFormatJSBoolean
: {
1116 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1117 returnFormat
= DataFormatInteger
;
1121 case DataFormatStorage
:
1122 ASSERT_NOT_REACHED();
1125 ASSERT_NOT_REACHED();
1126 return InvalidGPRReg
;
1129 GPRReg
SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex
, DataFormat
& returnFormat
)
1131 return fillSpeculateIntInternal
<false>(nodeIndex
, returnFormat
);
1134 GPRReg
SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex
)
1136 DataFormat mustBeDataFormatInteger
;
1137 GPRReg result
= fillSpeculateIntInternal
<true>(nodeIndex
, mustBeDataFormatInteger
);
1138 ASSERT(mustBeDataFormatInteger
== DataFormatInteger
);
1142 FPRReg
SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex
)
1144 #if DFG_ENABLE(DEBUG_VERBOSE)
1145 dataLog("SpecDouble@%d ", nodeIndex
);
1147 Node
& node
= at(nodeIndex
);
1148 VirtualRegister virtualRegister
= node
.virtualRegister();
1149 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1151 if (info
.registerFormat() == DataFormatNone
) {
1152 if (node
.hasConstant()) {
1153 GPRReg gpr
= allocate();
1155 if (isInt32Constant(nodeIndex
)) {
1156 FPRReg fpr
= fprAllocate();
1157 m_jit
.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(static_cast<double>(valueOfInt32Constant(nodeIndex
))))), gpr
);
1158 m_jit
.movePtrToDouble(gpr
, fpr
);
1161 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1162 info
.fillDouble(fpr
);
1165 if (isNumberConstant(nodeIndex
)) {
1166 FPRReg fpr
= fprAllocate();
1167 m_jit
.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex
)))), gpr
);
1168 m_jit
.movePtrToDouble(gpr
, fpr
);
1171 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1172 info
.fillDouble(fpr
);
1175 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1176 return fprAllocate();
1179 DataFormat spillFormat
= info
.spillFormat();
1180 switch (spillFormat
) {
1181 case DataFormatDouble
: {
1182 FPRReg fpr
= fprAllocate();
1183 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
1184 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1185 info
.fillDouble(fpr
);
1189 case DataFormatInteger
: {
1190 GPRReg gpr
= allocate();
1192 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1193 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
1194 info
.fillInteger(gpr
);
1200 GPRReg gpr
= allocate();
1202 ASSERT(spillFormat
& DataFormatJS
);
1203 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1204 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
1205 info
.fillJSValue(gpr
, spillFormat
);
1211 switch (info
.registerFormat()) {
1212 case DataFormatNone
: // Should have filled, above.
1213 case DataFormatBoolean
: // This type never occurs.
1214 case DataFormatStorage
:
1215 ASSERT_NOT_REACHED();
1217 case DataFormatCell
:
1218 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1219 return fprAllocate();
1221 case DataFormatJSCell
:
1223 case DataFormatJSBoolean
: {
1224 GPRReg jsValueGpr
= info
.gpr();
1225 m_gprs
.lock(jsValueGpr
);
1226 FPRReg fpr
= fprAllocate();
1227 GPRReg tempGpr
= allocate();
1229 JITCompiler::Jump isInteger
= m_jit
.branchPtr(MacroAssembler::AboveOrEqual
, jsValueGpr
, GPRInfo::tagTypeNumberRegister
);
1231 speculationCheck(BadType
, JSValueRegs(jsValueGpr
), nodeIndex
, m_jit
.branchTestPtr(MacroAssembler::Zero
, jsValueGpr
, GPRInfo::tagTypeNumberRegister
));
1233 // First, if we get here we have a double encoded as a JSValue
1234 m_jit
.move(jsValueGpr
, tempGpr
);
1235 unboxDouble(tempGpr
, fpr
);
1236 JITCompiler::Jump hasUnboxedDouble
= m_jit
.jump();
1238 // Finally, handle integers.
1239 isInteger
.link(&m_jit
);
1240 m_jit
.convertInt32ToDouble(jsValueGpr
, fpr
);
1241 hasUnboxedDouble
.link(&m_jit
);
1243 m_gprs
.release(jsValueGpr
);
1244 m_gprs
.unlock(jsValueGpr
);
1245 m_gprs
.unlock(tempGpr
);
1246 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1247 info
.fillDouble(fpr
);
1252 case DataFormatJSInteger
:
1253 case DataFormatInteger
: {
1254 FPRReg fpr
= fprAllocate();
1255 GPRReg gpr
= info
.gpr();
1257 m_jit
.convertInt32ToDouble(gpr
, fpr
);
1263 case DataFormatJSDouble
: {
1264 GPRReg gpr
= info
.gpr();
1265 FPRReg fpr
= fprAllocate();
1266 if (m_gprs
.isLocked(gpr
)) {
1267 // Make sure we don't trample gpr if it is in use.
1268 GPRReg temp
= allocate();
1269 m_jit
.move(gpr
, temp
);
1270 unboxDouble(temp
, fpr
);
1273 unboxDouble(gpr
, fpr
);
1275 m_gprs
.release(gpr
);
1276 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1278 info
.fillDouble(fpr
);
1282 case DataFormatDouble
: {
1283 FPRReg fpr
= info
.fpr();
1289 ASSERT_NOT_REACHED();
1290 return InvalidFPRReg
;
1293 GPRReg
SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex
)
1295 #if DFG_ENABLE(DEBUG_VERBOSE)
1296 dataLog("SpecCell@%d ", nodeIndex
);
1298 Node
& node
= at(nodeIndex
);
1299 VirtualRegister virtualRegister
= node
.virtualRegister();
1300 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1302 switch (info
.registerFormat()) {
1303 case DataFormatNone
: {
1304 if (info
.spillFormat() == DataFormatInteger
|| info
.spillFormat() == DataFormatDouble
) {
1305 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1309 GPRReg gpr
= allocate();
1311 if (node
.hasConstant()) {
1312 JSValue jsValue
= valueOfJSConstant(nodeIndex
);
1313 if (jsValue
.isCell()) {
1314 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1315 m_jit
.move(MacroAssembler::TrustedImmPtr(jsValue
.asCell()), gpr
);
1316 info
.fillJSValue(gpr
, DataFormatJSCell
);
1319 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1322 ASSERT(info
.spillFormat() & DataFormatJS
);
1323 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1324 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
1326 info
.fillJSValue(gpr
, DataFormatJS
);
1327 if (info
.spillFormat() != DataFormatJSCell
)
1328 speculationCheck(BadType
, JSValueRegs(gpr
), nodeIndex
, m_jit
.branchTestPtr(MacroAssembler::NonZero
, gpr
, GPRInfo::tagMaskRegister
));
1329 info
.fillJSValue(gpr
, DataFormatJSCell
);
1333 case DataFormatCell
:
1334 case DataFormatJSCell
: {
1335 GPRReg gpr
= info
.gpr();
1340 case DataFormatJS
: {
1341 GPRReg gpr
= info
.gpr();
1343 speculationCheck(BadType
, JSValueRegs(gpr
), nodeIndex
, m_jit
.branchTestPtr(MacroAssembler::NonZero
, gpr
, GPRInfo::tagMaskRegister
));
1344 info
.fillJSValue(gpr
, DataFormatJSCell
);
1348 case DataFormatJSInteger
:
1349 case DataFormatInteger
:
1350 case DataFormatJSDouble
:
1351 case DataFormatDouble
:
1352 case DataFormatJSBoolean
:
1353 case DataFormatBoolean
: {
1354 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1358 case DataFormatStorage
:
1359 ASSERT_NOT_REACHED();
1362 ASSERT_NOT_REACHED();
1363 return InvalidGPRReg
;
1366 GPRReg
SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex
)
1368 #if DFG_ENABLE(DEBUG_VERBOSE)
1369 dataLog("SpecBool@%d ", nodeIndex
);
1371 Node
& node
= at(nodeIndex
);
1372 VirtualRegister virtualRegister
= node
.virtualRegister();
1373 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1375 switch (info
.registerFormat()) {
1376 case DataFormatNone
: {
1377 if (info
.spillFormat() == DataFormatInteger
|| info
.spillFormat() == DataFormatDouble
) {
1378 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1382 GPRReg gpr
= allocate();
1384 if (node
.hasConstant()) {
1385 JSValue jsValue
= valueOfJSConstant(nodeIndex
);
1386 if (jsValue
.isBoolean()) {
1387 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1388 m_jit
.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue
)), gpr
);
1389 info
.fillJSValue(gpr
, DataFormatJSBoolean
);
1392 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1395 ASSERT(info
.spillFormat() & DataFormatJS
);
1396 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1397 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
1399 info
.fillJSValue(gpr
, DataFormatJS
);
1400 if (info
.spillFormat() != DataFormatJSBoolean
) {
1401 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1402 speculationCheck(BadType
, JSValueRegs(gpr
), nodeIndex
, m_jit
.branchTestPtr(MacroAssembler::NonZero
, gpr
, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck
, gpr
, InvalidGPRReg
));
1403 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1405 info
.fillJSValue(gpr
, DataFormatJSBoolean
);
1409 case DataFormatBoolean
:
1410 case DataFormatJSBoolean
: {
1411 GPRReg gpr
= info
.gpr();
1416 case DataFormatJS
: {
1417 GPRReg gpr
= info
.gpr();
1419 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1420 speculationCheck(BadType
, JSValueRegs(gpr
), nodeIndex
, m_jit
.branchTestPtr(MacroAssembler::NonZero
, gpr
, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck
, gpr
, InvalidGPRReg
));
1421 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1422 info
.fillJSValue(gpr
, DataFormatJSBoolean
);
1426 case DataFormatJSInteger
:
1427 case DataFormatInteger
:
1428 case DataFormatJSDouble
:
1429 case DataFormatDouble
:
1430 case DataFormatJSCell
:
1431 case DataFormatCell
: {
1432 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
1436 case DataFormatStorage
:
1437 ASSERT_NOT_REACHED();
1440 ASSERT_NOT_REACHED();
1441 return InvalidGPRReg
;
1444 JITCompiler::Jump
SpeculativeJIT::convertToDouble(GPRReg value
, FPRReg result
, GPRReg tmp
)
1446 JITCompiler::Jump isInteger
= m_jit
.branchPtr(MacroAssembler::AboveOrEqual
, value
, GPRInfo::tagTypeNumberRegister
);
1448 JITCompiler::Jump notNumber
= m_jit
.branchTestPtr(MacroAssembler::Zero
, value
, GPRInfo::tagTypeNumberRegister
);
1450 m_jit
.move(value
, tmp
);
1451 unboxDouble(tmp
, result
);
1453 JITCompiler::Jump done
= m_jit
.jump();
1455 isInteger
.link(&m_jit
);
1457 m_jit
.convertInt32ToDouble(value
, result
);
1464 void SpeculativeJIT::compileObjectEquality(Node
& node
, const ClassInfo
* classInfo
, PredictionChecker predictionCheck
)
1466 SpeculateCellOperand
op1(this, node
.child1());
1467 SpeculateCellOperand
op2(this, node
.child2());
1468 GPRTemporary
result(this, op1
);
1470 GPRReg op1GPR
= op1
.gpr();
1471 GPRReg op2GPR
= op2
.gpr();
1472 GPRReg resultGPR
= result
.gpr();
1474 if (!predictionCheck(m_state
.forNode(node
.child1()).m_type
))
1475 speculationCheck(BadType
, JSValueRegs(op1GPR
), node
.child1().index(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(op1GPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo
)));
1476 if (!predictionCheck(m_state
.forNode(node
.child2()).m_type
))
1477 speculationCheck(BadType
, JSValueRegs(op2GPR
), node
.child2().index(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(op2GPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo
)));
1479 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1480 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1481 MacroAssembler::Jump done
= m_jit
.jump();
1482 falseCase
.link(&m_jit
);
1483 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1486 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
);
1489 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(
1490 Edge leftChild
, Edge rightChild
,
1491 const ClassInfo
* classInfo
, PredictionChecker predictionCheck
)
1493 SpeculateCellOperand
op1(this, leftChild
);
1494 JSValueOperand
op2(this, rightChild
);
1495 GPRTemporary
result(this);
1497 GPRReg op1GPR
= op1
.gpr();
1498 GPRReg op2GPR
= op2
.gpr();
1499 GPRReg resultGPR
= result
.gpr();
1501 if (!predictionCheck(m_state
.forNode(leftChild
).m_type
)) {
1503 BadType
, JSValueRegs(op1GPR
), leftChild
.index(),
1505 MacroAssembler::NotEqual
,
1506 MacroAssembler::Address(op1GPR
, JSCell::classInfoOffset()),
1507 MacroAssembler::TrustedImmPtr(classInfo
)));
1510 // It seems that most of the time when programs do a == b where b may be either null/undefined
1511 // or an object, b is usually an object. Balance the branches to make that case fast.
1512 MacroAssembler::Jump rightNotCell
=
1513 m_jit
.branchTestPtr(MacroAssembler::NonZero
, op2GPR
, GPRInfo::tagMaskRegister
);
1515 // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
1516 // proof, when filtered on cell, demonstrates that we have an object of the desired type
1517 // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
1519 if (!predictionCheck(m_state
.forNode(rightChild
).m_type
& PredictCell
)) {
1521 BadType
, JSValueRegs(op2GPR
), rightChild
.index(),
1523 MacroAssembler::NotEqual
,
1524 MacroAssembler::Address(op2GPR
, JSCell::classInfoOffset()),
1525 MacroAssembler::TrustedImmPtr(classInfo
)));
1528 // At this point we know that we can perform a straight-forward equality comparison on pointer
1529 // values because both left and right are pointers to objects that have no special equality
1531 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1532 MacroAssembler::Jump trueCase
= m_jit
.jump();
1534 rightNotCell
.link(&m_jit
);
1536 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1537 // prove that it is either null or undefined.
1538 if (!isOtherPrediction(m_state
.forNode(rightChild
).m_type
& ~PredictCell
)) {
1539 m_jit
.move(op2GPR
, resultGPR
);
1540 m_jit
.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1543 BadType
, JSValueRegs(op2GPR
), rightChild
.index(),
1545 MacroAssembler::NotEqual
, resultGPR
,
1546 MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull
))));
1549 falseCase
.link(&m_jit
);
1550 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1551 MacroAssembler::Jump done
= m_jit
.jump();
1552 trueCase
.link(&m_jit
);
1553 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1556 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
);
1559 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(
1560 Edge leftChild
, Edge rightChild
, NodeIndex branchNodeIndex
,
1561 const ClassInfo
* classInfo
, PredictionChecker predictionCheck
)
1563 Node
& branchNode
= at(branchNodeIndex
);
1564 BlockIndex taken
= branchNode
.takenBlockIndex();
1565 BlockIndex notTaken
= branchNode
.notTakenBlockIndex();
1567 SpeculateCellOperand
op1(this, leftChild
);
1568 JSValueOperand
op2(this, rightChild
);
1569 GPRTemporary
result(this);
1571 GPRReg op1GPR
= op1
.gpr();
1572 GPRReg op2GPR
= op2
.gpr();
1573 GPRReg resultGPR
= result
.gpr();
1575 if (!predictionCheck(m_state
.forNode(leftChild
).m_type
)) {
1577 BadType
, JSValueRegs(op1GPR
), leftChild
.index(),
1579 MacroAssembler::NotEqual
,
1580 MacroAssembler::Address(op1GPR
, JSCell::classInfoOffset()),
1581 MacroAssembler::TrustedImmPtr(classInfo
)));
1584 // It seems that most of the time when programs do a == b where b may be either null/undefined
1585 // or an object, b is usually an object. Balance the branches to make that case fast.
1586 MacroAssembler::Jump rightNotCell
=
1587 m_jit
.branchTestPtr(MacroAssembler::NonZero
, op2GPR
, GPRInfo::tagMaskRegister
);
1589 // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
1590 // proof, when filtered on cell, demonstrates that we have an object of the desired type
1591 // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
1593 if (!predictionCheck(m_state
.forNode(rightChild
).m_type
& PredictCell
)) {
1595 BadType
, JSValueRegs(op2GPR
), rightChild
.index(),
1597 MacroAssembler::NotEqual
,
1598 MacroAssembler::Address(op2GPR
, JSCell::classInfoOffset()),
1599 MacroAssembler::TrustedImmPtr(classInfo
)));
1602 // At this point we know that we can perform a straight-forward equality comparison on pointer
1603 // values because both left and right are pointers to objects that have no special equality
1605 branchPtr(MacroAssembler::Equal
, op1GPR
, op2GPR
, taken
);
1607 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1608 // prove that it is either null or undefined.
1609 if (isOtherPrediction(m_state
.forNode(rightChild
).m_type
& ~PredictCell
))
1610 rightNotCell
.link(&m_jit
);
1612 jump(notTaken
, ForceJump
);
1614 rightNotCell
.link(&m_jit
);
1615 m_jit
.move(op2GPR
, resultGPR
);
1616 m_jit
.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1619 BadType
, JSValueRegs(op2GPR
), rightChild
.index(),
1621 MacroAssembler::NotEqual
, resultGPR
,
1622 MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull
))));
1628 void SpeculativeJIT::compileIntegerCompare(Node
& node
, MacroAssembler::RelationalCondition condition
)
1630 SpeculateIntegerOperand
op1(this, node
.child1());
1631 SpeculateIntegerOperand
op2(this, node
.child2());
1632 GPRTemporary
result(this, op1
, op2
);
1634 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), result
.gpr());
1636 // If we add a DataFormatBool, we should use it here.
1637 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1638 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
1641 void SpeculativeJIT::compileDoubleCompare(Node
& node
, MacroAssembler::DoubleCondition condition
)
1643 SpeculateDoubleOperand
op1(this, node
.child1());
1644 SpeculateDoubleOperand
op2(this, node
.child2());
1645 GPRTemporary
result(this);
1647 m_jit
.move(TrustedImm32(ValueTrue
), result
.gpr());
1648 MacroAssembler::Jump trueCase
= m_jit
.branchDouble(condition
, op1
.fpr(), op2
.fpr());
1649 m_jit
.xorPtr(TrustedImm32(true), result
.gpr());
1650 trueCase
.link(&m_jit
);
1652 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
1655 void SpeculativeJIT::compileValueAdd(Node
& node
)
1657 JSValueOperand
op1(this, node
.child1());
1658 JSValueOperand
op2(this, node
.child2());
1660 GPRReg op1GPR
= op1
.gpr();
1661 GPRReg op2GPR
= op2
.gpr();
1665 GPRResult
result(this);
1666 if (isKnownNotNumber(node
.child1().index()) || isKnownNotNumber(node
.child2().index()))
1667 callOperation(operationValueAddNotNumber
, result
.gpr(), op1GPR
, op2GPR
);
1669 callOperation(operationValueAdd
, result
.gpr(), op1GPR
, op2GPR
);
1671 jsValueResult(result
.gpr(), m_compileIndex
);
1674 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse
, const ClassInfo
* classInfo
, bool needSpeculationCheck
)
1676 JSValueOperand
value(this, nodeUse
);
1677 GPRTemporary
result(this);
1678 GPRReg valueGPR
= value
.gpr();
1679 GPRReg resultGPR
= result
.gpr();
1681 MacroAssembler::Jump notCell
= m_jit
.branchTestPtr(MacroAssembler::NonZero
, valueGPR
, GPRInfo::tagMaskRegister
);
1682 if (needSpeculationCheck
)
1683 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
, m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(valueGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo
)));
1684 m_jit
.move(TrustedImm32(static_cast<int32_t>(ValueFalse
)), resultGPR
);
1685 MacroAssembler::Jump done
= m_jit
.jump();
1687 notCell
.link(&m_jit
);
1689 if (needSpeculationCheck
) {
1690 m_jit
.move(valueGPR
, resultGPR
);
1691 m_jit
.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1692 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
, m_jit
.branchPtr(MacroAssembler::NotEqual
, resultGPR
, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull
))));
1694 m_jit
.move(TrustedImm32(static_cast<int32_t>(ValueTrue
)), resultGPR
);
1698 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
);
1701 void SpeculativeJIT::compileLogicalNot(Node
& node
)
1703 if (at(node
.child1()).shouldSpeculateFinalObjectOrOther()) {
1704 compileObjectOrOtherLogicalNot(node
.child1(), &JSFinalObject::s_info
, !isFinalObjectOrOtherPrediction(m_state
.forNode(node
.child1()).m_type
));
1707 if (at(node
.child1()).shouldSpeculateArrayOrOther()) {
1708 compileObjectOrOtherLogicalNot(node
.child1(), &JSArray::s_info
, !isArrayOrOtherPrediction(m_state
.forNode(node
.child1()).m_type
));
1711 if (at(node
.child1()).shouldSpeculateInteger()) {
1712 SpeculateIntegerOperand
value(this, node
.child1());
1713 GPRTemporary
result(this, value
);
1714 m_jit
.compare32(MacroAssembler::Equal
, value
.gpr(), MacroAssembler::TrustedImm32(0), result
.gpr());
1715 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1716 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
1719 if (at(node
.child1()).shouldSpeculateNumber()) {
1720 SpeculateDoubleOperand
value(this, node
.child1());
1721 FPRTemporary
scratch(this);
1722 GPRTemporary
result(this);
1723 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
1724 MacroAssembler::Jump nonZero
= m_jit
.branchDoubleNonZero(value
.fpr(), scratch
.fpr());
1725 m_jit
.xor32(TrustedImm32(true), result
.gpr());
1726 nonZero
.link(&m_jit
);
1727 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
1731 PredictedType prediction
= m_jit
.getPrediction(node
.child1());
1732 if (isBooleanPrediction(prediction
)) {
1733 if (isBooleanPrediction(m_state
.forNode(node
.child1()).m_type
)) {
1734 SpeculateBooleanOperand
value(this, node
.child1());
1735 GPRTemporary
result(this, value
);
1737 m_jit
.move(value
.gpr(), result
.gpr());
1738 m_jit
.xorPtr(TrustedImm32(true), result
.gpr());
1740 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
1744 JSValueOperand
value(this, node
.child1());
1745 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
1747 m_jit
.move(value
.gpr(), result
.gpr());
1748 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
1749 speculationCheck(BadType
, JSValueRegs(value
.gpr()), node
.child1(), m_jit
.branchTestPtr(JITCompiler::NonZero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1750 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue
)), result
.gpr());
1752 // If we add a DataFormatBool, we should use it here.
1753 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
1757 JSValueOperand
arg1(this, node
.child1());
1758 GPRTemporary
result(this);
1760 GPRReg arg1GPR
= arg1
.gpr();
1761 GPRReg resultGPR
= result
.gpr();
1765 m_jit
.move(arg1GPR
, resultGPR
);
1766 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse
)), resultGPR
);
1767 JITCompiler::Jump fastCase
= m_jit
.branchTestPtr(JITCompiler::Zero
, resultGPR
, TrustedImm32(static_cast<int32_t>(~1)));
1769 silentSpillAllRegisters(resultGPR
);
1770 callOperation(dfgConvertJSValueToBoolean
, resultGPR
, arg1GPR
);
1771 silentFillAllRegisters(resultGPR
);
1773 fastCase
.link(&m_jit
);
1775 m_jit
.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue
)), resultGPR
);
1776 jsValueResult(resultGPR
, m_compileIndex
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
1779 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse
, BlockIndex taken
, BlockIndex notTaken
, const ClassInfo
* classInfo
, bool needSpeculationCheck
)
1781 JSValueOperand
value(this, nodeUse
);
1782 GPRTemporary
scratch(this);
1783 GPRReg valueGPR
= value
.gpr();
1784 GPRReg scratchGPR
= scratch
.gpr();
1786 MacroAssembler::Jump notCell
= m_jit
.branchTestPtr(MacroAssembler::NonZero
, valueGPR
, GPRInfo::tagMaskRegister
);
1787 if (needSpeculationCheck
)
1788 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
.index(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(valueGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo
)));
1789 jump(taken
, ForceJump
);
1791 notCell
.link(&m_jit
);
1793 if (needSpeculationCheck
) {
1794 m_jit
.move(valueGPR
, scratchGPR
);
1795 m_jit
.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined
), scratchGPR
);
1796 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
.index(), m_jit
.branchPtr(MacroAssembler::NotEqual
, scratchGPR
, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull
))));
1800 noResult(m_compileIndex
);
1803 void SpeculativeJIT::emitBranch(Node
& node
)
1805 BlockIndex taken
= node
.takenBlockIndex();
1806 BlockIndex notTaken
= node
.notTakenBlockIndex();
1808 if (at(node
.child1()).shouldSpeculateFinalObjectOrOther()) {
1809 emitObjectOrOtherBranch(node
.child1(), taken
, notTaken
, &JSFinalObject::s_info
, !isFinalObjectOrOtherPrediction(m_state
.forNode(node
.child1()).m_type
));
1810 } else if (at(node
.child1()).shouldSpeculateArrayOrOther()) {
1811 emitObjectOrOtherBranch(node
.child1(), taken
, notTaken
, &JSArray::s_info
, !isArrayOrOtherPrediction(m_state
.forNode(node
.child1()).m_type
));
1812 } else if (at(node
.child1()).shouldSpeculateNumber()) {
1813 if (at(node
.child1()).shouldSpeculateInteger()) {
1814 bool invert
= false;
1816 if (taken
== (m_block
+ 1)) {
1818 BlockIndex tmp
= taken
;
1823 SpeculateIntegerOperand
value(this, node
.child1());
1824 branchTest32(invert
? MacroAssembler::Zero
: MacroAssembler::NonZero
, value
.gpr(), taken
);
1826 SpeculateDoubleOperand
value(this, node
.child1());
1827 FPRTemporary
scratch(this);
1828 branchDoubleNonZero(value
.fpr(), scratch
.fpr(), taken
);
1833 noResult(m_compileIndex
);
1835 JSValueOperand
value(this, node
.child1());
1836 GPRReg valueGPR
= value
.gpr();
1838 bool predictBoolean
= isBooleanPrediction(m_jit
.getPrediction(node
.child1()));
1840 if (predictBoolean
) {
1841 if (isBooleanPrediction(m_state
.forNode(node
.child1()).m_type
)) {
1842 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
1844 if (taken
== (m_block
+ 1)) {
1845 condition
= MacroAssembler::Zero
;
1846 BlockIndex tmp
= taken
;
1851 branchTest32(condition
, valueGPR
, TrustedImm32(true), taken
);
1854 branchPtr(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken
);
1855 branchPtr(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken
);
1857 speculationCheck(BadType
, JSValueRegs(valueGPR
), node
.child1(), m_jit
.jump());
1861 GPRTemporary
result(this);
1862 GPRReg resultGPR
= result
.gpr();
1864 branchPtr(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImmPtr(JSValue::encode(jsNumber(0))), notTaken
);
1865 branchPtr(MacroAssembler::AboveOrEqual
, valueGPR
, GPRInfo::tagTypeNumberRegister
, taken
);
1867 if (!predictBoolean
) {
1868 branchPtr(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken
);
1869 branchPtr(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken
);
1874 silentSpillAllRegisters(resultGPR
);
1875 callOperation(dfgConvertJSValueToBoolean
, resultGPR
, valueGPR
);
1876 silentFillAllRegisters(resultGPR
);
1878 branchTest32(MacroAssembler::NonZero
, resultGPR
, taken
);
1882 noResult(m_compileIndex
, UseChildrenCalledExplicitly
);
1886 void SpeculativeJIT::compile(Node
& node
)
1888 NodeType op
= node
.op();
1892 initConstantInfo(m_compileIndex
);
1895 case WeakJSConstant
:
1896 m_jit
.addWeakReference(node
.weakConstant());
1897 initConstantInfo(m_compileIndex
);
1901 PredictedType prediction
= node
.variableAccessData()->prediction();
1902 AbstractValue
& value
= block()->valuesAtHead
.operand(node
.local());
1904 // If we have no prediction for this local, then don't attempt to compile.
1905 if (prediction
== PredictNone
|| value
.isClear()) {
1906 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), NoNode
);
1910 if (!m_jit
.graph().isCaptured(node
.local())) {
1911 if (node
.variableAccessData()->shouldUseDoubleFormat()) {
1912 FPRTemporary
result(this);
1913 m_jit
.loadDouble(JITCompiler::addressFor(node
.local()), result
.fpr());
1914 VirtualRegister virtualRegister
= node
.virtualRegister();
1915 m_fprs
.retain(result
.fpr(), virtualRegister
, SpillOrderDouble
);
1916 m_generationInfo
[virtualRegister
].initDouble(m_compileIndex
, node
.refCount(), result
.fpr());
1920 if (isInt32Prediction(value
.m_type
)) {
1921 GPRTemporary
result(this);
1922 m_jit
.load32(JITCompiler::payloadFor(node
.local()), result
.gpr());
1924 // Like integerResult, but don't useChildren - our children are phi nodes,
1925 // and don't represent values within this dataflow with virtual registers.
1926 VirtualRegister virtualRegister
= node
.virtualRegister();
1927 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
1928 m_generationInfo
[virtualRegister
].initInteger(m_compileIndex
, node
.refCount(), result
.gpr());
1933 GPRTemporary
result(this);
1934 m_jit
.loadPtr(JITCompiler::addressFor(node
.local()), result
.gpr());
1936 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1937 // and don't represent values within this dataflow with virtual registers.
1938 VirtualRegister virtualRegister
= node
.virtualRegister();
1939 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1942 if (m_jit
.graph().isCaptured(node
.local()))
1943 format
= DataFormatJS
;
1944 else if (isCellPrediction(value
.m_type
))
1945 format
= DataFormatJSCell
;
1946 else if (isBooleanPrediction(value
.m_type
))
1947 format
= DataFormatJSBoolean
;
1949 format
= DataFormatJS
;
1951 m_generationInfo
[virtualRegister
].initJSValue(m_compileIndex
, node
.refCount(), result
.gpr(), format
);
1956 // SetLocal doubles as a hint as to where a node will be stored and
1957 // as a speculation point. So before we speculate make sure that we
1958 // know where the child of this node needs to go in the virtual
1960 compileMovHint(node
);
1962 // As far as OSR is concerned, we're on the bytecode index corresponding
1963 // to the *next* instruction, since we've already "executed" the
1964 // SetLocal and whatever other DFG Nodes are associated with the same
1965 // bytecode index as the SetLocal.
1966 ASSERT(m_codeOriginForOSR
== node
.codeOrigin
);
1967 Node
* nextNode
= &at(block()->at(m_indexInBlock
+ 1));
1969 // But even more oddly, we need to be super careful about the following
1976 // This next piece of crazy takes care of this.
1977 if (nextNode
->op() == Flush
&& nextNode
->child1() == m_compileIndex
)
1978 nextNode
= &at(block()->at(m_indexInBlock
+ 2));
1980 // Oddly, it's possible for the bytecode index for the next node to be
1981 // equal to ours. This will happen for op_post_inc. And, even more oddly,
1982 // this is just fine. Ordinarily, this wouldn't be fine, since if the
1983 // next node failed OSR then we'd be OSR-ing with this SetLocal's local
1984 // variable already set even though from the standpoint of the old JIT,
1985 // this SetLocal should not have executed. But for op_post_inc, it's just
1986 // fine, because this SetLocal's local (i.e. the LHS in a x = y++
1987 // statement) would be dead anyway - so the fact that DFG would have
1988 // already made the assignment, and baked it into the register file during
1989 // OSR exit, would not be visible to the old JIT in any way.
1990 m_codeOriginForOSR
= nextNode
->codeOrigin
;
1992 if (!m_jit
.graph().isCaptured(node
.local())) {
1993 if (node
.variableAccessData()->shouldUseDoubleFormat()) {
1994 SpeculateDoubleOperand
value(this, node
.child1());
1995 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
.local()));
1996 noResult(m_compileIndex
);
1997 // Indicate that it's no longer necessary to retrieve the value of
1998 // this bytecode variable from registers or other locations in the register file,
1999 // but that it is stored as a double.
2000 valueSourceReferenceForOperand(node
.local()) = ValueSource(DoubleInRegisterFile
);
2004 PredictedType predictedType
= node
.variableAccessData()->argumentAwarePrediction();
2005 if (isInt32Prediction(predictedType
)) {
2006 SpeculateIntegerOperand
value(this, node
.child1());
2007 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
.local()));
2008 noResult(m_compileIndex
);
2009 valueSourceReferenceForOperand(node
.local()) = ValueSource(Int32InRegisterFile
);
2012 if (isArrayPrediction(predictedType
)) {
2013 SpeculateCellOperand
cell(this, node
.child1());
2014 GPRReg cellGPR
= cell
.gpr();
2015 if (!isArrayPrediction(m_state
.forNode(node
.child1()).m_type
))
2016 speculationCheck(BadType
, JSValueRegs(cellGPR
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(cellGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info
)));
2017 m_jit
.storePtr(cellGPR
, JITCompiler::addressFor(node
.local()));
2018 noResult(m_compileIndex
);
2019 valueSourceReferenceForOperand(node
.local()) = ValueSource(CellInRegisterFile
);
2022 if (isBooleanPrediction(predictedType
)) {
2023 SpeculateBooleanOperand
boolean(this, node
.child1());
2024 m_jit
.storePtr(boolean
.gpr(), JITCompiler::addressFor(node
.local()));
2025 noResult(m_compileIndex
);
2026 valueSourceReferenceForOperand(node
.local()) = ValueSource(BooleanInRegisterFile
);
2031 JSValueOperand
value(this, node
.child1());
2032 m_jit
.storePtr(value
.gpr(), JITCompiler::addressFor(node
.local()));
2033 noResult(m_compileIndex
);
2035 valueSourceReferenceForOperand(node
.local()) = ValueSource(ValueInRegisterFile
);
2040 // This is a no-op; it just marks the fact that the argument is being used.
2041 // But it may be profitable to use this as a hook to run speculation checks
2042 // on arguments, thereby allowing us to trivially eliminate such checks if
2043 // the argument is not used.
2049 if (isInt32Constant(node
.child1().index())) {
2050 SpeculateIntegerOperand
op2(this, node
.child2());
2051 GPRTemporary
result(this, op2
);
2053 bitOp(op
, valueOfInt32Constant(node
.child1().index()), op2
.gpr(), result
.gpr());
2055 integerResult(result
.gpr(), m_compileIndex
);
2056 } else if (isInt32Constant(node
.child2().index())) {
2057 SpeculateIntegerOperand
op1(this, node
.child1());
2058 GPRTemporary
result(this, op1
);
2060 bitOp(op
, valueOfInt32Constant(node
.child2().index()), op1
.gpr(), result
.gpr());
2062 integerResult(result
.gpr(), m_compileIndex
);
2064 SpeculateIntegerOperand
op1(this, node
.child1());
2065 SpeculateIntegerOperand
op2(this, node
.child2());
2066 GPRTemporary
result(this, op1
, op2
);
2068 GPRReg reg1
= op1
.gpr();
2069 GPRReg reg2
= op2
.gpr();
2070 bitOp(op
, reg1
, reg2
, result
.gpr());
2072 integerResult(result
.gpr(), m_compileIndex
);
2079 if (isInt32Constant(node
.child2().index())) {
2080 SpeculateIntegerOperand
op1(this, node
.child1());
2081 GPRTemporary
result(this, op1
);
2083 shiftOp(op
, op1
.gpr(), valueOfInt32Constant(node
.child2().index()) & 0x1f, result
.gpr());
2085 integerResult(result
.gpr(), m_compileIndex
);
2087 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
2088 SpeculateIntegerOperand
op1(this, node
.child1());
2089 SpeculateIntegerOperand
op2(this, node
.child2());
2090 GPRTemporary
result(this, op1
);
2092 GPRReg reg1
= op1
.gpr();
2093 GPRReg reg2
= op2
.gpr();
2094 shiftOp(op
, reg1
, reg2
, result
.gpr());
2096 integerResult(result
.gpr(), m_compileIndex
);
2100 case UInt32ToNumber
: {
2101 compileUInt32ToNumber(node
);
2105 case DoubleAsInt32
: {
2106 compileDoubleAsInt32(node
);
2110 case ValueToInt32
: {
2111 compileValueToInt32(node
);
2115 case Int32ToDouble
: {
2116 compileInt32ToDouble(node
);
2121 if (!isNumberPrediction(m_state
.forNode(node
.child1()).m_type
)) {
2122 JSValueOperand
op1(this, node
.child1());
2123 JITCompiler::Jump isInteger
= m_jit
.branchPtr(MacroAssembler::AboveOrEqual
, op1
.gpr(), GPRInfo::tagTypeNumberRegister
);
2125 BadType
, JSValueRegs(op1
.gpr()), node
.child1().index(),
2126 m_jit
.branchTestPtr(MacroAssembler::Zero
, op1
.gpr(), GPRInfo::tagTypeNumberRegister
));
2127 isInteger
.link(&m_jit
);
2129 noResult(m_compileIndex
);
2139 compileArithSub(node
);
2143 compileArithNegate(node
);
2147 compileArithMul(node
);
2151 if (Node::shouldSpeculateInteger(at(node
.child1()), at(node
.child2())) && node
.canSpeculateInteger()) {
2152 compileIntegerArithDivForX86(node
);
2156 SpeculateDoubleOperand
op1(this, node
.child1());
2157 SpeculateDoubleOperand
op2(this, node
.child2());
2158 FPRTemporary
result(this, op1
);
2160 FPRReg reg1
= op1
.fpr();
2161 FPRReg reg2
= op2
.fpr();
2162 m_jit
.divDouble(reg1
, reg2
, result
.fpr());
2164 doubleResult(result
.fpr(), m_compileIndex
);
2169 compileArithMod(node
);
2174 if (at(node
.child1()).shouldSpeculateInteger() && node
.canSpeculateInteger()) {
2175 SpeculateIntegerOperand
op1(this, node
.child1());
2176 GPRTemporary
result(this);
2177 GPRTemporary
scratch(this);
2179 m_jit
.zeroExtend32ToPtr(op1
.gpr(), result
.gpr());
2180 m_jit
.rshift32(result
.gpr(), MacroAssembler::TrustedImm32(31), scratch
.gpr());
2181 m_jit
.add32(scratch
.gpr(), result
.gpr());
2182 m_jit
.xor32(scratch
.gpr(), result
.gpr());
2183 speculationCheck(Overflow
, JSValueRegs(), NoNode
, m_jit
.branch32(MacroAssembler::Equal
, result
.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2184 integerResult(result
.gpr(), m_compileIndex
);
2188 SpeculateDoubleOperand
op1(this, node
.child1());
2189 FPRTemporary
result(this);
2191 m_jit
.absDouble(op1
.fpr(), result
.fpr());
2192 doubleResult(result
.fpr(), m_compileIndex
);
2198 if (Node::shouldSpeculateInteger(at(node
.child1()), at(node
.child2())) && node
.canSpeculateInteger()) {
2199 SpeculateStrictInt32Operand
op1(this, node
.child1());
2200 SpeculateStrictInt32Operand
op2(this, node
.child2());
2201 GPRTemporary
result(this, op1
);
2203 MacroAssembler::Jump op1Less
= m_jit
.branch32(op
== ArithMin
? MacroAssembler::LessThan
: MacroAssembler::GreaterThan
, op1
.gpr(), op2
.gpr());
2204 m_jit
.move(op2
.gpr(), result
.gpr());
2205 if (op1
.gpr() != result
.gpr()) {
2206 MacroAssembler::Jump done
= m_jit
.jump();
2207 op1Less
.link(&m_jit
);
2208 m_jit
.move(op1
.gpr(), result
.gpr());
2211 op1Less
.link(&m_jit
);
2213 integerResult(result
.gpr(), m_compileIndex
);
2217 SpeculateDoubleOperand
op1(this, node
.child1());
2218 SpeculateDoubleOperand
op2(this, node
.child2());
2219 FPRTemporary
result(this, op1
);
2221 MacroAssembler::JumpList done
;
2223 MacroAssembler::Jump op1Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleLessThan
: MacroAssembler::DoubleGreaterThan
, op1
.fpr(), op2
.fpr());
2225 // op2 is either the lesser one or one of them is NaN
2226 MacroAssembler::Jump op2Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleGreaterThanOrEqual
: MacroAssembler::DoubleLessThanOrEqual
, op1
.fpr(), op2
.fpr());
2228 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2229 // op1 + op2 and putting it into result.
2230 m_jit
.addDouble(op1
.fpr(), op2
.fpr(), result
.fpr());
2231 done
.append(m_jit
.jump());
2233 op2Less
.link(&m_jit
);
2234 m_jit
.moveDouble(op2
.fpr(), result
.fpr());
2236 if (op1
.fpr() != result
.fpr()) {
2237 done
.append(m_jit
.jump());
2239 op1Less
.link(&m_jit
);
2240 m_jit
.moveDouble(op1
.fpr(), result
.fpr());
2242 op1Less
.link(&m_jit
);
2246 doubleResult(result
.fpr(), m_compileIndex
);
2251 SpeculateDoubleOperand
op1(this, node
.child1());
2252 FPRTemporary
result(this, op1
);
2254 m_jit
.sqrtDouble(op1
.fpr(), result
.fpr());
2256 doubleResult(result
.fpr(), m_compileIndex
);
2261 compileLogicalNot(node
);
2265 if (compare(node
, JITCompiler::LessThan
, JITCompiler::DoubleLessThan
, operationCompareLess
))
2270 if (compare(node
, JITCompiler::LessThanOrEqual
, JITCompiler::DoubleLessThanOrEqual
, operationCompareLessEq
))
2274 case CompareGreater
:
2275 if (compare(node
, JITCompiler::GreaterThan
, JITCompiler::DoubleGreaterThan
, operationCompareGreater
))
2279 case CompareGreaterEq
:
2280 if (compare(node
, JITCompiler::GreaterThanOrEqual
, JITCompiler::DoubleGreaterThanOrEqual
, operationCompareGreaterEq
))
2285 if (isNullConstant(node
.child1().index())) {
2286 if (nonSpeculativeCompareNull(node
, node
.child2()))
2290 if (isNullConstant(node
.child2().index())) {
2291 if (nonSpeculativeCompareNull(node
, node
.child1()))
2295 if (compare(node
, JITCompiler::Equal
, JITCompiler::DoubleEqual
, operationCompareEq
))
2299 case CompareStrictEq
:
2300 if (compileStrictEq(node
))
2304 case StringCharCodeAt
: {
2305 compileGetCharCodeAt(node
);
2309 case StringCharAt
: {
2310 // Relies on StringCharAt node having same basic layout as GetByVal
2311 compileGetByValOnString(node
);
2316 if (!node
.prediction() || !at(node
.child1()).prediction() || !at(node
.child2()).prediction()) {
2317 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), NoNode
);
2321 if (!at(node
.child2()).shouldSpeculateInteger() || !isActionableArrayPrediction(at(node
.child1()).prediction())) {
2322 JSValueOperand
base(this, node
.child1());
2323 JSValueOperand
property(this, node
.child2());
2324 GPRReg baseGPR
= base
.gpr();
2325 GPRReg propertyGPR
= property
.gpr();
2328 GPRResult
result(this);
2329 callOperation(operationGetByVal
, result
.gpr(), baseGPR
, propertyGPR
);
2331 jsValueResult(result
.gpr(), m_compileIndex
);
2335 if (at(node
.child1()).prediction() == PredictString
) {
2336 compileGetByValOnString(node
);
2342 if (at(node
.child1()).shouldSpeculateInt8Array()) {
2343 compileGetByValOnIntTypedArray(m_jit
.globalData()->int8ArrayDescriptor(), node
, sizeof(int8_t), isInt8ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, SignedTypedArray
);
2349 if (at(node
.child1()).shouldSpeculateInt16Array()) {
2350 compileGetByValOnIntTypedArray(m_jit
.globalData()->int16ArrayDescriptor(), node
, sizeof(int16_t), isInt16ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, SignedTypedArray
);
2356 if (at(node
.child1()).shouldSpeculateInt32Array()) {
2357 compileGetByValOnIntTypedArray(m_jit
.globalData()->int32ArrayDescriptor(), node
, sizeof(int32_t), isInt32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, SignedTypedArray
);
2363 if (at(node
.child1()).shouldSpeculateUint8Array()) {
2364 compileGetByValOnIntTypedArray(m_jit
.globalData()->uint8ArrayDescriptor(), node
, sizeof(uint8_t), isUint8ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
);
2370 if (at(node
.child1()).shouldSpeculateUint8ClampedArray()) {
2371 compileGetByValOnIntTypedArray(m_jit
.globalData()->uint8ClampedArrayDescriptor(), node
, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
);
2377 if (at(node
.child1()).shouldSpeculateUint16Array()) {
2378 compileGetByValOnIntTypedArray(m_jit
.globalData()->uint16ArrayDescriptor(), node
, sizeof(uint16_t), isUint16ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
);
2384 if (at(node
.child1()).shouldSpeculateUint32Array()) {
2385 compileGetByValOnIntTypedArray(m_jit
.globalData()->uint32ArrayDescriptor(), node
, sizeof(uint32_t), isUint32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
);
2391 if (at(node
.child1()).shouldSpeculateFloat32Array()) {
2392 compileGetByValOnFloatTypedArray(m_jit
.globalData()->float32ArrayDescriptor(), node
, sizeof(float), isFloat32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
);
2398 if (at(node
.child1()).shouldSpeculateFloat64Array()) {
2399 compileGetByValOnFloatTypedArray(m_jit
.globalData()->float64ArrayDescriptor(), node
, sizeof(double), isFloat64ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
);
2405 ASSERT(at(node
.child1()).shouldSpeculateArray());
2407 SpeculateCellOperand
base(this, node
.child1());
2408 SpeculateStrictInt32Operand
property(this, node
.child2());
2409 StorageOperand
storage(this, node
.child3());
2411 GPRReg baseReg
= base
.gpr();
2412 GPRReg propertyReg
= property
.gpr();
2413 GPRReg storageReg
= storage
.gpr();
2418 if (!isArrayPrediction(m_state
.forNode(node
.child1()).m_type
))
2419 speculationCheck(BadType
, JSValueRegs(baseReg
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseReg
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info
)));
2420 speculationCheck(Uncountable
, JSValueRegs(), NoNode
, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(baseReg
, JSArray::vectorLengthOffset())));
2422 // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache
2423 // the storage pointer - especially if there happens to be another register free right now. If we do so,
2424 // then we'll need to allocate a new temporary for result.
2425 GPRTemporary
result(this);
2426 m_jit
.loadPtr(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), result
.gpr());
2427 speculationCheck(Uncountable
, JSValueRegs(), NoNode
, m_jit
.branchTestPtr(MacroAssembler::Zero
, result
.gpr()));
2429 jsValueResult(result
.gpr(), m_compileIndex
);
2434 if (!at(node
.child1()).prediction() || !at(node
.child2()).prediction()) {
2435 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), NoNode
);
2439 if (!at(node
.child2()).shouldSpeculateInteger() || !isActionableMutableArrayPrediction(at(node
.child1()).prediction())) {
2440 JSValueOperand
arg1(this, node
.child1());
2441 JSValueOperand
arg2(this, node
.child2());
2442 JSValueOperand
arg3(this, node
.child3());
2443 GPRReg arg1GPR
= arg1
.gpr();
2444 GPRReg arg2GPR
= arg2
.gpr();
2445 GPRReg arg3GPR
= arg3
.gpr();
2448 callOperation(m_jit
.strictModeFor(node
.codeOrigin
) ? operationPutByValStrict
: operationPutByValNonStrict
, arg1GPR
, arg2GPR
, arg3GPR
);
2450 noResult(m_compileIndex
);
2454 SpeculateCellOperand
base(this, node
.child1());
2455 SpeculateStrictInt32Operand
property(this, node
.child2());
2456 if (at(node
.child1()).shouldSpeculateInt8Array()) {
2457 compilePutByValForIntTypedArray(m_jit
.globalData()->int8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int8_t), isInt8ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, SignedTypedArray
);
2463 if (at(node
.child1()).shouldSpeculateInt16Array()) {
2464 compilePutByValForIntTypedArray(m_jit
.globalData()->int16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int16_t), isInt16ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, SignedTypedArray
);
2470 if (at(node
.child1()).shouldSpeculateInt32Array()) {
2471 compilePutByValForIntTypedArray(m_jit
.globalData()->int32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int32_t), isInt32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, SignedTypedArray
);
2477 if (at(node
.child1()).shouldSpeculateUint8Array()) {
2478 compilePutByValForIntTypedArray(m_jit
.globalData()->uint8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), isUint8ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
);
2484 if (at(node
.child1()).shouldSpeculateUint8ClampedArray()) {
2485 compilePutByValForIntTypedArray(m_jit
.globalData()->uint8ClampedArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
, ClampRounding
);
2489 if (at(node
.child1()).shouldSpeculateUint16Array()) {
2490 compilePutByValForIntTypedArray(m_jit
.globalData()->uint16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint16_t), isUint16ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
);
2496 if (at(node
.child1()).shouldSpeculateUint32Array()) {
2497 compilePutByValForIntTypedArray(m_jit
.globalData()->uint32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint32_t), isUint32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
, UnsignedTypedArray
);
2503 if (at(node
.child1()).shouldSpeculateFloat32Array()) {
2504 compilePutByValForFloatTypedArray(m_jit
.globalData()->float32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(float), isFloat32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
);
2510 if (at(node
.child1()).shouldSpeculateFloat64Array()) {
2511 compilePutByValForFloatTypedArray(m_jit
.globalData()->float64ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(double), isFloat64ArrayPrediction(m_state
.forNode(node
.child1()).m_type
) ? NoTypedArrayTypeSpecCheck
: AllTypedArraySpecChecks
);
2517 ASSERT(at(node
.child1()).shouldSpeculateArray());
2519 JSValueOperand
value(this, node
.child3());
2520 GPRTemporary
scratch(this);
2522 // Map base, property & value into registers, allocate a scratch register.
2523 GPRReg baseReg
= base
.gpr();
2524 GPRReg propertyReg
= property
.gpr();
2525 GPRReg valueReg
= value
.gpr();
2526 GPRReg scratchReg
= scratch
.gpr();
2531 writeBarrier(baseReg
, value
.gpr(), node
.child3(), WriteBarrierForPropertyAccess
, scratchReg
);
2533 // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
2534 // If we have predicted the base to be type array, we can skip the check.
2535 if (!isArrayPrediction(m_state
.forNode(node
.child1()).m_type
))
2536 speculationCheck(BadType
, JSValueRegs(baseReg
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseReg
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info
)));
2542 MacroAssembler::Jump withinArrayBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(baseReg
, JSArray::vectorLengthOffset()));
2544 // Code to handle put beyond array bounds.
2545 silentSpillAllRegisters(scratchReg
);
2546 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
, baseReg
, propertyReg
, valueReg
);
2547 silentFillAllRegisters(scratchReg
);
2548 JITCompiler::Jump wasBeyondArrayBounds
= m_jit
.jump();
2550 withinArrayBounds
.link(&m_jit
);
2552 // Get the array storage.
2553 GPRReg storageReg
= scratchReg
;
2554 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSArray::storageOffset()), storageReg
);
2556 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2557 MacroAssembler::Jump notHoleValue
= m_jit
.branchTestPtr(MacroAssembler::NonZero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2558 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
2560 // If we're writing to a hole we might be growing the array;
2561 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
2562 m_jit
.add32(TrustedImm32(1), propertyReg
);
2563 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
2564 m_jit
.sub32(TrustedImm32(1), propertyReg
);
2566 lengthDoesNotNeedUpdate
.link(&m_jit
);
2567 notHoleValue
.link(&m_jit
);
2569 // Store the value to the array.
2570 m_jit
.storePtr(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2572 wasBeyondArrayBounds
.link(&m_jit
);
2574 noResult(m_compileIndex
, UseChildrenCalledExplicitly
);
2578 case PutByValAlias
: {
2579 if (!at(node
.child1()).prediction() || !at(node
.child2()).prediction()) {
2580 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), NoNode
);
2584 ASSERT(isActionableMutableArrayPrediction(at(node
.child1()).prediction()));
2585 ASSERT(at(node
.child2()).shouldSpeculateInteger());
2587 SpeculateCellOperand
base(this, node
.child1());
2588 SpeculateStrictInt32Operand
property(this, node
.child2());
2589 if (at(node
.child1()).shouldSpeculateInt8Array()) {
2590 compilePutByValForIntTypedArray(m_jit
.globalData()->int8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int8_t), NoTypedArraySpecCheck
, SignedTypedArray
);
2596 if (at(node
.child1()).shouldSpeculateInt16Array()) {
2597 compilePutByValForIntTypedArray(m_jit
.globalData()->int16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int16_t), NoTypedArraySpecCheck
, SignedTypedArray
);
2603 if (at(node
.child1()).shouldSpeculateInt32Array()) {
2604 compilePutByValForIntTypedArray(m_jit
.globalData()->int32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int32_t), NoTypedArraySpecCheck
, SignedTypedArray
);
2610 if (at(node
.child1()).shouldSpeculateUint8Array()) {
2611 compilePutByValForIntTypedArray(m_jit
.globalData()->uint8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), NoTypedArraySpecCheck
, UnsignedTypedArray
);
2617 if (at(node
.child1()).shouldSpeculateUint8ClampedArray()) {
2618 compilePutByValForIntTypedArray(m_jit
.globalData()->uint8ClampedArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), NoTypedArraySpecCheck
, UnsignedTypedArray
, ClampRounding
);
2624 if (at(node
.child1()).shouldSpeculateUint16Array()) {
2625 compilePutByValForIntTypedArray(m_jit
.globalData()->uint16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint16_t), NoTypedArraySpecCheck
, UnsignedTypedArray
);
2631 if (at(node
.child1()).shouldSpeculateUint32Array()) {
2632 compilePutByValForIntTypedArray(m_jit
.globalData()->uint32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint32_t), NoTypedArraySpecCheck
, UnsignedTypedArray
);
2638 if (at(node
.child1()).shouldSpeculateFloat32Array()) {
2639 compilePutByValForFloatTypedArray(m_jit
.globalData()->float32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(float), NoTypedArraySpecCheck
);
2645 if (at(node
.child1()).shouldSpeculateFloat64Array()) {
2646 compilePutByValForFloatTypedArray(m_jit
.globalData()->float64ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(double), NoTypedArraySpecCheck
);
2652 ASSERT(at(node
.child1()).shouldSpeculateArray());
2654 JSValueOperand
value(this, node
.child3());
2655 GPRTemporary
scratch(this);
2657 GPRReg baseReg
= base
.gpr();
2658 GPRReg scratchReg
= scratch
.gpr();
2660 writeBarrier(base
.gpr(), value
.gpr(), node
.child3(), WriteBarrierForPropertyAccess
, scratchReg
);
2662 // Get the array storage.
2663 GPRReg storageReg
= scratchReg
;
2664 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSArray::storageOffset()), storageReg
);
2666 // Store the value to the array.
2667 GPRReg propertyReg
= property
.gpr();
2668 GPRReg valueReg
= value
.gpr();
2669 m_jit
.storePtr(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2671 noResult(m_compileIndex
);
2676 if (compileRegExpExec(node
))
2678 if (!node
.adjustedRefCount()) {
2679 SpeculateCellOperand
base(this, node
.child1());
2680 SpeculateCellOperand
argument(this, node
.child2());
2681 GPRReg baseGPR
= base
.gpr();
2682 GPRReg argumentGPR
= argument
.gpr();
2685 GPRResult
result(this);
2686 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2688 // Must use jsValueResult because otherwise we screw up register
2689 // allocation, which thinks that this node has a result.
2690 jsValueResult(result
.gpr(), m_compileIndex
);
2694 SpeculateCellOperand
base(this, node
.child1());
2695 SpeculateCellOperand
argument(this, node
.child2());
2696 GPRReg baseGPR
= base
.gpr();
2697 GPRReg argumentGPR
= argument
.gpr();
2700 GPRResult
result(this);
2701 callOperation(operationRegExpExec
, result
.gpr(), baseGPR
, argumentGPR
);
2703 jsValueResult(result
.gpr(), m_compileIndex
);
2708 SpeculateCellOperand
base(this, node
.child1());
2709 SpeculateCellOperand
argument(this, node
.child2());
2710 GPRReg baseGPR
= base
.gpr();
2711 GPRReg argumentGPR
= argument
.gpr();
2714 GPRResult
result(this);
2715 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2717 // If we add a DataFormatBool, we should use it here.
2718 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
2719 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
2724 SpeculateCellOperand
base(this, node
.child1());
2725 JSValueOperand
value(this, node
.child2());
2726 GPRTemporary
storage(this);
2727 GPRTemporary
storageLength(this);
2729 GPRReg baseGPR
= base
.gpr();
2730 GPRReg valueGPR
= value
.gpr();
2731 GPRReg storageGPR
= storage
.gpr();
2732 GPRReg storageLengthGPR
= storageLength
.gpr();
2734 writeBarrier(baseGPR
, valueGPR
, node
.child2(), WriteBarrierForPropertyAccess
, storageGPR
, storageLengthGPR
);
2736 if (!isArrayPrediction(m_state
.forNode(node
.child1()).m_type
))
2737 speculationCheck(BadType
, JSValueRegs(baseGPR
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info
)));
2739 m_jit
.loadPtr(MacroAssembler::Address(baseGPR
, JSArray::storageOffset()), storageGPR
);
2740 m_jit
.load32(MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)), storageLengthGPR
);
2742 // Refuse to handle bizarre lengths.
2743 speculationCheck(Uncountable
, JSValueRegs(), NoNode
, m_jit
.branch32(MacroAssembler::Above
, storageLengthGPR
, TrustedImm32(0x7ffffffe)));
2745 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(baseGPR
, JSArray::vectorLengthOffset()));
2747 m_jit
.storePtr(valueGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2749 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2750 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
2751 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
2752 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
2754 MacroAssembler::Jump done
= m_jit
.jump();
2756 slowPath
.link(&m_jit
);
2758 silentSpillAllRegisters(storageLengthGPR
);
2759 callOperation(operationArrayPush
, storageLengthGPR
, valueGPR
, baseGPR
);
2760 silentFillAllRegisters(storageLengthGPR
);
2764 jsValueResult(storageLengthGPR
, m_compileIndex
);
2769 SpeculateCellOperand
base(this, node
.child1());
2770 GPRTemporary
value(this);
2771 GPRTemporary
storage(this);
2772 GPRTemporary
storageLength(this);
2774 GPRReg baseGPR
= base
.gpr();
2775 GPRReg valueGPR
= value
.gpr();
2776 GPRReg storageGPR
= storage
.gpr();
2777 GPRReg storageLengthGPR
= storageLength
.gpr();
2779 if (!isArrayPrediction(m_state
.forNode(node
.child1()).m_type
))
2780 speculationCheck(BadType
, JSValueRegs(baseGPR
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info
)));
2782 m_jit
.loadPtr(MacroAssembler::Address(baseGPR
, JSArray::storageOffset()), storageGPR
);
2783 m_jit
.load32(MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)), storageLengthGPR
);
2785 MacroAssembler::Jump emptyArrayCase
= m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
);
2787 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
2789 MacroAssembler::Jump slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(baseGPR
, JSArray::vectorLengthOffset()));
2791 m_jit
.loadPtr(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), valueGPR
);
2793 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
2795 MacroAssembler::Jump holeCase
= m_jit
.branchTestPtr(MacroAssembler::Zero
, valueGPR
);
2797 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2798 m_jit
.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
2800 MacroAssembler::JumpList done
;
2802 done
.append(m_jit
.jump());
2804 holeCase
.link(&m_jit
);
2805 emptyArrayCase
.link(&m_jit
);
2806 m_jit
.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR
);
2807 done
.append(m_jit
.jump());
2809 slowCase
.link(&m_jit
);
2811 silentSpillAllRegisters(valueGPR
);
2812 callOperation(operationArrayPop
, valueGPR
, baseGPR
);
2813 silentFillAllRegisters(valueGPR
);
2817 jsValueResult(valueGPR
, m_compileIndex
);
2822 BlockIndex taken
= node
.takenBlockIndex();
2824 noResult(m_compileIndex
);
2829 if (isStrictInt32(node
.child1().index()) || at(node
.child1()).shouldSpeculateInteger()) {
2830 SpeculateIntegerOperand
op(this, node
.child1());
2832 BlockIndex taken
= node
.takenBlockIndex();
2833 BlockIndex notTaken
= node
.notTakenBlockIndex();
2835 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
2837 if (taken
== (m_block
+ 1)) {
2838 condition
= MacroAssembler::Zero
;
2839 BlockIndex tmp
= taken
;
2844 branchTest32(condition
, op
.gpr(), taken
);
2847 noResult(m_compileIndex
);
2854 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT1
);
2855 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
2856 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
2858 #if DFG_ENABLE(SUCCESS_STATS)
2859 static SamplingCounter
counter("SpeculativeJIT");
2860 m_jit
.emitCount(counter
);
2863 // Return the result in returnValueGPR.
2864 JSValueOperand
op1(this, node
.child1());
2865 m_jit
.move(op1
.gpr(), GPRInfo::returnValueGPR
);
2867 // Grab the return address.
2868 m_jit
.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC
, GPRInfo::regT1
);
2869 // Restore our caller's "r".
2870 m_jit
.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame
, GPRInfo::callFrameRegister
);
2872 m_jit
.restoreReturnAddressBeforeReturn(GPRInfo::regT1
);
2875 noResult(m_compileIndex
);
2880 case ThrowReferenceError
: {
2881 // We expect that throw statements are rare and are intended to exit the code block
2882 // anyway, so we just OSR back to the old JIT for now.
2883 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), NoNode
);
2888 if (at(node
.child1()).shouldSpeculateInteger()) {
2889 // It's really profitable to speculate integer, since it's really cheap,
2890 // it means we don't have to do any real work, and we emit a lot less code.
2892 SpeculateIntegerOperand
op1(this, node
.child1());
2893 GPRTemporary
result(this, op1
);
2895 m_jit
.move(op1
.gpr(), result
.gpr());
2896 if (op1
.format() == DataFormatInteger
)
2897 m_jit
.orPtr(GPRInfo::tagTypeNumberRegister
, result
.gpr());
2899 jsValueResult(result
.gpr(), m_compileIndex
);
2903 // FIXME: Add string speculation here.
2905 JSValueOperand
op1(this, node
.child1());
2906 GPRTemporary
result(this, op1
);
2908 GPRReg op1GPR
= op1
.gpr();
2909 GPRReg resultGPR
= result
.gpr();
2913 if (!(m_state
.forNode(node
.child1()).m_type
& ~(PredictNumber
| PredictBoolean
)))
2914 m_jit
.move(op1GPR
, resultGPR
);
2916 MacroAssembler::JumpList alreadyPrimitive
;
2918 alreadyPrimitive
.append(m_jit
.branchTestPtr(MacroAssembler::NonZero
, op1GPR
, GPRInfo::tagMaskRegister
));
2919 alreadyPrimitive
.append(m_jit
.branchPtr(MacroAssembler::Equal
, MacroAssembler::Address(op1GPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info
)));
2921 silentSpillAllRegisters(resultGPR
);
2922 callOperation(operationToPrimitive
, resultGPR
, op1GPR
);
2923 silentFillAllRegisters(resultGPR
);
2925 MacroAssembler::Jump done
= m_jit
.jump();
2927 alreadyPrimitive
.link(&m_jit
);
2928 m_jit
.move(op1GPR
, resultGPR
);
2933 jsValueResult(resultGPR
, m_compileIndex
, UseChildrenCalledExplicitly
);
2939 // We really don't want to grow the register file just to do a StrCat or NewArray.
2940 // Say we have 50 functions on the stack that all have a StrCat in them that has
2941 // upwards of 10 operands. In the DFG this would mean that each one gets
2942 // some random virtual register, and then to do the StrCat we'd need a second
2943 // span of 10 operands just to have somewhere to copy the 10 operands to, where
2944 // they'd be contiguous and we could easily tell the C code how to find them.
2945 // Ugly! So instead we use the scratchBuffer infrastructure in JSGlobalData. That
2946 // way, those 50 functions will share the same scratchBuffer for offloading their
2947 // StrCat operands. It's about as good as we can do, unless we start doing
2948 // virtual register coalescing to ensure that operands to StrCat get spilled
2949 // in exactly the place where StrCat wants them, or else have the StrCat
2950 // refer to those operands' SetLocal instructions to force them to spill in
2951 // the right place. Basically, any way you cut it, the current approach
2952 // probably has the best balance of performance and sensibility in the sense
2953 // that it does not increase the complexity of the DFG JIT just to make StrCat
2956 size_t scratchSize
= sizeof(EncodedJSValue
) * node
.numChildren();
2957 ScratchBuffer
* scratchBuffer
= m_jit
.globalData()->scratchBufferForSize(scratchSize
);
2958 EncodedJSValue
* buffer
= scratchBuffer
? static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer()) : 0;
2960 for (unsigned operandIdx
= 0; operandIdx
< node
.numChildren(); ++operandIdx
) {
2961 JSValueOperand
operand(this, m_jit
.graph().m_varArgChildren
[node
.firstChild() + operandIdx
]);
2962 GPRReg opGPR
= operand
.gpr();
2965 m_jit
.storePtr(opGPR
, buffer
+ operandIdx
);
2971 GPRTemporary
scratch(this);
2973 // Tell GC mark phase how much of the scratch buffer is active during call.
2974 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
2975 m_jit
.storePtr(TrustedImmPtr(scratchSize
), scratch
.gpr());
2978 GPRResult
result(this);
2980 callOperation(op
== StrCat
? operationStrCat
: operationNewArray
, result
.gpr(), static_cast<void *>(buffer
), node
.numChildren());
2983 GPRTemporary
scratch(this);
2985 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
2986 m_jit
.storePtr(TrustedImmPtr(0), scratch
.gpr());
2989 cellResult(result
.gpr(), m_compileIndex
, UseChildrenCalledExplicitly
);
2993 case NewArrayBuffer
: {
2995 GPRResult
result(this);
2997 callOperation(operationNewArrayBuffer
, result
.gpr(), node
.startConstant(), node
.numConstants());
2999 cellResult(result
.gpr(), m_compileIndex
);
3005 GPRResult
result(this);
3007 callOperation(operationNewRegexp
, result
.gpr(), m_jit
.codeBlock()->regexp(node
.regexpIndex()));
3009 cellResult(result
.gpr(), m_compileIndex
);
3014 if (isObjectPrediction(m_state
.forNode(node
.child1()).m_type
)) {
3015 SpeculateCellOperand
thisValue(this, node
.child1());
3016 GPRTemporary
result(this, thisValue
);
3017 m_jit
.move(thisValue
.gpr(), result
.gpr());
3018 cellResult(result
.gpr(), m_compileIndex
);
3022 if (isOtherPrediction(at(node
.child1()).prediction())) {
3023 JSValueOperand
thisValue(this, node
.child1());
3024 GPRTemporary
scratch(this, thisValue
);
3025 GPRReg thisValueGPR
= thisValue
.gpr();
3026 GPRReg scratchGPR
= scratch
.gpr();
3028 if (!isOtherPrediction(m_state
.forNode(node
.child1()).m_type
)) {
3029 m_jit
.move(thisValueGPR
, scratchGPR
);
3030 m_jit
.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined
), scratchGPR
);
3031 speculationCheck(BadType
, JSValueRegs(thisValueGPR
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, scratchGPR
, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull
))));
3034 m_jit
.move(MacroAssembler::TrustedImmPtr(m_jit
.globalThisObjectFor(node
.codeOrigin
)), scratchGPR
);
3035 cellResult(scratchGPR
, m_compileIndex
);
3039 if (isObjectPrediction(at(node
.child1()).prediction())) {
3040 SpeculateCellOperand
thisValue(this, node
.child1());
3041 GPRTemporary
result(this, thisValue
);
3042 GPRReg thisValueGPR
= thisValue
.gpr();
3043 GPRReg resultGPR
= result
.gpr();
3045 if (!isObjectPrediction(m_state
.forNode(node
.child1()).m_type
))
3046 speculationCheck(BadType
, JSValueRegs(thisValueGPR
), node
.child1(), m_jit
.branchPtr(JITCompiler::Equal
, JITCompiler::Address(thisValueGPR
, JSCell::classInfoOffset()), JITCompiler::TrustedImmPtr(&JSString::s_info
)));
3048 m_jit
.move(thisValueGPR
, resultGPR
);
3050 cellResult(resultGPR
, m_compileIndex
);
3054 JSValueOperand
thisValue(this, node
.child1());
3055 GPRReg thisValueGPR
= thisValue
.gpr();
3059 GPRResult
result(this);
3060 callOperation(operationConvertThis
, result
.gpr(), thisValueGPR
);
3062 cellResult(result
.gpr(), m_compileIndex
);
3067 // Note that there is not so much profit to speculate here. The only things we
3068 // speculate on are (1) that it's a cell, since that eliminates cell checks
3069 // later if the proto is reused, and (2) if we have a FinalObject prediction
3070 // then we speculate because we want to get recompiled if it isn't (since
3071 // otherwise we'd start taking slow path a lot).
3073 SpeculateCellOperand
proto(this, node
.child1());
3074 GPRTemporary
result(this);
3075 GPRTemporary
scratch(this);
3077 GPRReg protoGPR
= proto
.gpr();
3078 GPRReg resultGPR
= result
.gpr();
3079 GPRReg scratchGPR
= scratch
.gpr();
3083 MacroAssembler::JumpList slowPath
;
3085 // Need to verify that the prototype is an object. If we have reason to believe
3086 // that it's a FinalObject then we speculate on that directly. Otherwise we
3087 // do the slow (structure-based) check.
3088 if (at(node
.child1()).shouldSpeculateFinalObject()) {
3089 if (!isFinalObjectPrediction(m_state
.forNode(node
.child1()).m_type
))
3090 speculationCheck(BadType
, JSValueRegs(protoGPR
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(protoGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSFinalObject::s_info
)));
3092 m_jit
.loadPtr(MacroAssembler::Address(protoGPR
, JSCell::structureOffset()), scratchGPR
);
3093 slowPath
.append(m_jit
.branch8(MacroAssembler::Below
, MacroAssembler::Address(scratchGPR
, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType
)));
3096 // Load the inheritorID (the Structure that objects who have protoGPR as the prototype
3097 // use to refer to that prototype). If the inheritorID is not set, go to slow path.
3098 m_jit
.loadPtr(MacroAssembler::Address(protoGPR
, JSObject::offsetOfInheritorID()), scratchGPR
);
3099 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, scratchGPR
));
3101 emitAllocateJSFinalObject(scratchGPR
, resultGPR
, scratchGPR
, slowPath
);
3103 MacroAssembler::Jump done
= m_jit
.jump();
3105 slowPath
.link(&m_jit
);
3107 silentSpillAllRegisters(resultGPR
);
3108 if (node
.codeOrigin
.inlineCallFrame
)
3109 callOperation(operationCreateThisInlined
, resultGPR
, protoGPR
, node
.codeOrigin
.inlineCallFrame
->callee
.get());
3111 callOperation(operationCreateThis
, resultGPR
, protoGPR
);
3112 silentFillAllRegisters(resultGPR
);
3116 cellResult(resultGPR
, m_compileIndex
, UseChildrenCalledExplicitly
);
3121 GPRTemporary
result(this);
3122 GPRTemporary
scratch(this);
3124 GPRReg resultGPR
= result
.gpr();
3125 GPRReg scratchGPR
= scratch
.gpr();
3127 MacroAssembler::JumpList slowPath
;
3129 emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit
.globalObjectFor(node
.codeOrigin
)->emptyObjectStructure()), resultGPR
, scratchGPR
, slowPath
);
3131 MacroAssembler::Jump done
= m_jit
.jump();
3133 slowPath
.link(&m_jit
);
3135 silentSpillAllRegisters(resultGPR
);
3136 callOperation(operationNewObject
, resultGPR
);
3137 silentFillAllRegisters(resultGPR
);
3141 cellResult(resultGPR
, m_compileIndex
);
3146 GPRTemporary
result(this);
3147 m_jit
.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister
>(RegisterFile::Callee
)), result
.gpr());
3148 cellResult(result
.gpr(), m_compileIndex
);
3152 case GetScopeChain
: {
3153 GPRTemporary
result(this);
3154 GPRReg resultGPR
= result
.gpr();
3156 m_jit
.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister
>(RegisterFile::ScopeChain
)), resultGPR
);
3157 bool checkTopLevel
= m_jit
.codeBlock()->codeType() == FunctionCode
&& m_jit
.codeBlock()->needsFullScopeChain();
3158 int skip
= node
.scopeChainDepth();
3159 ASSERT(skip
|| !checkTopLevel
);
3160 if (checkTopLevel
&& skip
--) {
3161 JITCompiler::Jump activationNotCreated
;
3163 activationNotCreated
= m_jit
.branchTestPtr(JITCompiler::Zero
, JITCompiler::addressFor(static_cast<VirtualRegister
>(m_jit
.codeBlock()->activationRegister())));
3164 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, OBJECT_OFFSETOF(ScopeChainNode
, next
)), resultGPR
);
3165 activationNotCreated
.link(&m_jit
);
3168 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, OBJECT_OFFSETOF(ScopeChainNode
, next
)), resultGPR
);
3170 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, OBJECT_OFFSETOF(ScopeChainNode
, object
)), resultGPR
);
3172 cellResult(resultGPR
, m_compileIndex
);
3175 case GetScopedVar
: {
3176 SpeculateCellOperand
scopeChain(this, node
.child1());
3177 GPRTemporary
result(this);
3178 GPRReg resultGPR
= result
.gpr();
3179 m_jit
.loadPtr(JITCompiler::Address(scopeChain
.gpr(), JSVariableObject::offsetOfRegisters()), resultGPR
);
3180 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, node
.varNumber() * sizeof(Register
)), resultGPR
);
3181 jsValueResult(resultGPR
, m_compileIndex
);
3184 case PutScopedVar
: {
3185 SpeculateCellOperand
scopeChain(this, node
.child1());
3186 GPRTemporary
scratchRegister(this);
3187 GPRReg scratchGPR
= scratchRegister
.gpr();
3188 m_jit
.loadPtr(JITCompiler::Address(scopeChain
.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR
);
3189 JSValueOperand
value(this, node
.child2());
3190 m_jit
.storePtr(value
.gpr(), JITCompiler::Address(scratchGPR
, node
.varNumber() * sizeof(Register
)));
3191 writeBarrier(scopeChain
.gpr(), value
.gpr(), node
.child2(), WriteBarrierForVariableAccess
, scratchGPR
);
3192 noResult(m_compileIndex
);
3196 if (!node
.prediction()) {
3197 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), NoNode
);
3201 if (isCellPrediction(at(node
.child1()).prediction())) {
3202 SpeculateCellOperand
base(this, node
.child1());
3203 GPRTemporary
result(this, base
);
3205 GPRReg baseGPR
= base
.gpr();
3206 GPRReg resultGPR
= result
.gpr();
3209 if (resultGPR
== baseGPR
)
3210 scratchGPR
= tryAllocate();
3212 scratchGPR
= resultGPR
;
3216 cachedGetById(node
.codeOrigin
, baseGPR
, resultGPR
, scratchGPR
, node
.identifierNumber());
3218 jsValueResult(resultGPR
, m_compileIndex
, UseChildrenCalledExplicitly
);
3222 JSValueOperand
base(this, node
.child1());
3223 GPRTemporary
result(this, base
);
3225 GPRReg baseGPR
= base
.gpr();
3226 GPRReg resultGPR
= result
.gpr();
3229 if (resultGPR
== baseGPR
)
3230 scratchGPR
= tryAllocate();
3232 scratchGPR
= resultGPR
;
3236 JITCompiler::Jump notCell
= m_jit
.branchTestPtr(JITCompiler::NonZero
, baseGPR
, GPRInfo::tagMaskRegister
);
3238 cachedGetById(node
.codeOrigin
, baseGPR
, resultGPR
, scratchGPR
, node
.identifierNumber(), notCell
);
3240 jsValueResult(resultGPR
, m_compileIndex
, UseChildrenCalledExplicitly
);
3245 case GetByIdFlush
: {
3246 if (!node
.prediction()) {
3247 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), NoNode
);
3251 if (isCellPrediction(at(node
.child1()).prediction())) {
3252 SpeculateCellOperand
base(this, node
.child1());
3253 GPRReg baseGPR
= base
.gpr();
3255 GPRResult
result(this);
3257 GPRReg resultGPR
= result
.gpr();
3259 GPRReg scratchGPR
= selectScratchGPR(baseGPR
, resultGPR
);
3265 cachedGetById(node
.codeOrigin
, baseGPR
, resultGPR
, scratchGPR
, node
.identifierNumber(), JITCompiler::Jump(), DontSpill
);
3267 jsValueResult(resultGPR
, m_compileIndex
, UseChildrenCalledExplicitly
);
3271 JSValueOperand
base(this, node
.child1());
3272 GPRReg baseGPR
= base
.gpr();
3274 GPRResult
result(this);
3275 GPRReg resultGPR
= result
.gpr();
3277 GPRReg scratchGPR
= selectScratchGPR(baseGPR
, resultGPR
);
3282 JITCompiler::Jump notCell
= m_jit
.branchTestPtr(JITCompiler::NonZero
, baseGPR
, GPRInfo::tagMaskRegister
);
3284 cachedGetById(node
.codeOrigin
, baseGPR
, resultGPR
, scratchGPR
, node
.identifierNumber(), notCell
, DontSpill
);
3286 jsValueResult(resultGPR
, m_compileIndex
, UseChildrenCalledExplicitly
);
3291 case GetArrayLength
: {
3292 SpeculateCellOperand
base(this, node
.child1());
3293 GPRTemporary
result(this);
3295 GPRReg baseGPR
= base
.gpr();
3296 GPRReg resultGPR
= result
.gpr();
3298 if (!isArrayPrediction(m_state
.forNode(node
.child1()).m_type
))
3299 speculationCheck(BadType
, JSValueRegs(baseGPR
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info
)));
3301 m_jit
.loadPtr(MacroAssembler::Address(baseGPR
, JSArray::storageOffset()), resultGPR
);
3302 m_jit
.load32(MacroAssembler::Address(resultGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)), resultGPR
);
3304 speculationCheck(Uncountable
, JSValueRegs(), NoNode
, m_jit
.branch32(MacroAssembler::LessThan
, resultGPR
, MacroAssembler::TrustedImm32(0)));
3306 integerResult(resultGPR
, m_compileIndex
);
3310 case GetStringLength
: {
3311 SpeculateCellOperand
base(this, node
.child1());
3312 GPRTemporary
result(this);
3314 GPRReg baseGPR
= base
.gpr();
3315 GPRReg resultGPR
= result
.gpr();
3317 if (!isStringPrediction(m_state
.forNode(node
.child1()).m_type
))
3318 speculationCheck(BadType
, JSValueRegs(baseGPR
), node
.child1(), m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(baseGPR
, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info
)));
3320 m_jit
.load32(MacroAssembler::Address(baseGPR
, JSString::offsetOfLength()), resultGPR
);
3322 integerResult(resultGPR
, m_compileIndex
);
3326 case GetInt8ArrayLength
: {
3327 compileGetTypedArrayLength(m_jit
.globalData()->int8ArrayDescriptor(), node
, !isInt8ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3330 case GetInt16ArrayLength
: {
3331 compileGetTypedArrayLength(m_jit
.globalData()->int16ArrayDescriptor(), node
, !isInt16ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3334 case GetInt32ArrayLength
: {
3335 compileGetTypedArrayLength(m_jit
.globalData()->int32ArrayDescriptor(), node
, !isInt32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3338 case GetUint8ArrayLength
: {
3339 compileGetTypedArrayLength(m_jit
.globalData()->uint8ArrayDescriptor(), node
, !isUint8ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3342 case GetUint8ClampedArrayLength
: {
3343 compileGetTypedArrayLength(m_jit
.globalData()->uint8ClampedArrayDescriptor(), node
, !isUint8ClampedArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3346 case GetUint16ArrayLength
: {
3347 compileGetTypedArrayLength(m_jit
.globalData()->uint16ArrayDescriptor(), node
, !isUint16ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3350 case GetUint32ArrayLength
: {
3351 compileGetTypedArrayLength(m_jit
.globalData()->uint32ArrayDescriptor(), node
, !isUint32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3354 case GetFloat32ArrayLength
: {
3355 compileGetTypedArrayLength(m_jit
.globalData()->float32ArrayDescriptor(), node
, !isFloat32ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3358 case GetFloat64ArrayLength
: {
3359 compileGetTypedArrayLength(m_jit
.globalData()->float64ArrayDescriptor(), node
, !isFloat64ArrayPrediction(m_state
.forNode(node
.child1()).m_type
));
3362 case CheckFunction
: {
3363 SpeculateCellOperand
function(this, node
.child1());
3364 speculationCheck(BadCache
, JSValueRegs(), NoNode
, m_jit
.branchWeakPtr(JITCompiler::NotEqual
, function
.gpr(), node
.function()));
3365 noResult(m_compileIndex
);
3368 case CheckStructure
: {
3369 if (m_state
.forNode(node
.child1()).m_structure
.isSubsetOf(node
.structureSet())) {
3370 noResult(m_compileIndex
);
3374 SpeculateCellOperand
base(this, node
.child1());
3376 ASSERT(node
.structureSet().size());
3378 if (node
.structureSet().size() == 1)
3379 speculationCheck(BadCache
, JSValueRegs(), NoNode
, m_jit
.branchWeakPtr(JITCompiler::NotEqual
, JITCompiler::Address(base
.gpr(), JSCell::structureOffset()), node
.structureSet()[0]));
3381 GPRTemporary
structure(this);
3383 m_jit
.loadPtr(JITCompiler::Address(base
.gpr(), JSCell::structureOffset()), structure
.gpr());
3385 JITCompiler::JumpList done
;
3387 for (size_t i
= 0; i
< node
.structureSet().size() - 1; ++i
)
3388 done
.append(m_jit
.branchWeakPtr(JITCompiler::Equal
, structure
.gpr(), node
.structureSet()[i
]));
3390 speculationCheck(BadCache
, JSValueRegs(), NoNode
, m_jit
.branchWeakPtr(JITCompiler::NotEqual
, structure
.gpr(), node
.structureSet().last()));
3395 noResult(m_compileIndex
);
3399 case PutStructure
: {
3400 SpeculateCellOperand
base(this, node
.child1());
3401 GPRReg baseGPR
= base
.gpr();
3403 m_jit
.addWeakReferenceTransition(
3404 node
.codeOrigin
.codeOriginOwner(),
3405 node
.structureTransitionData().previousStructure
,
3406 node
.structureTransitionData().newStructure
);
3408 #if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
3409 // Must always emit this write barrier as the structure transition itself requires it
3410 writeBarrier(baseGPR
, node
.structureTransitionData().newStructure
, WriteBarrierForGenericAccess
);
3413 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(node
.structureTransitionData().newStructure
), MacroAssembler::Address(baseGPR
, JSCell::structureOffset()));
3415 noResult(m_compileIndex
);
3419 case GetPropertyStorage
: {
3420 SpeculateCellOperand
base(this, node
.child1());
3421 GPRTemporary
result(this, base
);
3423 GPRReg baseGPR
= base
.gpr();
3424 GPRReg resultGPR
= result
.gpr();
3426 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::offsetOfPropertyStorage()), resultGPR
);
3428 storageResult(resultGPR
, m_compileIndex
);
3432 case GetIndexedPropertyStorage
: {
3433 compileGetIndexedPropertyStorage(node
);
3438 StorageOperand
storage(this, node
.child1());
3439 GPRTemporary
result(this, storage
);
3441 GPRReg storageGPR
= storage
.gpr();
3442 GPRReg resultGPR
= result
.gpr();
3444 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
.storageAccessDataIndex()];
3446 m_jit
.loadPtr(JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
)), resultGPR
);
3448 jsValueResult(resultGPR
, m_compileIndex
);
3453 #if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
3454 SpeculateCellOperand
base(this, node
.child1());
3456 StorageOperand
storage(this, node
.child2());
3457 JSValueOperand
value(this, node
.child3());
3459 GPRReg storageGPR
= storage
.gpr();
3460 GPRReg valueGPR
= value
.gpr();
3462 #if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
3463 writeBarrier(base
.gpr(), value
.gpr(), node
.child3(), WriteBarrierForPropertyAccess
);
3466 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
.storageAccessDataIndex()];
3468 m_jit
.storePtr(valueGPR
, JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
)));
3470 noResult(m_compileIndex
);
3475 SpeculateCellOperand
base(this, node
.child1());
3476 JSValueOperand
value(this, node
.child2());
3477 GPRTemporary
scratch(this);
3479 GPRReg baseGPR
= base
.gpr();
3480 GPRReg valueGPR
= value
.gpr();
3481 GPRReg scratchGPR
= scratch
.gpr();
3486 cachedPutById(node
.codeOrigin
, baseGPR
, valueGPR
, node
.child2(), scratchGPR
, node
.identifierNumber(), NotDirect
);
3488 noResult(m_compileIndex
, UseChildrenCalledExplicitly
);
3492 case PutByIdDirect
: {
3493 SpeculateCellOperand
base(this, node
.child1());
3494 JSValueOperand
value(this, node
.child2());
3495 GPRTemporary
scratch(this);
3497 GPRReg baseGPR
= base
.gpr();
3498 GPRReg valueGPR
= value
.gpr();
3499 GPRReg scratchGPR
= scratch
.gpr();
3504 cachedPutById(node
.codeOrigin
, baseGPR
, valueGPR
, node
.child2(), scratchGPR
, node
.identifierNumber(), Direct
);
3506 noResult(m_compileIndex
, UseChildrenCalledExplicitly
);
3510 case GetGlobalVar
: {
3511 GPRTemporary
result(this);
3513 JSVariableObject
* globalObject
= m_jit
.globalObjectFor(node
.codeOrigin
);
3514 m_jit
.loadPtr(globalObject
->addressOfRegisters(), result
.gpr());
3515 m_jit
.loadPtr(JITCompiler::addressForGlobalVar(result
.gpr(), node
.varNumber()), result
.gpr());
3517 jsValueResult(result
.gpr(), m_compileIndex
);
3521 case PutGlobalVar
: {
3522 JSValueOperand
value(this, node
.child1());
3523 GPRTemporary
globalObject(this);
3524 GPRTemporary
scratch(this);
3526 GPRReg globalObjectReg
= globalObject
.gpr();
3527 GPRReg scratchReg
= scratch
.gpr();
3529 m_jit
.move(MacroAssembler::TrustedImmPtr(m_jit
.globalObjectFor(node
.codeOrigin
)), globalObjectReg
);
3531 writeBarrier(m_jit
.globalObjectFor(node
.codeOrigin
), value
.gpr(), node
.child1(), WriteBarrierForVariableAccess
, scratchReg
);
3533 m_jit
.loadPtr(MacroAssembler::Address(globalObjectReg
, JSVariableObject::offsetOfRegisters()), scratchReg
);
3534 m_jit
.storePtr(value
.gpr(), JITCompiler::addressForGlobalVar(scratchReg
, node
.varNumber()));
3536 noResult(m_compileIndex
);
3540 case CheckHasInstance
: {
3541 SpeculateCellOperand
base(this, node
.child1());
3542 GPRTemporary
structure(this);
3544 // Speculate that base 'ImplementsDefaultHasInstance'.
3545 m_jit
.loadPtr(MacroAssembler::Address(base
.gpr(), JSCell::structureOffset()), structure
.gpr());
3546 speculationCheck(Uncountable
, JSValueRegs(), NoNode
, m_jit
.branchTest8(MacroAssembler::Zero
, MacroAssembler::Address(structure
.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance
)));
3548 noResult(m_compileIndex
);
3553 compileInstanceOf(node
);
3558 JSValueOperand
value(this, node
.child1());
3559 GPRTemporary
result(this);
3561 JITCompiler::Jump isCell
= m_jit
.branchTestPtr(JITCompiler::Zero
, value
.gpr(), GPRInfo::tagMaskRegister
);
3563 m_jit
.comparePtr(JITCompiler::Equal
, value
.gpr(), TrustedImm32(ValueUndefined
), result
.gpr());
3564 JITCompiler::Jump done
= m_jit
.jump();
3566 isCell
.link(&m_jit
);
3567 m_jit
.loadPtr(JITCompiler::Address(value
.gpr(), JSCell::structureOffset()), result
.gpr());
3568 m_jit
.test8(JITCompiler::NonZero
, JITCompiler::Address(result
.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined
), result
.gpr());
3571 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
3572 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
3577 JSValueOperand
value(this, node
.child1());
3578 GPRTemporary
result(this, value
);
3580 m_jit
.move(value
.gpr(), result
.gpr());
3581 m_jit
.xorPtr(JITCompiler::TrustedImm32(ValueFalse
), result
.gpr());
3582 m_jit
.testPtr(JITCompiler::Zero
, result
.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result
.gpr());
3583 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
3584 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
3589 JSValueOperand
value(this, node
.child1());
3590 GPRTemporary
result(this, value
);
3592 m_jit
.testPtr(JITCompiler::NonZero
, value
.gpr(), GPRInfo::tagTypeNumberRegister
, result
.gpr());
3593 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
3594 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
3599 JSValueOperand
value(this, node
.child1());
3600 GPRTemporary
result(this, value
);
3602 JITCompiler::Jump isNotCell
= m_jit
.branchTestPtr(JITCompiler::NonZero
, value
.gpr(), GPRInfo::tagMaskRegister
);
3604 m_jit
.loadPtr(JITCompiler::Address(value
.gpr(), JSCell::structureOffset()), result
.gpr());
3605 m_jit
.compare8(JITCompiler::Equal
, JITCompiler::Address(result
.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType
), result
.gpr());
3606 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
3607 JITCompiler::Jump done
= m_jit
.jump();
3609 isNotCell
.link(&m_jit
);
3610 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
3613 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
3618 JSValueOperand
value(this, node
.child1());
3619 GPRReg valueGPR
= value
.gpr();
3620 GPRResult
result(this);
3621 GPRReg resultGPR
= result
.gpr();
3623 callOperation(operationIsObject
, resultGPR
, valueGPR
);
3624 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
3625 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
3630 JSValueOperand
value(this, node
.child1());
3631 GPRReg valueGPR
= value
.gpr();
3632 GPRResult
result(this);
3633 GPRReg resultGPR
= result
.gpr();
3635 callOperation(operationIsFunction
, resultGPR
, valueGPR
);
3636 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
3637 jsValueResult(result
.gpr(), m_compileIndex
, DataFormatJSBoolean
);
3646 #if ENABLE(DEBUG_WITH_BREAKPOINT)
3649 ASSERT_NOT_REACHED();
3660 GPRResult
result(this);
3661 callOperation(operationResolve
, result
.gpr(), identifier(node
.identifierNumber()));
3662 jsValueResult(result
.gpr(), m_compileIndex
);
3668 GPRResult
result(this);
3669 callOperation(operationResolveBase
, result
.gpr(), identifier(node
.identifierNumber()));
3670 jsValueResult(result
.gpr(), m_compileIndex
);
3674 case ResolveBaseStrictPut
: {
3676 GPRResult
result(this);
3677 callOperation(operationResolveBaseStrictPut
, result
.gpr(), identifier(node
.identifierNumber()));
3678 jsValueResult(result
.gpr(), m_compileIndex
);
3682 case ResolveGlobal
: {
3683 GPRTemporary
globalObject(this);
3684 GPRTemporary
resolveInfo(this);
3685 GPRTemporary
result(this);
3687 GPRReg globalObjectGPR
= globalObject
.gpr();
3688 GPRReg resolveInfoGPR
= resolveInfo
.gpr();
3689 GPRReg resultGPR
= result
.gpr();
3691 ResolveGlobalData
& data
= m_jit
.graph().m_resolveGlobalData
[node
.resolveGlobalDataIndex()];
3692 GlobalResolveInfo
* resolveInfoAddress
= &(m_jit
.codeBlock()->globalResolveInfo(data
.resolveInfoIndex
));
3694 // Check Structure of global object
3695 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.globalObjectFor(node
.codeOrigin
)), globalObjectGPR
);
3696 m_jit
.move(JITCompiler::TrustedImmPtr(resolveInfoAddress
), resolveInfoGPR
);
3697 m_jit
.loadPtr(JITCompiler::Address(resolveInfoGPR
, OBJECT_OFFSETOF(GlobalResolveInfo
, structure
)), resultGPR
);
3698 JITCompiler::Jump structuresMatch
= m_jit
.branchPtr(JITCompiler::Equal
, resultGPR
, JITCompiler::Address(globalObjectGPR
, JSCell::structureOffset()));
3700 silentSpillAllRegisters(resultGPR
);
3701 callOperation(operationResolveGlobal
, resultGPR
, resolveInfoGPR
, &m_jit
.codeBlock()->identifier(data
.identifierNumber
));
3702 silentFillAllRegisters(resultGPR
);
3704 JITCompiler::Jump wasSlow
= m_jit
.jump();
3707 structuresMatch
.link(&m_jit
);
3708 m_jit
.loadPtr(JITCompiler::Address(globalObjectGPR
, JSObject::offsetOfPropertyStorage()), resultGPR
);
3709 m_jit
.load32(JITCompiler::Address(resolveInfoGPR
, OBJECT_OFFSETOF(GlobalResolveInfo
, offset
)), resolveInfoGPR
);
3710 m_jit
.loadPtr(JITCompiler::BaseIndex(resultGPR
, resolveInfoGPR
, JITCompiler::ScalePtr
), resultGPR
);
3712 wasSlow
.link(&m_jit
);
3714 jsValueResult(resultGPR
, m_compileIndex
);
3718 case CreateActivation
: {
3719 JSValueOperand
value(this, node
.child1());
3720 GPRTemporary
result(this, value
);
3722 GPRReg valueGPR
= value
.gpr();
3723 GPRReg resultGPR
= result
.gpr();
3725 m_jit
.move(valueGPR
, resultGPR
);
3727 JITCompiler::Jump alreadyCreated
= m_jit
.branchTestPtr(JITCompiler::NonZero
, resultGPR
);
3729 silentSpillAllRegisters(resultGPR
);
3730 callOperation(operationCreateActivation
, resultGPR
);
3731 silentFillAllRegisters(resultGPR
);
3733 alreadyCreated
.link(&m_jit
);
3735 cellResult(resultGPR
, m_compileIndex
);
3739 case TearOffActivation
: {
3740 JSValueOperand
value(this, node
.child1());
3741 GPRReg valueGPR
= value
.gpr();
3743 JITCompiler::Jump notCreated
= m_jit
.branchTestPtr(JITCompiler::Zero
, valueGPR
);
3745 silentSpillAllRegisters(InvalidGPRReg
);
3746 callOperation(operationTearOffActivation
, valueGPR
);
3747 silentFillAllRegisters(InvalidGPRReg
);
3749 notCreated
.link(&m_jit
);
3751 noResult(m_compileIndex
);
3755 case NewFunctionNoCheck
:
3756 compileNewFunctionNoCheck(node
);
3760 JSValueOperand
value(this, node
.child1());
3761 GPRTemporary
result(this, value
);
3763 GPRReg valueGPR
= value
.gpr();
3764 GPRReg resultGPR
= result
.gpr();
3766 m_jit
.move(valueGPR
, resultGPR
);
3768 JITCompiler::Jump alreadyCreated
= m_jit
.branchTestPtr(JITCompiler::NonZero
, resultGPR
);
3770 silentSpillAllRegisters(resultGPR
);
3772 operationNewFunction
, resultGPR
, m_jit
.codeBlock()->functionDecl(node
.functionDeclIndex()));
3773 silentFillAllRegisters(resultGPR
);
3775 alreadyCreated
.link(&m_jit
);
3777 cellResult(resultGPR
, m_compileIndex
);
3781 case NewFunctionExpression
:
3782 compileNewFunctionExpression(node
);
3785 case ForceOSRExit
: {
3786 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), NoNode
);
3792 noResult(m_compileIndex
);
3797 ASSERT_NOT_REACHED();
3801 ASSERT_NOT_REACHED();
3808 if (node
.hasResult() && node
.mustGenerate())
3809 use(m_compileIndex
);
3814 } } // namespace JSC::DFG