2 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "ArrayPrototype.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
34 #include "DFGOperations.h"
35 #include "DFGSlowPathGenerator.h"
37 #include "DirectArguments.h"
38 #include "GetterSetter.h"
39 #include "JSCInlines.h"
40 #include "JSEnvironmentRecord.h"
41 #include "JSLexicalEnvironment.h"
42 #include "JSPropertyNameEnumerator.h"
43 #include "ObjectPrototype.h"
44 #include "SetupVarargsFrame.h"
45 #include "SpillRegistersMode.h"
46 #include "TypeProfilerLog.h"
48 namespace JSC
{ namespace DFG
{
52 void SpeculativeJIT::boxInt52(GPRReg sourceGPR
, GPRReg targetGPR
, DataFormat format
)
55 if (sourceGPR
== targetGPR
)
60 FPRReg fpr
= fprAllocate();
62 if (format
== DataFormatInt52
)
63 m_jit
.rshift64(TrustedImm32(JSValue::int52ShiftAmount
), sourceGPR
);
65 ASSERT(format
== DataFormatStrictInt52
);
67 m_jit
.boxInt52(sourceGPR
, targetGPR
, tempGPR
, fpr
);
69 if (format
== DataFormatInt52
&& sourceGPR
!= targetGPR
)
70 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), sourceGPR
);
72 if (tempGPR
!= targetGPR
)
78 GPRReg
SpeculativeJIT::fillJSValue(Edge edge
)
80 VirtualRegister virtualRegister
= edge
->virtualRegister();
81 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
83 switch (info
.registerFormat()) {
84 case DataFormatNone
: {
85 GPRReg gpr
= allocate();
87 if (edge
->hasConstant()) {
88 JSValue jsValue
= edge
->asJSValue();
89 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
90 info
.fillJSValue(*m_stream
, gpr
, DataFormatJS
);
91 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
93 DataFormat spillFormat
= info
.spillFormat();
94 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
95 switch (spillFormat
) {
96 case DataFormatInt32
: {
97 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
98 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
99 spillFormat
= DataFormatJSInt32
;
104 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
105 DFG_ASSERT(m_jit
.graph(), m_currentNode
, spillFormat
& DataFormatJS
);
108 info
.fillJSValue(*m_stream
, gpr
, spillFormat
);
113 case DataFormatInt32
: {
114 GPRReg gpr
= info
.gpr();
115 // If the register has already been locked we need to take a copy.
116 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
117 if (m_gprs
.isLocked(gpr
)) {
118 GPRReg result
= allocate();
119 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
, result
);
123 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
124 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
129 // No retag required on JSVALUE64!
131 case DataFormatJSInt32
:
132 case DataFormatJSDouble
:
133 case DataFormatJSCell
:
134 case DataFormatJSBoolean
: {
135 GPRReg gpr
= info
.gpr();
140 case DataFormatBoolean
:
141 case DataFormatStorage
:
142 case DataFormatDouble
:
143 case DataFormatInt52
:
144 // this type currently never occurs
145 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Bad data format");
148 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Corrupt data format");
149 return InvalidGPRReg
;
153 void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg resultGPR
, unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
155 JITGetByIdGenerator
gen(
156 m_jit
.codeBlock(), codeOrigin
, usedRegisters(), JSValueRegs(baseGPR
),
157 JSValueRegs(resultGPR
), spillMode
);
158 gen
.generateFastPath(m_jit
);
160 JITCompiler::JumpList slowCases
;
161 if (slowPathTarget
.isSet())
162 slowCases
.append(slowPathTarget
);
163 slowCases
.append(gen
.slowPathJump());
165 auto slowPath
= slowPathCall(
166 slowCases
, this, operationGetByIdOptimize
, resultGPR
, gen
.stubInfo(), baseGPR
,
167 identifierUID(identifierNumber
), spillMode
);
169 m_jit
.addGetById(gen
, slowPath
.get());
170 addSlowPathGenerator(WTF::move(slowPath
));
173 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg valueGPR
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind putKind
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
175 JITPutByIdGenerator
gen(
176 m_jit
.codeBlock(), codeOrigin
, usedRegisters(), JSValueRegs(baseGPR
),
177 JSValueRegs(valueGPR
), scratchGPR
, spillMode
, m_jit
.ecmaModeFor(codeOrigin
), putKind
);
179 gen
.generateFastPath(m_jit
);
181 JITCompiler::JumpList slowCases
;
182 if (slowPathTarget
.isSet())
183 slowCases
.append(slowPathTarget
);
184 slowCases
.append(gen
.slowPathJump());
186 auto slowPath
= slowPathCall(
187 slowCases
, this, gen
.slowPathFunction(), NoResult
, gen
.stubInfo(), valueGPR
, baseGPR
,
188 identifierUID(identifierNumber
));
190 m_jit
.addPutById(gen
, slowPath
.get());
191 addSlowPathGenerator(WTF::move(slowPath
));
194 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
)
196 JSValueOperand
arg(this, operand
);
197 GPRReg argGPR
= arg
.gpr();
199 GPRTemporary
result(this, Reuse
, arg
);
200 GPRReg resultGPR
= result
.gpr();
202 JITCompiler::Jump notCell
;
204 JITCompiler::Jump notMasqueradesAsUndefined
;
205 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
206 if (!isKnownCell(operand
.node()))
207 notCell
= m_jit
.branchIfNotCell(JSValueRegs(argGPR
));
209 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultGPR
);
210 notMasqueradesAsUndefined
= m_jit
.jump();
212 GPRTemporary
localGlobalObject(this);
213 GPRTemporary
remoteGlobalObject(this);
214 GPRTemporary
scratch(this);
216 if (!isKnownCell(operand
.node()))
217 notCell
= m_jit
.branchIfNotCell(JSValueRegs(argGPR
));
219 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
220 JITCompiler::NonZero
,
221 JITCompiler::Address(argGPR
, JSCell::typeInfoFlagsOffset()),
222 JITCompiler::TrustedImm32(MasqueradesAsUndefined
));
224 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultGPR
);
225 notMasqueradesAsUndefined
= m_jit
.jump();
227 isMasqueradesAsUndefined
.link(&m_jit
);
228 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
229 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
230 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
231 m_jit
.emitLoadStructure(argGPR
, resultGPR
, scratch
.gpr());
232 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
233 m_jit
.comparePtr(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, resultGPR
);
236 if (!isKnownCell(operand
.node())) {
237 JITCompiler::Jump done
= m_jit
.jump();
239 notCell
.link(&m_jit
);
241 m_jit
.move(argGPR
, resultGPR
);
242 m_jit
.and64(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
243 m_jit
.compare64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm32(ValueNull
), resultGPR
);
248 notMasqueradesAsUndefined
.link(&m_jit
);
250 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
251 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
254 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand
, Node
* branchNode
, bool invert
)
256 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
257 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
259 if (taken
== nextBlock()) {
261 BasicBlock
* tmp
= taken
;
266 JSValueOperand
arg(this, operand
);
267 GPRReg argGPR
= arg
.gpr();
269 GPRTemporary
result(this, Reuse
, arg
);
270 GPRReg resultGPR
= result
.gpr();
272 JITCompiler::Jump notCell
;
274 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
275 if (!isKnownCell(operand
.node()))
276 notCell
= m_jit
.branchIfNotCell(JSValueRegs(argGPR
));
278 jump(invert
? taken
: notTaken
, ForceJump
);
280 GPRTemporary
localGlobalObject(this);
281 GPRTemporary
remoteGlobalObject(this);
282 GPRTemporary
scratch(this);
284 if (!isKnownCell(operand
.node()))
285 notCell
= m_jit
.branchIfNotCell(JSValueRegs(argGPR
));
287 branchTest8(JITCompiler::Zero
,
288 JITCompiler::Address(argGPR
, JSCell::typeInfoFlagsOffset()),
289 JITCompiler::TrustedImm32(MasqueradesAsUndefined
),
290 invert
? taken
: notTaken
);
292 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
293 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
294 m_jit
.move(TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
295 m_jit
.emitLoadStructure(argGPR
, resultGPR
, scratch
.gpr());
296 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
297 branchPtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, invert
? notTaken
: taken
);
300 if (!isKnownCell(operand
.node())) {
301 jump(notTaken
, ForceJump
);
303 notCell
.link(&m_jit
);
305 m_jit
.move(argGPR
, resultGPR
);
306 m_jit
.and64(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
307 branch64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm64(ValueNull
), taken
);
313 bool SpeculativeJIT::nonSpeculativeCompareNull(Node
* node
, Edge operand
, bool invert
)
315 unsigned branchIndexInBlock
= detectPeepHoleBranch();
316 if (branchIndexInBlock
!= UINT_MAX
) {
317 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
319 DFG_ASSERT(m_jit
.graph(), node
, node
->adjustedRefCount() == 1);
321 nonSpeculativePeepholeBranchNull(operand
, branchNode
, invert
);
325 m_indexInBlock
= branchIndexInBlock
;
326 m_currentNode
= branchNode
;
331 nonSpeculativeNonPeepholeCompareNull(operand
, invert
);
336 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node
* node
, Node
* branchNode
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
338 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
339 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
341 JITCompiler::ResultCondition callResultCondition
= JITCompiler::NonZero
;
343 // The branch instruction will branch to the taken block.
344 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
345 if (taken
== nextBlock()) {
346 cond
= JITCompiler::invert(cond
);
347 callResultCondition
= JITCompiler::Zero
;
348 BasicBlock
* tmp
= taken
;
353 JSValueOperand
arg1(this, node
->child1());
354 JSValueOperand
arg2(this, node
->child2());
355 GPRReg arg1GPR
= arg1
.gpr();
356 GPRReg arg2GPR
= arg2
.gpr();
358 JITCompiler::JumpList slowPath
;
360 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
361 GPRFlushedCallResult
result(this);
362 GPRReg resultGPR
= result
.gpr();
368 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
370 branchTest32(callResultCondition
, resultGPR
, taken
);
372 GPRTemporary
result(this, Reuse
, arg2
);
373 GPRReg resultGPR
= result
.gpr();
378 if (!isKnownInteger(node
->child1().node()))
379 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
380 if (!isKnownInteger(node
->child2().node()))
381 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
383 branch32(cond
, arg1GPR
, arg2GPR
, taken
);
385 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
386 jump(notTaken
, ForceJump
);
388 slowPath
.link(&m_jit
);
390 silentSpillAllRegisters(resultGPR
);
391 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
392 silentFillAllRegisters(resultGPR
);
394 branchTest32(callResultCondition
, resultGPR
, taken
);
400 m_indexInBlock
= m_block
->size() - 1;
401 m_currentNode
= branchNode
;
404 template<typename JumpType
>
405 class CompareAndBoxBooleanSlowPathGenerator
406 : public CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
> {
408 CompareAndBoxBooleanSlowPathGenerator(
409 JumpType from
, SpeculativeJIT
* jit
,
410 S_JITOperation_EJJ function
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
411 : CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
>(
412 from
, jit
, function
, NeedToSpill
, result
)
419 virtual void generateInternal(SpeculativeJIT
* jit
) override
422 this->recordCall(jit
->callOperation(this->m_function
, this->m_result
, m_arg1
, m_arg2
));
423 jit
->m_jit
.and32(JITCompiler::TrustedImm32(1), this->m_result
);
424 jit
->m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), this->m_result
);
433 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node
* node
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
435 ASSERT(node
->isBinaryUseKind(UntypedUse
));
436 JSValueOperand
arg1(this, node
->child1());
437 JSValueOperand
arg2(this, node
->child2());
438 GPRReg arg1GPR
= arg1
.gpr();
439 GPRReg arg2GPR
= arg2
.gpr();
441 JITCompiler::JumpList slowPath
;
443 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
444 GPRFlushedCallResult
result(this);
445 GPRReg resultGPR
= result
.gpr();
451 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
453 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
454 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
456 GPRTemporary
result(this, Reuse
, arg2
);
457 GPRReg resultGPR
= result
.gpr();
462 if (!isKnownInteger(node
->child1().node()))
463 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
464 if (!isKnownInteger(node
->child2().node()))
465 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
467 m_jit
.compare32(cond
, arg1GPR
, arg2GPR
, resultGPR
);
468 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
470 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
471 addSlowPathGenerator(std::make_unique
<CompareAndBoxBooleanSlowPathGenerator
<JITCompiler::JumpList
>>(
472 slowPath
, this, helperFunction
, resultGPR
, arg1GPR
, arg2GPR
));
475 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
479 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node
* node
, Node
* branchNode
, bool invert
)
481 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
482 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
484 // The branch instruction will branch to the taken block.
485 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
486 if (taken
== nextBlock()) {
488 BasicBlock
* tmp
= taken
;
493 JSValueOperand
arg1(this, node
->child1());
494 JSValueOperand
arg2(this, node
->child2());
495 GPRReg arg1GPR
= arg1
.gpr();
496 GPRReg arg2GPR
= arg2
.gpr();
498 GPRTemporary
result(this);
499 GPRReg resultGPR
= result
.gpr();
504 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
505 // see if we get lucky: if the arguments are cells and they reference the same
506 // cell, then they must be strictly equal.
507 branch64(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
509 silentSpillAllRegisters(resultGPR
);
510 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
511 silentFillAllRegisters(resultGPR
);
513 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
515 m_jit
.or64(arg1GPR
, arg2GPR
, resultGPR
);
517 JITCompiler::Jump twoCellsCase
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
519 JITCompiler::Jump leftOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
520 JITCompiler::Jump leftDouble
= m_jit
.branchTest64(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
522 JITCompiler::Jump rightOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
523 JITCompiler::Jump rightDouble
= m_jit
.branchTest64(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
524 rightOK
.link(&m_jit
);
526 branch64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, taken
);
527 jump(notTaken
, ForceJump
);
529 twoCellsCase
.link(&m_jit
);
530 branch64(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
532 leftDouble
.link(&m_jit
);
533 rightDouble
.link(&m_jit
);
535 silentSpillAllRegisters(resultGPR
);
536 callOperation(operationCompareStrictEq
, resultGPR
, arg1GPR
, arg2GPR
);
537 silentFillAllRegisters(resultGPR
);
539 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
545 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node
* node
, bool invert
)
547 JSValueOperand
arg1(this, node
->child1());
548 JSValueOperand
arg2(this, node
->child2());
549 GPRReg arg1GPR
= arg1
.gpr();
550 GPRReg arg2GPR
= arg2
.gpr();
552 GPRTemporary
result(this);
553 GPRReg resultGPR
= result
.gpr();
558 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
559 // see if we get lucky: if the arguments are cells and they reference the same
560 // cell, then they must be strictly equal.
561 // FIXME: this should flush registers instead of silent spill/fill.
562 JITCompiler::Jump notEqualCase
= m_jit
.branch64(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
);
564 m_jit
.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
566 JITCompiler::Jump done
= m_jit
.jump();
568 notEqualCase
.link(&m_jit
);
570 silentSpillAllRegisters(resultGPR
);
571 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
572 silentFillAllRegisters(resultGPR
);
574 m_jit
.and64(JITCompiler::TrustedImm32(1), resultGPR
);
575 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
579 m_jit
.or64(arg1GPR
, arg2GPR
, resultGPR
);
581 JITCompiler::JumpList slowPathCases
;
583 JITCompiler::Jump twoCellsCase
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
585 JITCompiler::Jump leftOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
586 slowPathCases
.append(m_jit
.branchTest64(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
588 JITCompiler::Jump rightOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
589 slowPathCases
.append(m_jit
.branchTest64(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
590 rightOK
.link(&m_jit
);
592 m_jit
.compare64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, resultGPR
);
593 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
595 JITCompiler::Jump done
= m_jit
.jump();
597 twoCellsCase
.link(&m_jit
);
598 slowPathCases
.append(m_jit
.branch64(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
));
600 m_jit
.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
602 addSlowPathGenerator(std::make_unique
<CompareAndBoxBooleanSlowPathGenerator
<MacroAssembler::JumpList
>>(
603 slowPathCases
, this, operationCompareStrictEq
, resultGPR
, arg1GPR
,
609 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
612 void SpeculativeJIT::compileMiscStrictEq(Node
* node
)
614 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
615 JSValueOperand
op2(this, node
->child2(), ManualOperandSpeculation
);
616 GPRTemporary
result(this);
618 if (node
->child1().useKind() == MiscUse
)
619 speculateMisc(node
->child1(), op1
.jsValueRegs());
620 if (node
->child2().useKind() == MiscUse
)
621 speculateMisc(node
->child2(), op2
.jsValueRegs());
623 m_jit
.compare64(JITCompiler::Equal
, op1
.gpr(), op2
.gpr(), result
.gpr());
624 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
625 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
628 void SpeculativeJIT::emitCall(Node
* node
)
630 CallLinkInfo::CallType callType
;
631 bool isVarargs
= false;
632 bool isForwardVarargs
= false;
633 switch (node
->op()) {
635 callType
= CallLinkInfo::Call
;
638 callType
= CallLinkInfo::Construct
;
641 callType
= CallLinkInfo::CallVarargs
;
644 case ConstructVarargs
:
645 callType
= CallLinkInfo::ConstructVarargs
;
648 case CallForwardVarargs
:
649 callType
= CallLinkInfo::CallVarargs
;
650 isForwardVarargs
= true;
652 case ConstructForwardVarargs
:
653 callType
= CallLinkInfo::ConstructVarargs
;
654 isForwardVarargs
= true;
657 DFG_CRASH(m_jit
.graph(), node
, "bad node type");
661 Edge calleeEdge
= m_jit
.graph().child(node
, 0);
663 // Gotta load the arguments somehow. Varargs is trickier.
664 if (isVarargs
|| isForwardVarargs
) {
665 CallVarargsData
* data
= node
->callVarargsData();
668 unsigned numUsedStackSlots
= m_jit
.graph().m_nextMachineLocal
;
670 if (isForwardVarargs
) {
678 scratchGPR1
= JITCompiler::selectScratchGPR();
679 scratchGPR2
= JITCompiler::selectScratchGPR(scratchGPR1
);
680 scratchGPR3
= JITCompiler::selectScratchGPR(scratchGPR1
, scratchGPR2
);
682 m_jit
.move(TrustedImm32(numUsedStackSlots
), scratchGPR2
);
683 JITCompiler::JumpList slowCase
;
684 emitSetupVarargsFrameFastCase(m_jit
, scratchGPR2
, scratchGPR1
, scratchGPR2
, scratchGPR3
, node
->child2()->origin
.semantic
.inlineCallFrame
, data
->firstVarArgOffset
, slowCase
);
685 JITCompiler::Jump done
= m_jit
.jump();
686 slowCase
.link(&m_jit
);
687 callOperation(operationThrowStackOverflowForVarargs
);
688 m_jit
.abortWithReason(DFGVarargsThrowingPathDidNotThrow
);
690 resultGPR
= scratchGPR2
;
697 auto loadArgumentsGPR
= [&] (GPRReg reservedGPR
) {
698 if (reservedGPR
!= InvalidGPRReg
)
700 JSValueOperand
arguments(this, node
->child2());
701 argumentsGPR
= arguments
.gpr();
702 if (reservedGPR
!= InvalidGPRReg
)
706 scratchGPR1
= JITCompiler::selectScratchGPR(argumentsGPR
, reservedGPR
);
707 scratchGPR2
= JITCompiler::selectScratchGPR(argumentsGPR
, scratchGPR1
, reservedGPR
);
708 scratchGPR3
= JITCompiler::selectScratchGPR(argumentsGPR
, scratchGPR1
, scratchGPR2
, reservedGPR
);
711 loadArgumentsGPR(InvalidGPRReg
);
713 DFG_ASSERT(m_jit
.graph(), node
, isFlushed());
715 // Right now, arguments is in argumentsGPR and the register file is flushed.
716 callOperation(operationSizeFrameForVarargs
, GPRInfo::returnValueGPR
, argumentsGPR
, numUsedStackSlots
, data
->firstVarArgOffset
);
718 // Now we have the argument count of the callee frame, but we've lost the arguments operand.
719 // Reconstruct the arguments operand while preserving the callee frame.
720 loadArgumentsGPR(GPRInfo::returnValueGPR
);
721 m_jit
.move(TrustedImm32(numUsedStackSlots
), scratchGPR1
);
722 emitSetVarargsFrame(m_jit
, GPRInfo::returnValueGPR
, false, scratchGPR1
, scratchGPR1
);
723 m_jit
.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC
) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), scratchGPR1
, JITCompiler::stackPointerRegister
);
725 callOperation(operationSetupVarargsFrame
, GPRInfo::returnValueGPR
, scratchGPR1
, argumentsGPR
, data
->firstVarArgOffset
, GPRInfo::returnValueGPR
);
726 resultGPR
= GPRInfo::returnValueGPR
;
729 m_jit
.addPtr(TrustedImm32(sizeof(CallerFrameAndPC
)), resultGPR
, JITCompiler::stackPointerRegister
);
731 DFG_ASSERT(m_jit
.graph(), node
, isFlushed());
733 // We don't need the arguments array anymore.
737 // Now set up the "this" argument.
738 JSValueOperand
thisArgument(this, node
->child3());
739 GPRReg thisArgumentGPR
= thisArgument
.gpr();
742 m_jit
.store64(thisArgumentGPR
, JITCompiler::calleeArgumentSlot(0));
744 // The call instruction's first child is the function; the subsequent children are the
746 int numPassedArgs
= node
->numChildren() - 1;
748 m_jit
.store32(MacroAssembler::TrustedImm32(numPassedArgs
), JITCompiler::calleeFramePayloadSlot(JSStack::ArgumentCount
));
750 for (int i
= 0; i
< numPassedArgs
; i
++) {
751 Edge argEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + 1 + i
];
752 JSValueOperand
arg(this, argEdge
);
753 GPRReg argGPR
= arg
.gpr();
756 m_jit
.store64(argGPR
, JITCompiler::calleeArgumentSlot(i
));
760 JSValueOperand
callee(this, calleeEdge
);
761 GPRReg calleeGPR
= callee
.gpr();
763 m_jit
.store64(calleeGPR
, JITCompiler::calleeFrameSlot(JSStack::Callee
));
767 GPRFlushedCallResult
result(this);
768 GPRReg resultGPR
= result
.gpr();
770 JITCompiler::DataLabelPtr targetToCheck
;
771 JITCompiler::Jump slowPath
;
773 m_jit
.emitStoreCodeOrigin(node
->origin
.semantic
);
775 CallLinkInfo
* callLinkInfo
= m_jit
.codeBlock()->addCallLinkInfo();
777 slowPath
= m_jit
.branchPtrWithPatch(MacroAssembler::NotEqual
, calleeGPR
, targetToCheck
, MacroAssembler::TrustedImmPtr(0));
779 JITCompiler::Call fastCall
= m_jit
.nearCall();
781 JITCompiler::Jump done
= m_jit
.jump();
783 slowPath
.link(&m_jit
);
785 m_jit
.move(calleeGPR
, GPRInfo::regT0
); // Callee needs to be in regT0
786 m_jit
.move(MacroAssembler::TrustedImmPtr(callLinkInfo
), GPRInfo::regT2
); // Link info needs to be in regT2
787 JITCompiler::Call slowCall
= m_jit
.nearCall();
791 m_jit
.move(GPRInfo::returnValueGPR
, resultGPR
);
793 jsValueResult(resultGPR
, m_currentNode
, DataFormatJS
, UseChildrenCalledExplicitly
);
795 callLinkInfo
->setUpCall(callType
, m_currentNode
->origin
.semantic
, calleeGPR
);
796 m_jit
.addJSCall(fastCall
, slowCall
, targetToCheck
, callLinkInfo
);
798 // If we were varargs, then after the calls are done, we need to reestablish our stack pointer.
799 if (isVarargs
|| isForwardVarargs
)
800 m_jit
.addPtr(TrustedImm32(m_jit
.graph().stackPointerOffset() * sizeof(Register
)), GPRInfo::callFrameRegister
, JITCompiler::stackPointerRegister
);
803 // Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it
804 // http://llvm.org/bugs/show_bug.cgi?id=18619
805 #if COMPILER(CLANG) && defined(__has_warning)
806 #pragma clang diagnostic push
807 #if __has_warning("-Wimplicit-fallthrough")
808 #pragma clang diagnostic ignored "-Wimplicit-fallthrough"
811 template<bool strict
>
812 GPRReg
SpeculativeJIT::fillSpeculateInt32Internal(Edge edge
, DataFormat
& returnFormat
)
814 AbstractValue
& value
= m_state
.forNode(edge
);
815 SpeculatedType type
= value
.m_type
;
816 ASSERT(edge
.useKind() != KnownInt32Use
|| !(value
.m_type
& ~SpecInt32
));
818 m_interpreter
.filter(value
, SpecInt32
);
819 if (value
.isClear()) {
820 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
821 returnFormat
= DataFormatInt32
;
825 VirtualRegister virtualRegister
= edge
->virtualRegister();
826 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
828 switch (info
.registerFormat()) {
829 case DataFormatNone
: {
830 GPRReg gpr
= allocate();
832 if (edge
->hasConstant()) {
833 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
834 ASSERT(edge
->isInt32Constant());
835 m_jit
.move(MacroAssembler::Imm32(edge
->asInt32()), gpr
);
836 info
.fillInt32(*m_stream
, gpr
);
837 returnFormat
= DataFormatInt32
;
841 DataFormat spillFormat
= info
.spillFormat();
843 DFG_ASSERT(m_jit
.graph(), m_currentNode
, (spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInt32
);
845 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
847 if (spillFormat
== DataFormatJSInt32
|| spillFormat
== DataFormatInt32
) {
848 // If we know this was spilled as an integer we can fill without checking.
850 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
851 info
.fillInt32(*m_stream
, gpr
);
852 returnFormat
= DataFormatInt32
;
855 if (spillFormat
== DataFormatInt32
) {
856 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
857 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
859 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
860 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
861 returnFormat
= DataFormatJSInt32
;
864 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
866 // Fill as JSValue, and fall through.
867 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
873 DFG_ASSERT(m_jit
.graph(), m_currentNode
, !(type
& SpecInt52
));
874 // Check the value is an integer.
875 GPRReg gpr
= info
.gpr();
877 if (type
& ~SpecInt32
)
878 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branch64(MacroAssembler::Below
, gpr
, GPRInfo::tagTypeNumberRegister
));
879 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
880 // If !strict we're done, return.
882 returnFormat
= DataFormatJSInt32
;
885 // else fall through & handle as DataFormatJSInt32.
890 case DataFormatJSInt32
: {
891 // In a strict fill we need to strip off the value tag.
893 GPRReg gpr
= info
.gpr();
895 // If the register has already been locked we need to take a copy.
896 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
897 if (m_gprs
.isLocked(gpr
))
901 info
.fillInt32(*m_stream
, gpr
);
904 m_jit
.zeroExtend32ToPtr(gpr
, result
);
905 returnFormat
= DataFormatInt32
;
909 GPRReg gpr
= info
.gpr();
911 returnFormat
= DataFormatJSInt32
;
915 case DataFormatInt32
: {
916 GPRReg gpr
= info
.gpr();
918 returnFormat
= DataFormatInt32
;
922 case DataFormatJSDouble
:
924 case DataFormatBoolean
:
925 case DataFormatJSCell
:
926 case DataFormatJSBoolean
:
927 case DataFormatDouble
:
928 case DataFormatStorage
:
929 case DataFormatInt52
:
930 case DataFormatStrictInt52
:
931 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Bad data format");
934 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Corrupt data format");
935 return InvalidGPRReg
;
938 #if COMPILER(CLANG) && defined(__has_warning)
939 #pragma clang diagnostic pop
942 GPRReg
SpeculativeJIT::fillSpeculateInt32(Edge edge
, DataFormat
& returnFormat
)
944 return fillSpeculateInt32Internal
<false>(edge
, returnFormat
);
947 GPRReg
SpeculativeJIT::fillSpeculateInt32Strict(Edge edge
)
949 DataFormat mustBeDataFormatInt32
;
950 GPRReg result
= fillSpeculateInt32Internal
<true>(edge
, mustBeDataFormatInt32
);
951 DFG_ASSERT(m_jit
.graph(), m_currentNode
, mustBeDataFormatInt32
== DataFormatInt32
);
955 GPRReg
SpeculativeJIT::fillSpeculateInt52(Edge edge
, DataFormat desiredFormat
)
957 ASSERT(desiredFormat
== DataFormatInt52
|| desiredFormat
== DataFormatStrictInt52
);
958 AbstractValue
& value
= m_state
.forNode(edge
);
960 m_interpreter
.filter(value
, SpecMachineInt
);
961 if (value
.isClear()) {
962 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
966 VirtualRegister virtualRegister
= edge
->virtualRegister();
967 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
969 switch (info
.registerFormat()) {
970 case DataFormatNone
: {
971 GPRReg gpr
= allocate();
973 if (edge
->hasConstant()) {
974 JSValue jsValue
= edge
->asJSValue();
975 ASSERT(jsValue
.isMachineInt());
976 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
977 int64_t value
= jsValue
.asMachineInt();
978 if (desiredFormat
== DataFormatInt52
)
979 value
= value
<< JSValue::int52ShiftAmount
;
980 m_jit
.move(MacroAssembler::Imm64(value
), gpr
);
981 info
.fillGPR(*m_stream
, gpr
, desiredFormat
);
985 DataFormat spillFormat
= info
.spillFormat();
987 DFG_ASSERT(m_jit
.graph(), m_currentNode
, spillFormat
== DataFormatInt52
|| spillFormat
== DataFormatStrictInt52
);
989 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
991 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
992 if (desiredFormat
== DataFormatStrictInt52
) {
993 if (spillFormat
== DataFormatInt52
)
994 m_jit
.rshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
995 info
.fillStrictInt52(*m_stream
, gpr
);
998 if (spillFormat
== DataFormatStrictInt52
)
999 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
1000 info
.fillInt52(*m_stream
, gpr
);
1004 case DataFormatStrictInt52
: {
1005 GPRReg gpr
= info
.gpr();
1006 bool wasLocked
= m_gprs
.isLocked(gpr
);
1008 if (desiredFormat
== DataFormatStrictInt52
)
1011 GPRReg result
= allocate();
1012 m_jit
.move(gpr
, result
);
1016 info
.fillInt52(*m_stream
, gpr
);
1017 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
1021 case DataFormatInt52
: {
1022 GPRReg gpr
= info
.gpr();
1023 bool wasLocked
= m_gprs
.isLocked(gpr
);
1025 if (desiredFormat
== DataFormatInt52
)
1028 GPRReg result
= allocate();
1029 m_jit
.move(gpr
, result
);
1033 info
.fillStrictInt52(*m_stream
, gpr
);
1034 m_jit
.rshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
1039 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Bad data format");
1040 return InvalidGPRReg
;
1044 FPRReg
SpeculativeJIT::fillSpeculateDouble(Edge edge
)
1046 ASSERT(edge
.useKind() == DoubleRepUse
|| edge
.useKind() == DoubleRepRealUse
|| edge
.useKind() == DoubleRepMachineIntUse
);
1047 ASSERT(edge
->hasDoubleResult());
1048 VirtualRegister virtualRegister
= edge
->virtualRegister();
1049 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1051 if (info
.registerFormat() == DataFormatNone
) {
1052 if (edge
->hasConstant()) {
1053 GPRReg gpr
= allocate();
1055 if (edge
->isNumberConstant()) {
1056 FPRReg fpr
= fprAllocate();
1057 m_jit
.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(edge
->asNumber())), gpr
);
1058 m_jit
.move64ToDouble(gpr
, fpr
);
1061 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1062 info
.fillDouble(*m_stream
, fpr
);
1065 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1066 return fprAllocate();
1069 DataFormat spillFormat
= info
.spillFormat();
1070 if (spillFormat
!= DataFormatDouble
) {
1072 m_jit
.graph(), m_currentNode
, toCString(
1073 "Expected ", edge
, " to have double format but instead it is spilled as ",
1074 dataFormatToString(spillFormat
)).data());
1076 DFG_ASSERT(m_jit
.graph(), m_currentNode
, spillFormat
== DataFormatDouble
);
1077 FPRReg fpr
= fprAllocate();
1078 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
1079 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1080 info
.fillDouble(*m_stream
, fpr
);
1084 DFG_ASSERT(m_jit
.graph(), m_currentNode
, info
.registerFormat() == DataFormatDouble
);
1085 FPRReg fpr
= info
.fpr();
1090 GPRReg
SpeculativeJIT::fillSpeculateCell(Edge edge
)
1092 AbstractValue
& value
= m_state
.forNode(edge
);
1093 SpeculatedType type
= value
.m_type
;
1094 ASSERT((edge
.useKind() != KnownCellUse
&& edge
.useKind() != KnownStringUse
) || !(value
.m_type
& ~SpecCell
));
1096 m_interpreter
.filter(value
, SpecCell
);
1097 if (value
.isClear()) {
1098 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1102 VirtualRegister virtualRegister
= edge
->virtualRegister();
1103 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1105 switch (info
.registerFormat()) {
1106 case DataFormatNone
: {
1107 GPRReg gpr
= allocate();
1109 if (edge
->hasConstant()) {
1110 JSValue jsValue
= edge
->asJSValue();
1111 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1112 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
1113 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSCell
);
1117 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1118 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
1120 info
.fillJSValue(*m_stream
, gpr
, DataFormatJS
);
1121 if (type
& ~SpecCell
)
1122 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branchIfNotCell(JSValueRegs(gpr
)));
1123 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSCell
);
1127 case DataFormatCell
:
1128 case DataFormatJSCell
: {
1129 GPRReg gpr
= info
.gpr();
1131 if (!ASSERT_DISABLED
) {
1132 MacroAssembler::Jump checkCell
= m_jit
.branchIfCell(JSValueRegs(gpr
));
1133 m_jit
.abortWithReason(DFGIsNotCell
);
1134 checkCell
.link(&m_jit
);
1139 case DataFormatJS
: {
1140 GPRReg gpr
= info
.gpr();
1142 if (type
& ~SpecCell
)
1143 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branchIfNotCell(JSValueRegs(gpr
)));
1144 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSCell
);
1148 case DataFormatJSInt32
:
1149 case DataFormatInt32
:
1150 case DataFormatJSDouble
:
1151 case DataFormatJSBoolean
:
1152 case DataFormatBoolean
:
1153 case DataFormatDouble
:
1154 case DataFormatStorage
:
1155 case DataFormatInt52
:
1156 case DataFormatStrictInt52
:
1157 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Bad data format");
1160 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Corrupt data format");
1161 return InvalidGPRReg
;
1165 GPRReg
SpeculativeJIT::fillSpeculateBoolean(Edge edge
)
1167 AbstractValue
& value
= m_state
.forNode(edge
);
1168 SpeculatedType type
= value
.m_type
;
1170 m_interpreter
.filter(value
, SpecBoolean
);
1171 if (value
.isClear()) {
1172 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1176 VirtualRegister virtualRegister
= edge
->virtualRegister();
1177 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1179 switch (info
.registerFormat()) {
1180 case DataFormatNone
: {
1181 GPRReg gpr
= allocate();
1183 if (edge
->hasConstant()) {
1184 JSValue jsValue
= edge
->asJSValue();
1185 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1186 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
1187 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSBoolean
);
1190 DFG_ASSERT(m_jit
.graph(), m_currentNode
, info
.spillFormat() & DataFormatJS
);
1191 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1192 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
1194 info
.fillJSValue(*m_stream
, gpr
, DataFormatJS
);
1195 if (type
& ~SpecBoolean
) {
1196 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1197 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branchTest64(MacroAssembler::NonZero
, gpr
, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck
, gpr
, InvalidGPRReg
));
1198 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1200 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSBoolean
);
1204 case DataFormatBoolean
:
1205 case DataFormatJSBoolean
: {
1206 GPRReg gpr
= info
.gpr();
1211 case DataFormatJS
: {
1212 GPRReg gpr
= info
.gpr();
1214 if (type
& ~SpecBoolean
) {
1215 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1216 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branchTest64(MacroAssembler::NonZero
, gpr
, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck
, gpr
, InvalidGPRReg
));
1217 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1219 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSBoolean
);
1223 case DataFormatJSInt32
:
1224 case DataFormatInt32
:
1225 case DataFormatJSDouble
:
1226 case DataFormatJSCell
:
1227 case DataFormatCell
:
1228 case DataFormatDouble
:
1229 case DataFormatStorage
:
1230 case DataFormatInt52
:
1231 case DataFormatStrictInt52
:
1232 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Bad data format");
1235 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Corrupt data format");
1236 return InvalidGPRReg
;
1240 void SpeculativeJIT::compileBaseValueStoreBarrier(Edge
& baseEdge
, Edge
& valueEdge
)
1243 ASSERT(!isKnownNotCell(valueEdge
.node()));
1245 SpeculateCellOperand
base(this, baseEdge
);
1246 JSValueOperand
value(this, valueEdge
);
1247 GPRTemporary
scratch1(this);
1248 GPRTemporary
scratch2(this);
1250 writeBarrier(base
.gpr(), value
.gpr(), valueEdge
, scratch1
.gpr(), scratch2
.gpr());
1252 UNUSED_PARAM(baseEdge
);
1253 UNUSED_PARAM(valueEdge
);
1257 void SpeculativeJIT::compileObjectEquality(Node
* node
)
1259 SpeculateCellOperand
op1(this, node
->child1());
1260 SpeculateCellOperand
op2(this, node
->child2());
1261 GPRTemporary
result(this, Reuse
, op1
);
1263 GPRReg op1GPR
= op1
.gpr();
1264 GPRReg op2GPR
= op2
.gpr();
1265 GPRReg resultGPR
= result
.gpr();
1267 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1269 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1271 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1274 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1275 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1277 MacroAssembler::NonZero
,
1278 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1279 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1282 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1283 speculationCheck(BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1285 MacroAssembler::NonZero
,
1286 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1287 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1290 MacroAssembler::Jump falseCase
= m_jit
.branch64(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1291 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1292 MacroAssembler::Jump done
= m_jit
.jump();
1293 falseCase
.link(&m_jit
);
1294 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1297 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
1300 void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild
, Edge otherChild
)
1302 SpeculateCellOperand
op1(this, objectChild
);
1303 JSValueOperand
op2(this, otherChild
);
1304 GPRTemporary
result(this);
1306 GPRReg op1GPR
= op1
.gpr();
1307 GPRReg op2GPR
= op2
.gpr();
1308 GPRReg resultGPR
= result
.gpr();
1310 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR
), objectChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1312 // At this point we know that we can perform a straight-forward equality comparison on pointer
1313 // values because we are doing strict equality.
1314 m_jit
.compare64(MacroAssembler::Equal
, op1GPR
, op2GPR
, resultGPR
);
1315 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
1316 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
1319 void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild
, Edge otherChild
, Node
* branchNode
)
1321 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1322 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1324 SpeculateCellOperand
op1(this, objectChild
);
1325 JSValueOperand
op2(this, otherChild
);
1327 GPRReg op1GPR
= op1
.gpr();
1328 GPRReg op2GPR
= op2
.gpr();
1330 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR
), objectChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1332 if (taken
== nextBlock()) {
1333 branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2GPR
, notTaken
);
1336 branchPtr(MacroAssembler::Equal
, op1GPR
, op2GPR
, taken
);
1341 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
)
1343 SpeculateCellOperand
op1(this, leftChild
);
1344 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1345 GPRTemporary
result(this);
1347 GPRReg op1GPR
= op1
.gpr();
1348 GPRReg op2GPR
= op2
.gpr();
1349 GPRReg resultGPR
= result
.gpr();
1351 bool masqueradesAsUndefinedWatchpointValid
=
1352 masqueradesAsUndefinedWatchpointIsStillValid();
1354 if (masqueradesAsUndefinedWatchpointValid
) {
1356 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1359 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1360 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1362 MacroAssembler::NonZero
,
1363 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1364 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1367 // It seems that most of the time when programs do a == b where b may be either null/undefined
1368 // or an object, b is usually an object. Balance the branches to make that case fast.
1369 MacroAssembler::Jump rightNotCell
= m_jit
.branchIfNotCell(JSValueRegs(op2GPR
));
1371 // We know that within this branch, rightChild must be a cell.
1372 if (masqueradesAsUndefinedWatchpointValid
) {
1374 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1377 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1378 speculationCheck(BadType
, JSValueRegs(op2GPR
), rightChild
,
1380 MacroAssembler::NonZero
,
1381 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1382 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1385 // At this point we know that we can perform a straight-forward equality comparison on pointer
1386 // values because both left and right are pointers to objects that have no special equality
1388 MacroAssembler::Jump falseCase
= m_jit
.branch64(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1389 MacroAssembler::Jump trueCase
= m_jit
.jump();
1391 rightNotCell
.link(&m_jit
);
1393 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1394 // prove that it is either null or undefined.
1395 if (needsTypeCheck(rightChild
, SpecCell
| SpecOther
)) {
1396 m_jit
.move(op2GPR
, resultGPR
);
1397 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1400 JSValueRegs(op2GPR
), rightChild
, SpecCell
| SpecOther
,
1402 MacroAssembler::NotEqual
, resultGPR
,
1403 MacroAssembler::TrustedImm64(ValueNull
)));
1406 falseCase
.link(&m_jit
);
1407 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1408 MacroAssembler::Jump done
= m_jit
.jump();
1409 trueCase
.link(&m_jit
);
1410 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1413 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
1416 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
, Node
* branchNode
)
1418 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1419 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1421 SpeculateCellOperand
op1(this, leftChild
);
1422 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1423 GPRTemporary
result(this);
1425 GPRReg op1GPR
= op1
.gpr();
1426 GPRReg op2GPR
= op2
.gpr();
1427 GPRReg resultGPR
= result
.gpr();
1429 bool masqueradesAsUndefinedWatchpointValid
=
1430 masqueradesAsUndefinedWatchpointIsStillValid();
1432 if (masqueradesAsUndefinedWatchpointValid
) {
1434 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1437 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1438 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1440 MacroAssembler::NonZero
,
1441 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1442 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1445 // It seems that most of the time when programs do a == b where b may be either null/undefined
1446 // or an object, b is usually an object. Balance the branches to make that case fast.
1447 MacroAssembler::Jump rightNotCell
= m_jit
.branchIfNotCell(JSValueRegs(op2GPR
));
1449 // We know that within this branch, rightChild must be a cell.
1450 if (masqueradesAsUndefinedWatchpointValid
) {
1452 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1455 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1456 speculationCheck(BadType
, JSValueRegs(op2GPR
), rightChild
,
1458 MacroAssembler::NonZero
,
1459 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1460 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1463 // At this point we know that we can perform a straight-forward equality comparison on pointer
1464 // values because both left and right are pointers to objects that have no special equality
1466 branch64(MacroAssembler::Equal
, op1GPR
, op2GPR
, taken
);
1468 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1469 // prove that it is either null or undefined.
1470 if (!needsTypeCheck(rightChild
, SpecCell
| SpecOther
))
1471 rightNotCell
.link(&m_jit
);
1473 jump(notTaken
, ForceJump
);
1475 rightNotCell
.link(&m_jit
);
1476 m_jit
.move(op2GPR
, resultGPR
);
1477 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1480 JSValueRegs(op2GPR
), rightChild
, SpecCell
| SpecOther
, m_jit
.branch64(
1481 MacroAssembler::NotEqual
, resultGPR
,
1482 MacroAssembler::TrustedImm64(ValueNull
)));
1488 void SpeculativeJIT::compileInt32Compare(Node
* node
, MacroAssembler::RelationalCondition condition
)
1490 SpeculateInt32Operand
op1(this, node
->child1());
1491 SpeculateInt32Operand
op2(this, node
->child2());
1492 GPRTemporary
result(this, Reuse
, op1
, op2
);
1494 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), result
.gpr());
1496 // If we add a DataFormatBool, we should use it here.
1497 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1498 jsValueResult(result
.gpr(), m_currentNode
, DataFormatJSBoolean
);
1501 void SpeculativeJIT::compileInt52Compare(Node
* node
, MacroAssembler::RelationalCondition condition
)
1503 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
1504 SpeculateWhicheverInt52Operand
op2(this, node
->child2(), op1
);
1505 GPRTemporary
result(this, Reuse
, op1
, op2
);
1507 m_jit
.compare64(condition
, op1
.gpr(), op2
.gpr(), result
.gpr());
1509 // If we add a DataFormatBool, we should use it here.
1510 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1511 jsValueResult(result
.gpr(), m_currentNode
, DataFormatJSBoolean
);
1514 void SpeculativeJIT::compilePeepHoleInt52Branch(Node
* node
, Node
* branchNode
, JITCompiler::RelationalCondition condition
)
1516 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1517 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1519 // The branch instruction will branch to the taken block.
1520 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1521 if (taken
== nextBlock()) {
1522 condition
= JITCompiler::invert(condition
);
1523 BasicBlock
* tmp
= taken
;
1528 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
1529 SpeculateWhicheverInt52Operand
op2(this, node
->child2(), op1
);
1531 branch64(condition
, op1
.gpr(), op2
.gpr(), taken
);
1535 void SpeculativeJIT::compileDoubleCompare(Node
* node
, MacroAssembler::DoubleCondition condition
)
1537 SpeculateDoubleOperand
op1(this, node
->child1());
1538 SpeculateDoubleOperand
op2(this, node
->child2());
1539 GPRTemporary
result(this);
1541 m_jit
.move(TrustedImm32(ValueTrue
), result
.gpr());
1542 MacroAssembler::Jump trueCase
= m_jit
.branchDouble(condition
, op1
.fpr(), op2
.fpr());
1543 m_jit
.xor64(TrustedImm32(true), result
.gpr());
1544 trueCase
.link(&m_jit
);
1546 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
1549 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse
)
1551 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1552 GPRTemporary
result(this);
1553 GPRReg valueGPR
= value
.gpr();
1554 GPRReg resultGPR
= result
.gpr();
1555 GPRTemporary structure
;
1556 GPRReg structureGPR
= InvalidGPRReg
;
1557 GPRTemporary scratch
;
1558 GPRReg scratchGPR
= InvalidGPRReg
;
1560 bool masqueradesAsUndefinedWatchpointValid
=
1561 masqueradesAsUndefinedWatchpointIsStillValid();
1563 if (!masqueradesAsUndefinedWatchpointValid
) {
1564 // The masquerades as undefined case will use the structure register, so allocate it here.
1565 // Do this at the top of the function to avoid branching around a register allocation.
1566 GPRTemporary
realStructure(this);
1567 GPRTemporary
realScratch(this);
1568 structure
.adopt(realStructure
);
1569 scratch
.adopt(realScratch
);
1570 structureGPR
= structure
.gpr();
1571 scratchGPR
= scratch
.gpr();
1574 MacroAssembler::Jump notCell
= m_jit
.branchIfNotCell(JSValueRegs(valueGPR
));
1575 if (masqueradesAsUndefinedWatchpointValid
) {
1577 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(valueGPR
));
1580 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(valueGPR
));
1582 MacroAssembler::Jump isNotMasqueradesAsUndefined
=
1584 MacroAssembler::Zero
,
1585 MacroAssembler::Address(valueGPR
, JSCell::typeInfoFlagsOffset()),
1586 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
));
1588 m_jit
.emitLoadStructure(valueGPR
, structureGPR
, scratchGPR
);
1589 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
,
1591 MacroAssembler::Equal
,
1592 MacroAssembler::Address(structureGPR
, Structure::globalObjectOffset()),
1593 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1595 isNotMasqueradesAsUndefined
.link(&m_jit
);
1597 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1598 MacroAssembler::Jump done
= m_jit
.jump();
1600 notCell
.link(&m_jit
);
1602 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1603 m_jit
.move(valueGPR
, resultGPR
);
1604 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1606 JSValueRegs(valueGPR
), nodeUse
, SpecCell
| SpecOther
, m_jit
.branch64(
1607 MacroAssembler::NotEqual
,
1609 MacroAssembler::TrustedImm64(ValueNull
)));
1611 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1615 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
1618 void SpeculativeJIT::compileLogicalNot(Node
* node
)
1620 switch (node
->child1().useKind()) {
1621 case ObjectOrOtherUse
: {
1622 compileObjectOrOtherLogicalNot(node
->child1());
1627 SpeculateInt32Operand
value(this, node
->child1());
1628 GPRTemporary
result(this, Reuse
, value
);
1629 m_jit
.compare32(MacroAssembler::Equal
, value
.gpr(), MacroAssembler::TrustedImm32(0), result
.gpr());
1630 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1631 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
1635 case DoubleRepUse
: {
1636 SpeculateDoubleOperand
value(this, node
->child1());
1637 FPRTemporary
scratch(this);
1638 GPRTemporary
result(this);
1639 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
1640 MacroAssembler::Jump nonZero
= m_jit
.branchDoubleNonZero(value
.fpr(), scratch
.fpr());
1641 m_jit
.xor32(TrustedImm32(true), result
.gpr());
1642 nonZero
.link(&m_jit
);
1643 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
1648 if (!needsTypeCheck(node
->child1(), SpecBoolean
)) {
1649 SpeculateBooleanOperand
value(this, node
->child1());
1650 GPRTemporary
result(this, Reuse
, value
);
1652 m_jit
.move(value
.gpr(), result
.gpr());
1653 m_jit
.xor64(TrustedImm32(true), result
.gpr());
1655 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
1659 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
1660 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
1662 m_jit
.move(value
.gpr(), result
.gpr());
1663 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
1665 JSValueRegs(value
.gpr()), node
->child1(), SpecBoolean
, m_jit
.branchTest64(
1666 JITCompiler::NonZero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1667 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue
)), result
.gpr());
1669 // If we add a DataFormatBool, we should use it here.
1670 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
1675 JSValueOperand
arg1(this, node
->child1());
1676 GPRTemporary
result(this);
1678 GPRReg arg1GPR
= arg1
.gpr();
1679 GPRReg resultGPR
= result
.gpr();
1683 m_jit
.move(arg1GPR
, resultGPR
);
1684 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), resultGPR
);
1685 JITCompiler::Jump slowCase
= m_jit
.branchTest64(JITCompiler::NonZero
, resultGPR
, TrustedImm32(static_cast<int32_t>(~1)));
1687 addSlowPathGenerator(
1688 slowPathCall(slowCase
, this, operationConvertJSValueToBoolean
, resultGPR
, arg1GPR
));
1690 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue
)), resultGPR
);
1691 jsValueResult(resultGPR
, node
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
1695 return compileStringZeroLength(node
);
1698 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
1703 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse
, BasicBlock
* taken
, BasicBlock
* notTaken
)
1705 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1706 GPRTemporary
scratch(this);
1707 GPRTemporary structure
;
1708 GPRReg valueGPR
= value
.gpr();
1709 GPRReg scratchGPR
= scratch
.gpr();
1710 GPRReg structureGPR
= InvalidGPRReg
;
1712 if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
1713 GPRTemporary
realStructure(this);
1714 structure
.adopt(realStructure
);
1715 structureGPR
= structure
.gpr();
1718 MacroAssembler::Jump notCell
= m_jit
.branchIfNotCell(JSValueRegs(valueGPR
));
1719 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1721 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(valueGPR
));
1724 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(valueGPR
));
1726 JITCompiler::Jump isNotMasqueradesAsUndefined
= m_jit
.branchTest8(
1728 MacroAssembler::Address(valueGPR
, JSCell::typeInfoFlagsOffset()),
1729 TrustedImm32(MasqueradesAsUndefined
));
1731 m_jit
.emitLoadStructure(valueGPR
, structureGPR
, scratchGPR
);
1732 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
,
1734 MacroAssembler::Equal
,
1735 MacroAssembler::Address(structureGPR
, Structure::globalObjectOffset()),
1736 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1738 isNotMasqueradesAsUndefined
.link(&m_jit
);
1740 jump(taken
, ForceJump
);
1742 notCell
.link(&m_jit
);
1744 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1745 m_jit
.move(valueGPR
, scratchGPR
);
1746 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), scratchGPR
);
1748 JSValueRegs(valueGPR
), nodeUse
, SpecCell
| SpecOther
, m_jit
.branch64(
1749 MacroAssembler::NotEqual
, scratchGPR
, MacroAssembler::TrustedImm64(ValueNull
)));
1753 noResult(m_currentNode
);
1756 void SpeculativeJIT::emitBranch(Node
* node
)
1758 BasicBlock
* taken
= node
->branchData()->taken
.block
;
1759 BasicBlock
* notTaken
= node
->branchData()->notTaken
.block
;
1761 switch (node
->child1().useKind()) {
1762 case ObjectOrOtherUse
: {
1763 emitObjectOrOtherBranch(node
->child1(), taken
, notTaken
);
1768 case DoubleRepUse
: {
1769 if (node
->child1().useKind() == Int32Use
) {
1770 bool invert
= false;
1772 if (taken
== nextBlock()) {
1774 BasicBlock
* tmp
= taken
;
1779 SpeculateInt32Operand
value(this, node
->child1());
1780 branchTest32(invert
? MacroAssembler::Zero
: MacroAssembler::NonZero
, value
.gpr(), taken
);
1782 SpeculateDoubleOperand
value(this, node
->child1());
1783 FPRTemporary
scratch(this);
1784 branchDoubleNonZero(value
.fpr(), scratch
.fpr(), taken
);
1794 emitStringBranch(node
->child1(), taken
, notTaken
);
1800 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
1801 GPRReg valueGPR
= value
.gpr();
1803 if (node
->child1().useKind() == BooleanUse
) {
1804 if (!needsTypeCheck(node
->child1(), SpecBoolean
)) {
1805 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
1807 if (taken
== nextBlock()) {
1808 condition
= MacroAssembler::Zero
;
1809 BasicBlock
* tmp
= taken
;
1814 branchTest32(condition
, valueGPR
, TrustedImm32(true), taken
);
1817 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken
);
1818 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken
);
1820 typeCheck(JSValueRegs(valueGPR
), node
->child1(), SpecBoolean
, m_jit
.jump());
1824 GPRTemporary
result(this);
1825 GPRReg resultGPR
= result
.gpr();
1827 if (node
->child1()->prediction() & SpecInt32
) {
1828 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken
);
1829 branch64(MacroAssembler::AboveOrEqual
, valueGPR
, GPRInfo::tagTypeNumberRegister
, taken
);
1832 if (node
->child1()->prediction() & SpecBoolean
) {
1833 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken
);
1834 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken
);
1839 silentSpillAllRegisters(resultGPR
);
1840 callOperation(operationConvertJSValueToBoolean
, resultGPR
, valueGPR
);
1841 silentFillAllRegisters(resultGPR
);
1843 branchTest32(MacroAssembler::NonZero
, resultGPR
, taken
);
1847 noResult(node
, UseChildrenCalledExplicitly
);
1852 DFG_CRASH(m_jit
.graph(), m_currentNode
, "Bad use kind");
1856 void SpeculativeJIT::compile(Node
* node
)
1858 NodeType op
= node
->op();
1860 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1861 m_jit
.clearRegisterAllocationOffsets();
1866 case DoubleConstant
:
1868 case PhantomDirectArguments
:
1869 case PhantomClonedArguments
:
1870 initConstantInfo(node
);
1874 speculate(node
, node
->child1());
1875 switch (node
->child1().useKind()) {
1877 case DoubleRepRealUse
:
1878 case DoubleRepMachineIntUse
: {
1879 SpeculateDoubleOperand
op(this, node
->child1());
1880 FPRTemporary
scratch(this, op
);
1881 m_jit
.moveDouble(op
.fpr(), scratch
.fpr());
1882 doubleResult(scratch
.fpr(), node
);
1886 SpeculateInt52Operand
op(this, node
->child1());
1887 GPRTemporary
result(this, Reuse
, op
);
1888 m_jit
.move(op
.gpr(), result
.gpr());
1889 int52Result(result
.gpr(), node
);
1893 JSValueOperand
op(this, node
->child1());
1894 GPRTemporary
result(this, Reuse
, op
);
1895 m_jit
.move(op
.gpr(), result
.gpr());
1896 jsValueResult(result
.gpr(), node
);
1904 AbstractValue
& value
= m_state
.variables().operand(node
->local());
1906 // If the CFA is tracking this variable and it found that the variable
1907 // cannot have been assigned, then don't attempt to proceed.
1908 if (value
.isClear()) {
1909 m_compileOkay
= false;
1913 switch (node
->variableAccessData()->flushFormat()) {
1914 case FlushedDouble
: {
1915 FPRTemporary
result(this);
1916 m_jit
.loadDouble(JITCompiler::addressFor(node
->machineLocal()), result
.fpr());
1917 VirtualRegister virtualRegister
= node
->virtualRegister();
1918 m_fprs
.retain(result
.fpr(), virtualRegister
, SpillOrderDouble
);
1919 generationInfoFromVirtualRegister(virtualRegister
).initDouble(node
, node
->refCount(), result
.fpr());
1923 case FlushedInt32
: {
1924 GPRTemporary
result(this);
1925 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1927 // Like int32Result, but don't useChildren - our children are phi nodes,
1928 // and don't represent values within this dataflow with virtual registers.
1929 VirtualRegister virtualRegister
= node
->virtualRegister();
1930 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
1931 generationInfoFromVirtualRegister(virtualRegister
).initInt32(node
, node
->refCount(), result
.gpr());
1935 case FlushedInt52
: {
1936 GPRTemporary
result(this);
1937 m_jit
.load64(JITCompiler::addressFor(node
->machineLocal()), result
.gpr());
1939 VirtualRegister virtualRegister
= node
->virtualRegister();
1940 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1941 generationInfoFromVirtualRegister(virtualRegister
).initInt52(node
, node
->refCount(), result
.gpr());
1946 GPRTemporary
result(this);
1947 m_jit
.load64(JITCompiler::addressFor(node
->machineLocal()), result
.gpr());
1949 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1950 // and don't represent values within this dataflow with virtual registers.
1951 VirtualRegister virtualRegister
= node
->virtualRegister();
1952 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1955 if (isCellSpeculation(value
.m_type
))
1956 format
= DataFormatJSCell
;
1957 else if (isBooleanSpeculation(value
.m_type
))
1958 format
= DataFormatJSBoolean
;
1960 format
= DataFormatJS
;
1962 generationInfoFromVirtualRegister(virtualRegister
).initJSValue(node
, node
->refCount(), result
.gpr(), format
);
1968 case GetLocalUnlinked
: {
1969 GPRTemporary
result(this);
1971 m_jit
.load64(JITCompiler::addressFor(node
->unlinkedMachineLocal()), result
.gpr());
1973 jsValueResult(result
.gpr(), node
);
1978 compileMovHint(m_currentNode
);
1984 recordSetLocal(m_currentNode
->unlinkedLocal(), VirtualRegister(), DataFormatDead
);
1990 switch (node
->variableAccessData()->flushFormat()) {
1991 case FlushedDouble
: {
1992 SpeculateDoubleOperand
value(this, node
->child1());
1993 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
->machineLocal()));
1995 // Indicate that it's no longer necessary to retrieve the value of
1996 // this bytecode variable from registers or other locations in the stack,
1997 // but that it is stored as a double.
1998 recordSetLocal(DataFormatDouble
);
2002 case FlushedInt32
: {
2003 SpeculateInt32Operand
value(this, node
->child1());
2004 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->machineLocal()));
2006 recordSetLocal(DataFormatInt32
);
2010 case FlushedInt52
: {
2011 SpeculateInt52Operand
value(this, node
->child1());
2012 m_jit
.store64(value
.gpr(), JITCompiler::addressFor(node
->machineLocal()));
2014 recordSetLocal(DataFormatInt52
);
2019 SpeculateCellOperand
cell(this, node
->child1());
2020 GPRReg cellGPR
= cell
.gpr();
2021 m_jit
.store64(cellGPR
, JITCompiler::addressFor(node
->machineLocal()));
2023 recordSetLocal(DataFormatCell
);
2027 case FlushedBoolean
: {
2028 SpeculateBooleanOperand
boolean(this, node
->child1());
2029 m_jit
.store64(boolean
.gpr(), JITCompiler::addressFor(node
->machineLocal()));
2031 recordSetLocal(DataFormatBoolean
);
2035 case FlushedJSValue
: {
2036 JSValueOperand
value(this, node
->child1());
2037 m_jit
.store64(value
.gpr(), JITCompiler::addressFor(node
->machineLocal()));
2039 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
2044 DFG_CRASH(m_jit
.graph(), node
, "Bad flush format");
2052 // This is a no-op; it just marks the fact that the argument is being used.
2053 // But it may be profitable to use this as a hook to run speculation checks
2054 // on arguments, thereby allowing us to trivially eliminate such checks if
2055 // the argument is not used.
2056 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
2062 if (node
->child1()->isInt32Constant()) {
2063 SpeculateInt32Operand
op2(this, node
->child2());
2064 GPRTemporary
result(this, Reuse
, op2
);
2066 bitOp(op
, node
->child1()->asInt32(), op2
.gpr(), result
.gpr());
2068 int32Result(result
.gpr(), node
);
2069 } else if (node
->child2()->isInt32Constant()) {
2070 SpeculateInt32Operand
op1(this, node
->child1());
2071 GPRTemporary
result(this, Reuse
, op1
);
2073 bitOp(op
, node
->child2()->asInt32(), op1
.gpr(), result
.gpr());
2075 int32Result(result
.gpr(), node
);
2077 SpeculateInt32Operand
op1(this, node
->child1());
2078 SpeculateInt32Operand
op2(this, node
->child2());
2079 GPRTemporary
result(this, Reuse
, op1
, op2
);
2081 GPRReg reg1
= op1
.gpr();
2082 GPRReg reg2
= op2
.gpr();
2083 bitOp(op
, reg1
, reg2
, result
.gpr());
2085 int32Result(result
.gpr(), node
);
2092 if (node
->child2()->isInt32Constant()) {
2093 SpeculateInt32Operand
op1(this, node
->child1());
2094 GPRTemporary
result(this, Reuse
, op1
);
2096 shiftOp(op
, op1
.gpr(), node
->child2()->asInt32() & 0x1f, result
.gpr());
2098 int32Result(result
.gpr(), node
);
2100 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
2101 SpeculateInt32Operand
op1(this, node
->child1());
2102 SpeculateInt32Operand
op2(this, node
->child2());
2103 GPRTemporary
result(this, Reuse
, op1
);
2105 GPRReg reg1
= op1
.gpr();
2106 GPRReg reg2
= op2
.gpr();
2107 shiftOp(op
, reg1
, reg2
, result
.gpr());
2109 int32Result(result
.gpr(), node
);
2113 case UInt32ToNumber
: {
2114 compileUInt32ToNumber(node
);
2118 case DoubleAsInt32
: {
2119 compileDoubleAsInt32(node
);
2123 case ValueToInt32
: {
2124 compileValueToInt32(node
);
2129 compileDoubleRep(node
);
2134 compileValueRep(node
);
2139 switch (node
->child1().useKind()) {
2141 SpeculateInt32Operand
operand(this, node
->child1());
2142 GPRTemporary
result(this, Reuse
, operand
);
2144 m_jit
.signExtend32ToPtr(operand
.gpr(), result
.gpr());
2146 strictInt52Result(result
.gpr(), node
);
2150 case MachineIntUse
: {
2151 GPRTemporary
result(this);
2152 GPRReg resultGPR
= result
.gpr();
2154 convertMachineInt(node
->child1(), resultGPR
);
2156 strictInt52Result(resultGPR
, node
);
2160 case DoubleRepMachineIntUse
: {
2161 SpeculateDoubleOperand
value(this, node
->child1());
2162 FPRReg valueFPR
= value
.fpr();
2164 GPRFlushedCallResult
result(this);
2165 GPRReg resultGPR
= result
.gpr();
2169 callOperation(operationConvertDoubleToInt52
, resultGPR
, valueFPR
);
2172 JSValueRegs(), node
->child1(), SpecInt52AsDouble
,
2174 JITCompiler::Equal
, resultGPR
,
2175 JITCompiler::TrustedImm64(JSValue::notInt52
)));
2177 strictInt52Result(resultGPR
, node
);
2182 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
2188 JSValueOperand
op1(this, node
->child1());
2189 JSValueOperand
op2(this, node
->child2());
2191 GPRReg op1GPR
= op1
.gpr();
2192 GPRReg op2GPR
= op2
.gpr();
2196 GPRFlushedCallResult
result(this);
2197 if (isKnownNotNumber(node
->child1().node()) || isKnownNotNumber(node
->child2().node()))
2198 callOperation(operationValueAddNotNumber
, result
.gpr(), op1GPR
, op2GPR
);
2200 callOperation(operationValueAdd
, result
.gpr(), op1GPR
, op2GPR
);
2202 jsValueResult(result
.gpr(), node
);
2211 compileArithClz32(node
);
2215 compileMakeRope(node
);
2219 compileArithSub(node
);
2223 compileArithNegate(node
);
2227 compileArithMul(node
);
2231 compileArithDiv(node
);
2236 compileArithMod(node
);
2241 switch (node
->child1().useKind()) {
2243 SpeculateStrictInt32Operand
op1(this, node
->child1());
2244 GPRTemporary
result(this);
2245 GPRTemporary
scratch(this);
2247 m_jit
.move(op1
.gpr(), result
.gpr());
2248 m_jit
.rshift32(result
.gpr(), MacroAssembler::TrustedImm32(31), scratch
.gpr());
2249 m_jit
.add32(scratch
.gpr(), result
.gpr());
2250 m_jit
.xor32(scratch
.gpr(), result
.gpr());
2251 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, result
.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2252 int32Result(result
.gpr(), node
);
2256 case DoubleRepUse
: {
2257 SpeculateDoubleOperand
op1(this, node
->child1());
2258 FPRTemporary
result(this);
2260 m_jit
.absDouble(op1
.fpr(), result
.fpr());
2261 doubleResult(result
.fpr(), node
);
2266 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
2274 switch (node
->binaryUseKind()) {
2276 SpeculateStrictInt32Operand
op1(this, node
->child1());
2277 SpeculateStrictInt32Operand
op2(this, node
->child2());
2278 GPRTemporary
result(this, Reuse
, op1
);
2280 MacroAssembler::Jump op1Less
= m_jit
.branch32(op
== ArithMin
? MacroAssembler::LessThan
: MacroAssembler::GreaterThan
, op1
.gpr(), op2
.gpr());
2281 m_jit
.move(op2
.gpr(), result
.gpr());
2282 if (op1
.gpr() != result
.gpr()) {
2283 MacroAssembler::Jump done
= m_jit
.jump();
2284 op1Less
.link(&m_jit
);
2285 m_jit
.move(op1
.gpr(), result
.gpr());
2288 op1Less
.link(&m_jit
);
2290 int32Result(result
.gpr(), node
);
2294 case DoubleRepUse
: {
2295 SpeculateDoubleOperand
op1(this, node
->child1());
2296 SpeculateDoubleOperand
op2(this, node
->child2());
2297 FPRTemporary
result(this, op1
);
2299 FPRReg op1FPR
= op1
.fpr();
2300 FPRReg op2FPR
= op2
.fpr();
2301 FPRReg resultFPR
= result
.fpr();
2303 MacroAssembler::JumpList done
;
2305 MacroAssembler::Jump op1Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleLessThan
: MacroAssembler::DoubleGreaterThan
, op1FPR
, op2FPR
);
2307 // op2 is either the lesser one or one of them is NaN
2308 MacroAssembler::Jump op2Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleGreaterThanOrEqual
: MacroAssembler::DoubleLessThanOrEqual
, op1FPR
, op2FPR
);
2310 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2311 // op1 + op2 and putting it into result.
2312 m_jit
.addDouble(op1FPR
, op2FPR
, resultFPR
);
2313 done
.append(m_jit
.jump());
2315 op2Less
.link(&m_jit
);
2316 m_jit
.moveDouble(op2FPR
, resultFPR
);
2318 if (op1FPR
!= resultFPR
) {
2319 done
.append(m_jit
.jump());
2321 op1Less
.link(&m_jit
);
2322 m_jit
.moveDouble(op1FPR
, resultFPR
);
2324 op1Less
.link(&m_jit
);
2328 doubleResult(resultFPR
, node
);
2333 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
2340 compileArithPow(node
);
2344 compileArithSqrt(node
);
2348 SpeculateDoubleOperand
op1(this, node
->child1());
2349 FPRTemporary
result(this, op1
);
2351 m_jit
.convertDoubleToFloat(op1
.fpr(), result
.fpr());
2352 m_jit
.convertFloatToDouble(result
.fpr(), result
.fpr());
2354 doubleResult(result
.fpr(), node
);
2359 compileArithRound(node
);
2363 SpeculateDoubleOperand
op1(this, node
->child1());
2364 FPRReg op1FPR
= op1
.fpr();
2368 FPRResult
result(this);
2369 callOperation(sin
, result
.fpr(), op1FPR
);
2370 doubleResult(result
.fpr(), node
);
2375 SpeculateDoubleOperand
op1(this, node
->child1());
2376 FPRReg op1FPR
= op1
.fpr();
2380 FPRResult
result(this);
2381 callOperation(cos
, result
.fpr(), op1FPR
);
2382 doubleResult(result
.fpr(), node
);
2387 compileArithLog(node
);
2391 compileLogicalNot(node
);
2395 if (compare(node
, JITCompiler::LessThan
, JITCompiler::DoubleLessThan
, operationCompareLess
))
2400 if (compare(node
, JITCompiler::LessThanOrEqual
, JITCompiler::DoubleLessThanOrEqual
, operationCompareLessEq
))
2404 case CompareGreater
:
2405 if (compare(node
, JITCompiler::GreaterThan
, JITCompiler::DoubleGreaterThan
, operationCompareGreater
))
2409 case CompareGreaterEq
:
2410 if (compare(node
, JITCompiler::GreaterThanOrEqual
, JITCompiler::DoubleGreaterThanOrEqual
, operationCompareGreaterEq
))
2414 case CompareEqConstant
:
2415 ASSERT(node
->child2()->asJSValue().isNull());
2416 if (nonSpeculativeCompareNull(node
, node
->child1()))
2421 if (compare(node
, JITCompiler::Equal
, JITCompiler::DoubleEqual
, operationCompareEq
))
2425 case CompareStrictEq
:
2426 if (compileStrictEq(node
))
2430 case StringCharCodeAt
: {
2431 compileGetCharCodeAt(node
);
2435 case StringCharAt
: {
2436 // Relies on StringCharAt node having same basic layout as GetByVal
2437 compileGetByValOnString(node
);
2441 case StringFromCharCode
: {
2442 compileFromCharCode(node
);
2452 case ArrayifyToStructure
: {
2458 switch (node
->arrayMode().type()) {
2459 case Array::SelectUsingPredictions
:
2460 case Array::ForceExit
:
2461 DFG_CRASH(m_jit
.graph(), node
, "Bad array mode type");
2463 case Array::Generic
: {
2464 JSValueOperand
base(this, node
->child1());
2465 JSValueOperand
property(this, node
->child2());
2466 GPRReg baseGPR
= base
.gpr();
2467 GPRReg propertyGPR
= property
.gpr();
2470 GPRFlushedCallResult
result(this);
2471 callOperation(operationGetByVal
, result
.gpr(), baseGPR
, propertyGPR
);
2473 jsValueResult(result
.gpr(), node
);
2477 case Array::Contiguous
: {
2478 if (node
->arrayMode().isInBounds()) {
2479 SpeculateStrictInt32Operand
property(this, node
->child2());
2480 StorageOperand
storage(this, node
->child3());
2482 GPRReg propertyReg
= property
.gpr();
2483 GPRReg storageReg
= storage
.gpr();
2488 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2490 GPRTemporary
result(this);
2491 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.gpr());
2492 if (node
->arrayMode().isSaneChain()) {
2493 ASSERT(node
->arrayMode().type() == Array::Contiguous
);
2494 JITCompiler::Jump notHole
= m_jit
.branchTest64(
2495 MacroAssembler::NonZero
, result
.gpr());
2496 m_jit
.move(TrustedImm64(JSValue::encode(jsUndefined())), result
.gpr());
2497 notHole
.link(&m_jit
);
2500 LoadFromHole
, JSValueRegs(), 0,
2501 m_jit
.branchTest64(MacroAssembler::Zero
, result
.gpr()));
2503 jsValueResult(result
.gpr(), node
, node
->arrayMode().type() == Array::Int32
? DataFormatJSInt32
: DataFormatJS
);
2507 SpeculateCellOperand
base(this, node
->child1());
2508 SpeculateStrictInt32Operand
property(this, node
->child2());
2509 StorageOperand
storage(this, node
->child3());
2511 GPRReg baseReg
= base
.gpr();
2512 GPRReg propertyReg
= property
.gpr();
2513 GPRReg storageReg
= storage
.gpr();
2518 GPRTemporary
result(this);
2519 GPRReg resultReg
= result
.gpr();
2521 MacroAssembler::JumpList slowCases
;
2523 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2525 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), resultReg
);
2526 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, resultReg
));
2528 addSlowPathGenerator(
2530 slowCases
, this, operationGetByValArrayInt
,
2531 result
.gpr(), baseReg
, propertyReg
));
2533 jsValueResult(resultReg
, node
);
2537 case Array::Double
: {
2538 if (node
->arrayMode().isInBounds()) {
2539 SpeculateStrictInt32Operand
property(this, node
->child2());
2540 StorageOperand
storage(this, node
->child3());
2542 GPRReg propertyReg
= property
.gpr();
2543 GPRReg storageReg
= storage
.gpr();
2548 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2550 FPRTemporary
result(this);
2551 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.fpr());
2552 if (!node
->arrayMode().isSaneChain())
2553 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, result
.fpr(), result
.fpr()));
2554 doubleResult(result
.fpr(), node
);
2558 SpeculateCellOperand
base(this, node
->child1());
2559 SpeculateStrictInt32Operand
property(this, node
->child2());
2560 StorageOperand
storage(this, node
->child3());
2562 GPRReg baseReg
= base
.gpr();
2563 GPRReg propertyReg
= property
.gpr();
2564 GPRReg storageReg
= storage
.gpr();
2569 GPRTemporary
result(this);
2570 FPRTemporary
temp(this);
2571 GPRReg resultReg
= result
.gpr();
2572 FPRReg tempReg
= temp
.fpr();
2574 MacroAssembler::JumpList slowCases
;
2576 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2578 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), tempReg
);
2579 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempReg
, tempReg
));
2580 boxDouble(tempReg
, resultReg
);
2582 addSlowPathGenerator(
2584 slowCases
, this, operationGetByValArrayInt
,
2585 result
.gpr(), baseReg
, propertyReg
));
2587 jsValueResult(resultReg
, node
);
2591 case Array::ArrayStorage
:
2592 case Array::SlowPutArrayStorage
: {
2593 if (node
->arrayMode().isInBounds()) {
2594 SpeculateStrictInt32Operand
property(this, node
->child2());
2595 StorageOperand
storage(this, node
->child3());
2597 GPRReg propertyReg
= property
.gpr();
2598 GPRReg storageReg
= storage
.gpr();
2603 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2605 GPRTemporary
result(this);
2606 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), result
.gpr());
2607 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchTest64(MacroAssembler::Zero
, result
.gpr()));
2609 jsValueResult(result
.gpr(), node
);
2613 SpeculateCellOperand
base(this, node
->child1());
2614 SpeculateStrictInt32Operand
property(this, node
->child2());
2615 StorageOperand
storage(this, node
->child3());
2617 GPRReg baseReg
= base
.gpr();
2618 GPRReg propertyReg
= property
.gpr();
2619 GPRReg storageReg
= storage
.gpr();
2624 GPRTemporary
result(this);
2625 GPRReg resultReg
= result
.gpr();
2627 MacroAssembler::JumpList slowCases
;
2629 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2631 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), resultReg
);
2632 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, resultReg
));
2634 addSlowPathGenerator(
2636 slowCases
, this, operationGetByValArrayInt
,
2637 result
.gpr(), baseReg
, propertyReg
));
2639 jsValueResult(resultReg
, node
);
2643 compileGetByValOnString(node
);
2645 case Array::DirectArguments
:
2646 compileGetByValOnDirectArguments(node
);
2648 case Array::ScopedArguments
:
2649 compileGetByValOnScopedArguments(node
);
2652 TypedArrayType type
= node
->arrayMode().typedArrayType();
2654 compileGetByValOnIntTypedArray(node
, type
);
2656 compileGetByValOnFloatTypedArray(node
, type
);
2661 case PutByValDirect
:
2663 case PutByValAlias
: {
2664 Edge child1
= m_jit
.graph().varArgChild(node
, 0);
2665 Edge child2
= m_jit
.graph().varArgChild(node
, 1);
2666 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
2667 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
2669 ArrayMode arrayMode
= node
->arrayMode().modeForPut();
2670 bool alreadyHandled
= false;
2672 switch (arrayMode
.type()) {
2673 case Array::SelectUsingPredictions
:
2674 case Array::ForceExit
:
2675 DFG_CRASH(m_jit
.graph(), node
, "Bad array mode type");
2677 case Array::Generic
: {
2678 DFG_ASSERT(m_jit
.graph(), node
, node
->op() == PutByVal
|| node
->op() == PutByValDirect
);
2680 JSValueOperand
arg1(this, child1
);
2681 JSValueOperand
arg2(this, child2
);
2682 JSValueOperand
arg3(this, child3
);
2683 GPRReg arg1GPR
= arg1
.gpr();
2684 GPRReg arg2GPR
= arg2
.gpr();
2685 GPRReg arg3GPR
= arg3
.gpr();
2687 if (node
->op() == PutByValDirect
)
2688 callOperation(m_jit
.isStrictModeFor(node
->origin
.semantic
) ? operationPutByValDirectStrict
: operationPutByValDirectNonStrict
, arg1GPR
, arg2GPR
, arg3GPR
);
2690 callOperation(m_jit
.isStrictModeFor(node
->origin
.semantic
) ? operationPutByValStrict
: operationPutByValNonStrict
, arg1GPR
, arg2GPR
, arg3GPR
);
2693 alreadyHandled
= true;
2703 // FIXME: the base may not be necessary for some array access modes. But we have to
2704 // keep it alive to this point, so it's likely to be in a register anyway. Likely
2705 // no harm in locking it here.
2706 SpeculateCellOperand
base(this, child1
);
2707 SpeculateStrictInt32Operand
property(this, child2
);
2709 GPRReg baseReg
= base
.gpr();
2710 GPRReg propertyReg
= property
.gpr();
2712 switch (arrayMode
.type()) {
2714 case Array::Contiguous
: {
2715 JSValueOperand
value(this, child3
, ManualOperandSpeculation
);
2717 GPRReg valueReg
= value
.gpr();
2722 if (arrayMode
.type() == Array::Int32
) {
2724 JSValueRegs(valueReg
), child3
, SpecInt32
,
2726 MacroAssembler::Below
, valueReg
, GPRInfo::tagTypeNumberRegister
));
2729 StorageOperand
storage(this, child4
);
2730 GPRReg storageReg
= storage
.gpr();
2732 if (node
->op() == PutByValAlias
) {
2733 // Store the value to the array.
2734 GPRReg propertyReg
= property
.gpr();
2735 GPRReg valueReg
= value
.gpr();
2736 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2742 GPRTemporary temporary
;
2743 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
2745 MacroAssembler::Jump slowCase
;
2747 if (arrayMode
.isInBounds()) {
2749 OutOfBounds
, JSValueRegs(), 0,
2750 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2752 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2754 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
2756 if (!arrayMode
.isOutOfBounds())
2757 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
2759 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
2760 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2762 inBounds
.link(&m_jit
);
2765 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2772 if (arrayMode
.isOutOfBounds()) {
2773 if (node
->op() == PutByValDirect
) {
2774 addSlowPathGenerator(slowPathCall(
2776 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
2777 NoResult
, baseReg
, propertyReg
, valueReg
));
2779 addSlowPathGenerator(slowPathCall(
2781 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2782 NoResult
, baseReg
, propertyReg
, valueReg
));
2786 noResult(node
, UseChildrenCalledExplicitly
);
2790 case Array::Double
: {
2791 compileDoublePutByVal(node
, base
, property
);
2795 case Array::ArrayStorage
:
2796 case Array::SlowPutArrayStorage
: {
2797 JSValueOperand
value(this, child3
);
2799 GPRReg valueReg
= value
.gpr();
2804 StorageOperand
storage(this, child4
);
2805 GPRReg storageReg
= storage
.gpr();
2807 if (node
->op() == PutByValAlias
) {
2808 // Store the value to the array.
2809 GPRReg propertyReg
= property
.gpr();
2810 GPRReg valueReg
= value
.gpr();
2811 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2817 GPRTemporary temporary
;
2818 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
2820 MacroAssembler::JumpList slowCases
;
2822 MacroAssembler::Jump beyondArrayBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2823 if (!arrayMode
.isOutOfBounds())
2824 speculationCheck(OutOfBounds
, JSValueRegs(), 0, beyondArrayBounds
);
2826 slowCases
.append(beyondArrayBounds
);
2828 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2829 if (arrayMode
.isInBounds()) {
2831 StoreToHole
, JSValueRegs(), 0,
2832 m_jit
.branchTest64(MacroAssembler::Zero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]))));
2834 MacroAssembler::Jump notHoleValue
= m_jit
.branchTest64(MacroAssembler::NonZero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2835 if (arrayMode
.isSlowPut()) {
2836 // This is sort of strange. If we wanted to optimize this code path, we would invert
2837 // the above branch. But it's simply not worth it since this only happens if we're
2838 // already having a bad time.
2839 slowCases
.append(m_jit
.jump());
2841 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, ArrayStorage::numValuesInVectorOffset()));
2843 // If we're writing to a hole we might be growing the array;
2844 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2845 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
2846 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2848 lengthDoesNotNeedUpdate
.link(&m_jit
);
2850 notHoleValue
.link(&m_jit
);
2853 // Store the value to the array.
2854 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2861 if (!slowCases
.empty()) {
2862 if (node
->op() == PutByValDirect
) {
2863 addSlowPathGenerator(slowPathCall(
2865 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
2866 NoResult
, baseReg
, propertyReg
, valueReg
));
2868 addSlowPathGenerator(slowPathCall(
2870 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2871 NoResult
, baseReg
, propertyReg
, valueReg
));
2875 noResult(node
, UseChildrenCalledExplicitly
);
2880 TypedArrayType type
= arrayMode
.typedArrayType();
2882 compilePutByValForIntTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2884 compilePutByValForFloatTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2891 if (compileRegExpExec(node
))
2893 if (!node
->adjustedRefCount()) {
2894 SpeculateCellOperand
base(this, node
->child1());
2895 SpeculateCellOperand
argument(this, node
->child2());
2896 GPRReg baseGPR
= base
.gpr();
2897 GPRReg argumentGPR
= argument
.gpr();
2900 GPRFlushedCallResult
result(this);
2901 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2903 // Must use jsValueResult because otherwise we screw up register
2904 // allocation, which thinks that this node has a result.
2905 jsValueResult(result
.gpr(), node
);
2909 SpeculateCellOperand
base(this, node
->child1());
2910 SpeculateCellOperand
argument(this, node
->child2());
2911 GPRReg baseGPR
= base
.gpr();
2912 GPRReg argumentGPR
= argument
.gpr();
2915 GPRFlushedCallResult
result(this);
2916 callOperation(operationRegExpExec
, result
.gpr(), baseGPR
, argumentGPR
);
2918 jsValueResult(result
.gpr(), node
);
2923 SpeculateCellOperand
base(this, node
->child1());
2924 SpeculateCellOperand
argument(this, node
->child2());
2925 GPRReg baseGPR
= base
.gpr();
2926 GPRReg argumentGPR
= argument
.gpr();
2929 GPRFlushedCallResult
result(this);
2930 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2932 // If we add a DataFormatBool, we should use it here.
2933 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
2934 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
2939 ASSERT(node
->arrayMode().isJSArray());
2941 SpeculateCellOperand
base(this, node
->child1());
2942 GPRTemporary
storageLength(this);
2944 GPRReg baseGPR
= base
.gpr();
2945 GPRReg storageLengthGPR
= storageLength
.gpr();
2947 StorageOperand
storage(this, node
->child3());
2948 GPRReg storageGPR
= storage
.gpr();
2950 switch (node
->arrayMode().type()) {
2952 case Array::Contiguous
: {
2953 JSValueOperand
value(this, node
->child2(), ManualOperandSpeculation
);
2954 GPRReg valueGPR
= value
.gpr();
2956 if (node
->arrayMode().type() == Array::Int32
) {
2958 JSValueRegs(valueGPR
), node
->child2(), SpecInt32
,
2960 MacroAssembler::Below
, valueGPR
, GPRInfo::tagTypeNumberRegister
));
2963 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2964 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2965 m_jit
.store64(valueGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
2966 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2967 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2968 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
2970 addSlowPathGenerator(
2972 slowPath
, this, operationArrayPush
, storageLengthGPR
,
2973 valueGPR
, baseGPR
));
2975 jsValueResult(storageLengthGPR
, node
);
2979 case Array::Double
: {
2980 SpeculateDoubleOperand
value(this, node
->child2());
2981 FPRReg valueFPR
= value
.fpr();
2984 JSValueRegs(), node
->child2(), SpecDoubleReal
,
2985 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, valueFPR
, valueFPR
));
2987 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2988 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2989 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
2990 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2991 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2992 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
2994 addSlowPathGenerator(
2996 slowPath
, this, operationArrayPushDouble
, storageLengthGPR
,
2997 valueFPR
, baseGPR
));
2999 jsValueResult(storageLengthGPR
, node
);
3003 case Array::ArrayStorage
: {
3004 JSValueOperand
value(this, node
->child2());
3005 GPRReg valueGPR
= value
.gpr();
3007 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3009 // Refuse to handle bizarre lengths.
3010 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Above
, storageLengthGPR
, TrustedImm32(0x7ffffffe)));
3012 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
3014 m_jit
.store64(valueGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
3016 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3017 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3018 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3019 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
3021 addSlowPathGenerator(
3023 slowPath
, this, operationArrayPush
, NoResult
, storageLengthGPR
,
3024 valueGPR
, baseGPR
));
3026 jsValueResult(storageLengthGPR
, node
);
3038 ASSERT(node
->arrayMode().isJSArray());
3040 SpeculateCellOperand
base(this, node
->child1());
3041 StorageOperand
storage(this, node
->child2());
3042 GPRTemporary
value(this);
3043 GPRTemporary
storageLength(this);
3044 FPRTemporary
temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().
3046 GPRReg baseGPR
= base
.gpr();
3047 GPRReg storageGPR
= storage
.gpr();
3048 GPRReg valueGPR
= value
.gpr();
3049 GPRReg storageLengthGPR
= storageLength
.gpr();
3050 FPRReg tempFPR
= temp
.fpr();
3052 switch (node
->arrayMode().type()) {
3055 case Array::Contiguous
: {
3057 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
3058 MacroAssembler::Jump undefinedCase
=
3059 m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
);
3060 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
3062 storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3063 MacroAssembler::Jump slowCase
;
3064 if (node
->arrayMode().type() == Array::Double
) {
3066 MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
),
3068 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3069 // length and the new length.
3071 MacroAssembler::TrustedImm64(bitwise_cast
<int64_t>(PNaN
)), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3072 slowCase
= m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempFPR
, tempFPR
);
3073 boxDouble(tempFPR
, valueGPR
);
3076 MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
),
3078 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3079 // length and the new length.
3081 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3082 slowCase
= m_jit
.branchTest64(MacroAssembler::Zero
, valueGPR
);
3085 addSlowPathGenerator(
3087 undefinedCase
, this,
3088 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR
));
3089 addSlowPathGenerator(
3091 slowCase
, this, operationArrayPopAndRecoverLength
, valueGPR
, baseGPR
));
3093 // We can't know for sure that the result is an int because of the slow paths. :-/
3094 jsValueResult(valueGPR
, node
);
3098 case Array::ArrayStorage
: {
3099 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3101 JITCompiler::Jump undefinedCase
=
3102 m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
);
3104 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
3106 JITCompiler::JumpList slowCases
;
3107 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset())));
3109 m_jit
.load64(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), valueGPR
);
3110 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, valueGPR
));
3112 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3114 m_jit
.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
3115 m_jit
.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3117 addSlowPathGenerator(
3119 undefinedCase
, this,
3120 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR
));
3122 addSlowPathGenerator(
3124 slowCases
, this, operationArrayPop
, valueGPR
, baseGPR
));
3126 jsValueResult(valueGPR
, node
);
3138 jump(node
->targetBlock());
3152 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT1
);
3153 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
3154 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
3156 // Return the result in returnValueGPR.
3157 JSValueOperand
op1(this, node
->child1());
3158 m_jit
.move(op1
.gpr(), GPRInfo::returnValueGPR
);
3160 m_jit
.emitFunctionEpilogue();
3168 case ThrowReferenceError
: {
3169 // We expect that throw statements are rare and are intended to exit the code block
3170 // anyway, so we just OSR back to the old JIT for now.
3171 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
3175 case BooleanToNumber
: {
3176 switch (node
->child1().useKind()) {
3178 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
3179 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
3181 m_jit
.move(value
.gpr(), result
.gpr());
3182 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
3184 JSValueRegs(value
.gpr()), node
->child1(), SpecBoolean
, m_jit
.branchTest64(
3185 JITCompiler::NonZero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
3187 int32Result(result
.gpr(), node
);
3192 JSValueOperand
value(this, node
->child1());
3193 GPRTemporary
result(this);
3195 if (!m_interpreter
.needsTypeCheck(node
->child1(), SpecBoolInt32
| SpecBoolean
)) {
3196 m_jit
.move(value
.gpr(), result
.gpr());
3197 m_jit
.and32(TrustedImm32(1), result
.gpr());
3198 int32Result(result
.gpr(), node
);
3202 m_jit
.move(value
.gpr(), result
.gpr());
3203 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
3204 JITCompiler::Jump isBoolean
= m_jit
.branchTest64(
3205 JITCompiler::Zero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1)));
3206 m_jit
.move(value
.gpr(), result
.gpr());
3207 JITCompiler::Jump done
= m_jit
.jump();
3208 isBoolean
.link(&m_jit
);
3209 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, result
.gpr());
3212 jsValueResult(result
.gpr(), node
);
3217 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
3224 DFG_ASSERT(m_jit
.graph(), node
, node
->child1().useKind() == UntypedUse
);
3225 JSValueOperand
op1(this, node
->child1());
3226 GPRTemporary
result(this, Reuse
, op1
);
3228 GPRReg op1GPR
= op1
.gpr();
3229 GPRReg resultGPR
= result
.gpr();
3233 MacroAssembler::Jump alreadyPrimitive
= m_jit
.branchIfNotCell(JSValueRegs(op1GPR
));
3234 MacroAssembler::Jump notPrimitive
= m_jit
.branchIfObject(op1GPR
);
3236 alreadyPrimitive
.link(&m_jit
);
3237 m_jit
.move(op1GPR
, resultGPR
);
3239 addSlowPathGenerator(
3240 slowPathCall(notPrimitive
, this, operationToPrimitive
, resultGPR
, op1GPR
));
3242 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3247 case CallStringConstructor
: {
3248 if (node
->child1().useKind() == UntypedUse
) {
3249 JSValueOperand
op1(this, node
->child1());
3250 GPRReg op1GPR
= op1
.gpr();
3252 GPRFlushedCallResult
result(this);
3253 GPRReg resultGPR
= result
.gpr();
3257 JITCompiler::Jump done
;
3258 if (node
->child1()->prediction() & SpecString
) {
3259 JITCompiler::Jump slowPath1
= m_jit
.branchIfNotCell(JSValueRegs(op1GPR
));
3260 JITCompiler::Jump slowPath2
= m_jit
.branchIfNotString(op1GPR
);
3261 m_jit
.move(op1GPR
, resultGPR
);
3262 done
= m_jit
.jump();
3263 slowPath1
.link(&m_jit
);
3264 slowPath2
.link(&m_jit
);
3267 callOperation(operationToString
, resultGPR
, op1GPR
);
3269 ASSERT(op
== CallStringConstructor
);
3270 callOperation(operationCallStringConstructor
, resultGPR
, op1GPR
);
3274 cellResult(resultGPR
, node
);
3278 compileToStringOrCallStringConstructorOnCell(node
);
3282 case NewStringObject
: {
3283 compileNewStringObject(node
);
3288 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3289 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3290 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3291 DFG_ASSERT(m_jit
.graph(), node
, structure
->indexingType() == node
->indexingType());
3293 hasUndecided(structure
->indexingType())
3294 || hasInt32(structure
->indexingType())
3295 || hasDouble(structure
->indexingType())
3296 || hasContiguous(structure
->indexingType()));
3298 unsigned numElements
= node
->numChildren();
3300 GPRTemporary
result(this);
3301 GPRTemporary
storage(this);
3303 GPRReg resultGPR
= result
.gpr();
3304 GPRReg storageGPR
= storage
.gpr();
3306 emitAllocateJSArray(resultGPR
, structure
, storageGPR
, numElements
);
3308 // At this point, one way or another, resultGPR and storageGPR have pointers to
3309 // the JSArray and the Butterfly, respectively.
3311 ASSERT(!hasUndecided(structure
->indexingType()) || !node
->numChildren());
3313 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3314 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3315 switch (node
->indexingType()) {
3316 case ALL_BLANK_INDEXING_TYPES
:
3317 case ALL_UNDECIDED_INDEXING_TYPES
:
3320 case ALL_DOUBLE_INDEXING_TYPES
: {
3321 SpeculateDoubleOperand
operand(this, use
);
3322 FPRReg opFPR
= operand
.fpr();
3324 JSValueRegs(), use
, SpecDoubleReal
,
3326 MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3327 m_jit
.storeDouble(opFPR
, MacroAssembler::Address(storageGPR
, sizeof(double) * operandIdx
));
3330 case ALL_INT32_INDEXING_TYPES
:
3331 case ALL_CONTIGUOUS_INDEXING_TYPES
: {
3332 JSValueOperand
operand(this, use
, ManualOperandSpeculation
);
3333 GPRReg opGPR
= operand
.gpr();
3334 if (hasInt32(node
->indexingType())) {
3336 JSValueRegs(opGPR
), use
, SpecInt32
,
3338 MacroAssembler::Below
, opGPR
, GPRInfo::tagTypeNumberRegister
));
3340 m_jit
.store64(opGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
));
3349 // Yuck, we should *really* have a way of also returning the storageGPR. But
3350 // that's the least of what's wrong with this code. We really shouldn't be
3351 // allocating the array after having computed - and probably spilled to the
3352 // stack - all of the things that will go into the array. The solution to that
3353 // bigger problem will also likely fix the redundancy in reloading the storage
3354 // pointer that we currently have.
3356 cellResult(resultGPR
, node
);
3360 if (!node
->numChildren()) {
3362 GPRFlushedCallResult
result(this);
3363 callOperation(operationNewEmptyArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()));
3364 cellResult(result
.gpr(), node
);
3368 size_t scratchSize
= sizeof(EncodedJSValue
) * node
->numChildren();
3369 ScratchBuffer
* scratchBuffer
= m_jit
.vm()->scratchBufferForSize(scratchSize
);
3370 EncodedJSValue
* buffer
= scratchBuffer
? static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer()) : 0;
3372 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3373 // Need to perform the speculations that this node promises to perform. If we're
3374 // emitting code here and the indexing type is not array storage then there is
3375 // probably something hilarious going on and we're already failing at all the
3376 // things, but at least we're going to be sound.
3377 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3378 switch (node
->indexingType()) {
3379 case ALL_BLANK_INDEXING_TYPES
:
3380 case ALL_UNDECIDED_INDEXING_TYPES
:
3383 case ALL_DOUBLE_INDEXING_TYPES
: {
3384 SpeculateDoubleOperand
operand(this, use
);
3385 GPRTemporary
scratch(this);
3386 FPRReg opFPR
= operand
.fpr();
3387 GPRReg scratchGPR
= scratch
.gpr();
3389 JSValueRegs(), use
, SpecDoubleReal
,
3391 MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3392 m_jit
.boxDouble(opFPR
, scratchGPR
);
3393 m_jit
.store64(scratchGPR
, buffer
+ operandIdx
);
3396 case ALL_INT32_INDEXING_TYPES
: {
3397 JSValueOperand
operand(this, use
, ManualOperandSpeculation
);
3398 GPRReg opGPR
= operand
.gpr();
3399 if (hasInt32(node
->indexingType())) {
3401 JSValueRegs(opGPR
), use
, SpecInt32
,
3403 MacroAssembler::Below
, opGPR
, GPRInfo::tagTypeNumberRegister
));
3405 m_jit
.store64(opGPR
, buffer
+ operandIdx
);
3408 case ALL_CONTIGUOUS_INDEXING_TYPES
:
3409 case ALL_ARRAY_STORAGE_INDEXING_TYPES
: {
3410 JSValueOperand
operand(this, use
);
3411 GPRReg opGPR
= operand
.gpr();
3412 m_jit
.store64(opGPR
, buffer
+ operandIdx
);
3422 switch (node
->indexingType()) {
3423 case ALL_DOUBLE_INDEXING_TYPES
:
3424 case ALL_INT32_INDEXING_TYPES
:
3434 GPRTemporary
scratch(this);
3436 // Tell GC mark phase how much of the scratch buffer is active during call.
3437 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3438 m_jit
.storePtr(TrustedImmPtr(scratchSize
), scratch
.gpr());
3441 GPRFlushedCallResult
result(this);
3444 operationNewArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3445 static_cast<void*>(buffer
), node
->numChildren());
3448 GPRTemporary
scratch(this);
3450 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3451 m_jit
.storePtr(TrustedImmPtr(0), scratch
.gpr());
3454 cellResult(result
.gpr(), node
, UseChildrenCalledExplicitly
);
3458 case NewArrayWithSize
: {
3459 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3460 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3461 SpeculateStrictInt32Operand
size(this, node
->child1());
3462 GPRTemporary
result(this);
3463 GPRTemporary
storage(this);
3464 GPRTemporary
scratch(this);
3465 GPRTemporary
scratch2(this);
3467 GPRReg sizeGPR
= size
.gpr();
3468 GPRReg resultGPR
= result
.gpr();
3469 GPRReg storageGPR
= storage
.gpr();
3470 GPRReg scratchGPR
= scratch
.gpr();
3471 GPRReg scratch2GPR
= scratch2
.gpr();
3473 MacroAssembler::JumpList slowCases
;
3474 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH
)));
3476 ASSERT((1 << 3) == sizeof(JSValue
));
3477 m_jit
.move(sizeGPR
, scratchGPR
);
3478 m_jit
.lshift32(TrustedImm32(3), scratchGPR
);
3479 m_jit
.add32(TrustedImm32(sizeof(IndexingHeader
)), scratchGPR
, resultGPR
);
3481 emitAllocateBasicStorage(resultGPR
, storageGPR
));
3482 m_jit
.subPtr(scratchGPR
, storageGPR
);
3483 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3484 emitAllocateJSObject
<JSArray
>(resultGPR
, TrustedImmPtr(structure
), storageGPR
, scratchGPR
, scratch2GPR
, slowCases
);
3486 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3487 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3489 if (hasDouble(node
->indexingType())) {
3490 m_jit
.move(TrustedImm64(bitwise_cast
<int64_t>(PNaN
)), scratchGPR
);
3491 m_jit
.move(sizeGPR
, scratch2GPR
);
3492 MacroAssembler::Jump done
= m_jit
.branchTest32(MacroAssembler::Zero
, scratch2GPR
);
3493 MacroAssembler::Label loop
= m_jit
.label();
3494 m_jit
.sub32(TrustedImm32(1), scratch2GPR
);
3495 m_jit
.store64(scratchGPR
, MacroAssembler::BaseIndex(storageGPR
, scratch2GPR
, MacroAssembler::TimesEight
));
3496 m_jit
.branchTest32(MacroAssembler::NonZero
, scratch2GPR
).linkTo(loop
, &m_jit
);
3500 addSlowPathGenerator(std::make_unique
<CallArrayAllocatorWithVariableSizeSlowPathGenerator
>(
3501 slowCases
, this, operationNewArrayWithSize
, resultGPR
,
3502 globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3503 globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
),
3506 cellResult(resultGPR
, node
);
3510 SpeculateStrictInt32Operand
size(this, node
->child1());
3511 GPRReg sizeGPR
= size
.gpr();
3513 GPRFlushedCallResult
result(this);
3514 GPRReg resultGPR
= result
.gpr();
3515 GPRReg structureGPR
= selectScratchGPR(sizeGPR
);
3516 MacroAssembler::Jump bigLength
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH
));
3517 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType())), structureGPR
);
3518 MacroAssembler::Jump done
= m_jit
.jump();
3519 bigLength
.link(&m_jit
);
3520 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
)), structureGPR
);
3522 callOperation(operationNewArrayWithSize
, resultGPR
, structureGPR
, sizeGPR
);
3523 cellResult(resultGPR
, node
);
3527 case NewArrayBuffer
: {
3528 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3529 IndexingType indexingType
= node
->indexingType();
3530 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(indexingType
)) {
3531 unsigned numElements
= node
->numConstants();
3533 GPRTemporary
result(this);
3534 GPRTemporary
storage(this);
3536 GPRReg resultGPR
= result
.gpr();
3537 GPRReg storageGPR
= storage
.gpr();
3539 emitAllocateJSArray(resultGPR
, globalObject
->arrayStructureForIndexingTypeDuringAllocation(indexingType
), storageGPR
, numElements
);
3541 DFG_ASSERT(m_jit
.graph(), node
, indexingType
& IsArray
);
3542 JSValue
* data
= m_jit
.codeBlock()->constantBuffer(node
->startConstant());
3543 if (indexingType
== ArrayWithDouble
) {
3544 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3545 double value
= data
[index
].asNumber();
3547 Imm64(bitwise_cast
<int64_t>(value
)),
3548 MacroAssembler::Address(storageGPR
, sizeof(double) * index
));
3551 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3553 Imm64(JSValue::encode(data
[index
])),
3554 MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * index
));
3558 cellResult(resultGPR
, node
);
3563 GPRFlushedCallResult
result(this);
3565 callOperation(operationNewArrayBuffer
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()), node
->startConstant(), node
->numConstants());
3567 cellResult(result
.gpr(), node
);
3571 case NewTypedArray
: {
3572 switch (node
->child1().useKind()) {
3574 compileNewTypedArray(node
);
3577 JSValueOperand
argument(this, node
->child1());
3578 GPRReg argumentGPR
= argument
.gpr();
3582 GPRFlushedCallResult
result(this);
3583 GPRReg resultGPR
= result
.gpr();
3585 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3587 operationNewTypedArrayWithOneArgumentForType(node
->typedArrayType()),
3588 resultGPR
, globalObject
->typedArrayStructure(node
->typedArrayType()),
3591 cellResult(resultGPR
, node
);
3595 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
3603 GPRFlushedCallResult
result(this);
3605 callOperation(operationNewRegexp
, result
.gpr(), m_jit
.codeBlock()->regexp(node
->regexpIndex()));
3607 cellResult(result
.gpr(), node
);
3612 ASSERT(node
->child1().useKind() == UntypedUse
);
3613 JSValueOperand
thisValue(this, node
->child1());
3614 GPRTemporary
temp(this);
3615 GPRReg thisValueGPR
= thisValue
.gpr();
3616 GPRReg tempGPR
= temp
.gpr();
3618 MacroAssembler::JumpList slowCases
;
3619 slowCases
.append(m_jit
.branchIfNotCell(JSValueRegs(thisValueGPR
)));
3620 slowCases
.append(m_jit
.branch8(
3621 MacroAssembler::NotEqual
,
3622 MacroAssembler::Address(thisValueGPR
, JSCell::typeInfoTypeOffset()),
3623 TrustedImm32(FinalObjectType
)));
3624 m_jit
.move(thisValueGPR
, tempGPR
);
3625 J_JITOperation_EJ function
;
3626 if (m_jit
.graph().executableFor(node
->origin
.semantic
)->isStrictMode())
3627 function
= operationToThisStrict
;
3629 function
= operationToThis
;
3630 addSlowPathGenerator(
3631 slowPathCall(slowCases
, this, function
, tempGPR
, thisValueGPR
));
3633 jsValueResult(tempGPR
, node
);
3638 // Note that there is not so much profit to speculate here. The only things we
3639 // speculate on are (1) that it's a cell, since that eliminates cell checks
3640 // later if the proto is reused, and (2) if we have a FinalObject prediction
3641 // then we speculate because we want to get recompiled if it isn't (since
3642 // otherwise we'd start taking slow path a lot).
3644 SpeculateCellOperand
callee(this, node
->child1());
3645 GPRTemporary
result(this);
3646 GPRTemporary
allocator(this);
3647 GPRTemporary
structure(this);
3648 GPRTemporary
scratch(this);
3650 GPRReg calleeGPR
= callee
.gpr();
3651 GPRReg resultGPR
= result
.gpr();
3652 GPRReg allocatorGPR
= allocator
.gpr();
3653 GPRReg structureGPR
= structure
.gpr();
3654 GPRReg scratchGPR
= scratch
.gpr();
3655 // Rare data is only used to access the allocator & structure
3656 // We can avoid using an additional GPR this way
3657 GPRReg rareDataGPR
= structureGPR
;
3659 MacroAssembler::JumpList slowPath
;
3661 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfRareData()), rareDataGPR
);
3662 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, rareDataGPR
));
3663 m_jit
.loadPtr(JITCompiler::Address(rareDataGPR
, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR
);
3664 m_jit
.loadPtr(JITCompiler::Address(rareDataGPR
, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR
);
3665 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, allocatorGPR
));
3666 emitAllocateJSObject(resultGPR
, allocatorGPR
, structureGPR
, TrustedImmPtr(0), scratchGPR
, slowPath
);
3668 addSlowPathGenerator(slowPathCall(slowPath
, this, operationCreateThis
, resultGPR
, calleeGPR
, node
->inlineCapacity()));
3670 cellResult(resultGPR
, node
);
3675 GPRTemporary
result(this);
3676 GPRTemporary
allocator(this);
3677 GPRTemporary
scratch(this);
3679 GPRReg resultGPR
= result
.gpr();
3680 GPRReg allocatorGPR
= allocator
.gpr();
3681 GPRReg scratchGPR
= scratch
.gpr();
3683 MacroAssembler::JumpList slowPath
;
3685 Structure
* structure
= node
->structure();
3686 size_t allocationSize
= JSFinalObject::allocationSize(structure
->inlineCapacity());
3687 MarkedAllocator
* allocatorPtr
= &m_jit
.vm()->heap
.allocatorForObjectWithoutDestructor(allocationSize
);
3689 m_jit
.move(TrustedImmPtr(allocatorPtr
), allocatorGPR
);
3690 emitAllocateJSObject(resultGPR
, allocatorGPR
, TrustedImmPtr(structure
), TrustedImmPtr(0), scratchGPR
, slowPath
);
3692 addSlowPathGenerator(slowPathCall(slowPath
, this, operationNewObject
, resultGPR
, structure
));
3694 cellResult(resultGPR
, node
);
3699 GPRTemporary
result(this);
3700 m_jit
.loadPtr(JITCompiler::addressFor(JSStack::Callee
), result
.gpr());
3701 cellResult(result
.gpr(), node
);
3705 case GetArgumentCount
: {
3706 GPRTemporary
result(this);
3707 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), result
.gpr());
3708 int32Result(result
.gpr(), node
);
3713 compileGetScope(node
);
3717 compileSkipScope(node
);
3720 case GetClosureVar
: {
3721 SpeculateCellOperand
base(this, node
->child1());
3722 GPRTemporary
result(this);
3723 GPRReg baseGPR
= base
.gpr();
3724 GPRReg resultGPR
= result
.gpr();
3726 m_jit
.load64(JITCompiler::Address(baseGPR
, JSEnvironmentRecord::offsetOfVariable(node
->scopeOffset())), resultGPR
);
3727 jsValueResult(resultGPR
, node
);
3730 case PutClosureVar
: {
3731 SpeculateCellOperand
base(this, node
->child1());
3732 JSValueOperand
value(this, node
->child2());
3734 GPRReg baseGPR
= base
.gpr();
3735 GPRReg valueGPR
= value
.gpr();
3737 m_jit
.store64(valueGPR
, JITCompiler::Address(baseGPR
, JSEnvironmentRecord::offsetOfVariable(node
->scopeOffset())));
3742 ASSERT(node
->prediction());
3744 switch (node
->child1().useKind()) {
3746 SpeculateCellOperand
base(this, node
->child1());
3747 GPRTemporary
result(this, Reuse
, base
);
3749 GPRReg baseGPR
= base
.gpr();
3750 GPRReg resultGPR
= result
.gpr();
3754 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber());
3756 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3761 JSValueOperand
base(this, node
->child1());
3762 GPRTemporary
result(this, Reuse
, base
);
3764 GPRReg baseGPR
= base
.gpr();
3765 GPRReg resultGPR
= result
.gpr();
3769 JITCompiler::Jump notCell
= m_jit
.branchIfNotCell(JSValueRegs(baseGPR
));
3771 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber(), notCell
);
3773 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3778 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
3784 case GetByIdFlush
: {
3785 if (!node
->prediction()) {
3786 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3790 switch (node
->child1().useKind()) {
3792 SpeculateCellOperand
base(this, node
->child1());
3793 GPRReg baseGPR
= base
.gpr();
3795 GPRFlushedCallResult
result(this);
3797 GPRReg resultGPR
= result
.gpr();
3803 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber(), JITCompiler::Jump(), DontSpill
);
3805 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3810 JSValueOperand
base(this, node
->child1());
3811 GPRReg baseGPR
= base
.gpr();
3813 GPRFlushedCallResult
result(this);
3814 GPRReg resultGPR
= result
.gpr();
3819 JITCompiler::Jump notCell
= m_jit
.branchIfNotCell(JSValueRegs(baseGPR
));
3821 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber(), notCell
, DontSpill
);
3823 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3828 DFG_CRASH(m_jit
.graph(), node
, "Bad use kind");
3834 case GetArrayLength
:
3835 compileGetArrayLength(node
);
3839 SpeculateCellOperand
cell(this, node
->child1());
3840 speculationCheck(BadCell
, JSValueSource::unboxedCell(cell
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, cell
.gpr(), node
->cellOperand()->cell()));
3845 case CheckNotEmpty
: {
3846 JSValueOperand
operand(this, node
->child1());
3847 GPRReg gpr
= operand
.gpr();
3848 speculationCheck(TDZFailure
, JSValueSource(), nullptr, m_jit
.branchTest64(JITCompiler::Zero
, gpr
));
3853 case GetExecutable
: {
3854 SpeculateCellOperand
function(this, node
->child1());
3855 GPRTemporary
result(this, Reuse
, function
);
3856 GPRReg functionGPR
= function
.gpr();
3857 GPRReg resultGPR
= result
.gpr();
3858 speculateCellType(node
->child1(), functionGPR
, SpecFunction
, JSFunctionType
);
3859 m_jit
.loadPtr(JITCompiler::Address(functionGPR
, JSFunction::offsetOfExecutable()), resultGPR
);
3860 cellResult(resultGPR
, node
);
3864 case CheckStructure
: {
3865 SpeculateCellOperand
base(this, node
->child1());
3867 ASSERT(node
->structureSet().size());
3870 if (node
->child1()->hasConstant())
3871 exitKind
= BadConstantCache
;
3873 exitKind
= BadCache
;
3875 if (node
->structureSet().size() == 1) {
3877 exitKind
, JSValueSource::unboxedCell(base
.gpr()), 0,
3878 m_jit
.branchWeakStructure(
3879 JITCompiler::NotEqual
,
3880 JITCompiler::Address(base
.gpr(), JSCell::structureIDOffset()),
3881 node
->structureSet()[0]));
3883 JITCompiler::JumpList done
;
3885 for (size_t i
= 0; i
< node
->structureSet().size() - 1; ++i
)
3886 done
.append(m_jit
.branchWeakStructure(JITCompiler::Equal
, MacroAssembler::Address(base
.gpr(), JSCell::structureIDOffset()), node
->structureSet()[i
]));
3889 exitKind
, JSValueSource::unboxedCell(base
.gpr()), 0,
3890 m_jit
.branchWeakStructure(
3891 JITCompiler::NotEqual
, MacroAssembler::Address(base
.gpr(), JSCell::structureIDOffset()), node
->structureSet().last()));
3900 case PutStructure
: {
3901 Structure
* oldStructure
= node
->transition()->previous
;
3902 Structure
* newStructure
= node
->transition()->next
;
3904 m_jit
.jitCode()->common
.notifyCompilingStructureTransition(m_jit
.graph().m_plan
, m_jit
.codeBlock(), node
);
3906 SpeculateCellOperand
base(this, node
->child1());
3907 GPRReg baseGPR
= base
.gpr();
3909 ASSERT_UNUSED(oldStructure
, oldStructure
->indexingType() == newStructure
->indexingType());
3910 ASSERT(oldStructure
->typeInfo().type() == newStructure
->typeInfo().type());
3911 ASSERT(oldStructure
->typeInfo().inlineTypeFlags() == newStructure
->typeInfo().inlineTypeFlags());
3912 m_jit
.store32(MacroAssembler::TrustedImm32(newStructure
->id()), MacroAssembler::Address(baseGPR
, JSCell::structureIDOffset()));
3918 case AllocatePropertyStorage
:
3919 compileAllocatePropertyStorage(node
);
3922 case ReallocatePropertyStorage
:
3923 compileReallocatePropertyStorage(node
);
3926 case GetButterfly
: {
3927 SpeculateCellOperand
base(this, node
->child1());
3928 GPRTemporary
result(this, Reuse
, base
);
3930 GPRReg baseGPR
= base
.gpr();
3931 GPRReg resultGPR
= result
.gpr();
3933 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), resultGPR
);
3935 storageResult(resultGPR
, node
);
3939 case GetIndexedPropertyStorage
: {
3940 compileGetIndexedPropertyStorage(node
);
3944 case ConstantStoragePointer
: {
3945 compileConstantStoragePointer(node
);
3949 case GetTypedArrayByteOffset
: {
3950 compileGetTypedArrayByteOffset(node
);
3955 case GetGetterSetterByOffset
: {
3956 StorageOperand
storage(this, node
->child1());
3957 GPRTemporary
result(this, Reuse
, storage
);
3959 GPRReg storageGPR
= storage
.gpr();
3960 GPRReg resultGPR
= result
.gpr();
3962 StorageAccessData
& storageAccessData
= node
->storageAccessData();
3964 m_jit
.load64(JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
)), resultGPR
);
3966 jsValueResult(resultGPR
, node
);
3971 SpeculateCellOperand
op1(this, node
->child1());
3972 GPRTemporary
result(this, Reuse
, op1
);
3974 GPRReg op1GPR
= op1
.gpr();
3975 GPRReg resultGPR
= result
.gpr();
3977 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, GetterSetter::offsetOfGetter()), resultGPR
);
3979 cellResult(resultGPR
, node
);
3984 SpeculateCellOperand
op1(this, node
->child1());
3985 GPRTemporary
result(this, Reuse
, op1
);
3987 GPRReg op1GPR
= op1
.gpr();
3988 GPRReg resultGPR
= result
.gpr();
3990 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, GetterSetter::offsetOfSetter()), resultGPR
);
3992 cellResult(resultGPR
, node
);
3997 StorageOperand
storage(this, node
->child1());
3998 JSValueOperand
value(this, node
->child3());
3999 GPRTemporary
scratch1(this);
4000 GPRTemporary
scratch2(this);
4002 GPRReg storageGPR
= storage
.gpr();
4003 GPRReg valueGPR
= value
.gpr();
4005 speculate(node
, node
->child2());
4007 StorageAccessData
& storageAccessData
= node
->storageAccessData();
4009 m_jit
.store64(valueGPR
, JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
)));
4015 case PutByIdFlush
: {
4016 SpeculateCellOperand
base(this, node
->child1());
4017 JSValueOperand
value(this, node
->child2());
4018 GPRTemporary
scratch(this);
4020 GPRReg baseGPR
= base
.gpr();
4021 GPRReg valueGPR
= value
.gpr();
4022 GPRReg scratchGPR
= scratch
.gpr();
4025 cachedPutById(node
->origin
.semantic
, baseGPR
, valueGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
, MacroAssembler::Jump(), DontSpill
);
4032 SpeculateCellOperand
base(this, node
->child1());
4033 JSValueOperand
value(this, node
->child2());
4034 GPRTemporary
scratch(this);
4036 GPRReg baseGPR
= base
.gpr();
4037 GPRReg valueGPR
= value
.gpr();
4038 GPRReg scratchGPR
= scratch
.gpr();
4040 cachedPutById(node
->origin
.semantic
, baseGPR
, valueGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
);
4046 case PutByIdDirect
: {
4047 SpeculateCellOperand
base(this, node
->child1());
4048 JSValueOperand
value(this, node
->child2());
4049 GPRTemporary
scratch(this);
4051 GPRReg baseGPR
= base
.gpr();
4052 GPRReg valueGPR
= value
.gpr();
4053 GPRReg scratchGPR
= scratch
.gpr();
4055 cachedPutById(node
->origin
.semantic
, baseGPR
, valueGPR
, scratchGPR
, node
->identifierNumber(), Direct
);
4061 case GetGlobalVar
: {
4062 GPRTemporary
result(this);
4064 m_jit
.load64(node
->variablePointer(), result
.gpr());
4066 jsValueResult(result
.gpr(), node
);
4070 case PutGlobalVar
: {
4071 JSValueOperand
value(this, node
->child2());
4073 m_jit
.store64(value
.gpr(), node
->variablePointer());
4080 compileNotifyWrite(node
);
4084 case VarInjectionWatchpoint
: {
4089 case CheckHasInstance
: {
4090 SpeculateCellOperand
base(this, node
->child1());
4091 GPRTemporary
structure(this);
4093 // Speculate that base 'ImplementsDefaultHasInstance'.
4094 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branchTest8(
4095 MacroAssembler::Zero
,
4096 MacroAssembler::Address(base
.gpr(), JSCell::typeInfoFlagsOffset()),
4097 MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance
)));
4104 compileInstanceOf(node
);
4109 JSValueOperand
value(this, node
->child1());
4110 GPRTemporary
result(this);
4111 GPRTemporary
localGlobalObject(this);
4112 GPRTemporary
remoteGlobalObject(this);
4113 GPRTemporary
scratch(this);
4115 JITCompiler::Jump isCell
= m_jit
.branchIfCell(value
.jsValueRegs());
4117 m_jit
.compare64(JITCompiler::Equal
, value
.gpr(), TrustedImm32(ValueUndefined
), result
.gpr());
4118 JITCompiler::Jump done
= m_jit
.jump();
4120 isCell
.link(&m_jit
);
4121 JITCompiler::Jump notMasqueradesAsUndefined
;
4122 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
4123 m_jit
.move(TrustedImm32(0), result
.gpr());
4124 notMasqueradesAsUndefined
= m_jit
.jump();
4126 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
4127 JITCompiler::NonZero
,
4128 JITCompiler::Address(value
.gpr(), JSCell::typeInfoFlagsOffset()),
4129 TrustedImm32(MasqueradesAsUndefined
));
4130 m_jit
.move(TrustedImm32(0), result
.gpr());
4131 notMasqueradesAsUndefined
= m_jit
.jump();
4133 isMasqueradesAsUndefined
.link(&m_jit
);
4134 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
4135 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
4136 m_jit
.move(TrustedImmPtr(m_jit
.globalObjectFor(node
->origin
.semantic
)), localGlobalObjectGPR
);
4137 m_jit
.emitLoadStructure(value
.gpr(), result
.gpr(), scratch
.gpr());
4138 m_jit
.loadPtr(JITCompiler::Address(result
.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
4139 m_jit
.comparePtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, result
.gpr());
4142 notMasqueradesAsUndefined
.link(&m_jit
);
4144 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4145 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4150 JSValueOperand
value(this, node
->child1());
4151 GPRTemporary
result(this, Reuse
, value
);
4153 m_jit
.move(value
.gpr(), result
.gpr());
4154 m_jit
.xor64(JITCompiler::TrustedImm32(ValueFalse
), result
.gpr());
4155 m_jit
.test64(JITCompiler::Zero
, result
.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result
.gpr());
4156 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4157 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4162 JSValueOperand
value(this, node
->child1());
4163 GPRTemporary
result(this, Reuse
, value
);
4165 m_jit
.test64(JITCompiler::NonZero
, value
.gpr(), GPRInfo::tagTypeNumberRegister
, result
.gpr());
4166 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4167 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4172 JSValueOperand
value(this, node
->child1());
4173 GPRTemporary
result(this, Reuse
, value
);
4175 JITCompiler::Jump isNotCell
= m_jit
.branchIfNotCell(value
.jsValueRegs());
4177 m_jit
.compare8(JITCompiler::Equal
,
4178 JITCompiler::Address(value
.gpr(), JSCell::typeInfoTypeOffset()),
4179 TrustedImm32(StringType
),
4181 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4182 JITCompiler::Jump done
= m_jit
.jump();
4184 isNotCell
.link(&m_jit
);
4185 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
4188 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4193 JSValueOperand
value(this, node
->child1());
4194 GPRTemporary
result(this, Reuse
, value
);
4196 JITCompiler::Jump isNotCell
= m_jit
.branchIfNotCell(value
.jsValueRegs());
4198 m_jit
.compare8(JITCompiler::AboveOrEqual
,
4199 JITCompiler::Address(value
.gpr(), JSCell::typeInfoTypeOffset()),
4200 TrustedImm32(ObjectType
),
4202 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4203 JITCompiler::Jump done
= m_jit
.jump();
4205 isNotCell
.link(&m_jit
);
4206 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
4209 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4213 case IsObjectOrNull
: {
4214 compileIsObjectOrNull(node
);
4219 compileIsFunction(node
);
4224 compileTypeOf(node
);
4234 case CallForwardVarargs
:
4235 case ConstructVarargs
:
4236 case ConstructForwardVarargs
:
4241 LoadVarargsData
* data
= node
->loadVarargsData();
4243 GPRReg argumentsGPR
;
4245 JSValueOperand
arguments(this, node
->child1());
4246 argumentsGPR
= arguments
.gpr();
4250 callOperation(operationSizeOfVarargs
, GPRInfo::returnValueGPR
, argumentsGPR
, data
->offset
);
4252 lock(GPRInfo::returnValueGPR
);
4254 JSValueOperand
arguments(this, node
->child1());
4255 argumentsGPR
= arguments
.gpr();
4258 unlock(GPRInfo::returnValueGPR
);
4260 // FIXME: There is a chance that we will call an effectful length property twice. This is safe
4261 // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
4262 // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
4264 // https://bugs.webkit.org/show_bug.cgi?id=141448
4266 GPRReg argCountIncludingThisGPR
=
4267 JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR
, argumentsGPR
);
4269 m_jit
.add32(TrustedImm32(1), GPRInfo::returnValueGPR
, argCountIncludingThisGPR
);
4271 VarargsOverflow
, JSValueSource(), Edge(), m_jit
.branch32(
4272 MacroAssembler::Above
,
4273 argCountIncludingThisGPR
,
4274 TrustedImm32(data
->limit
)));
4276 m_jit
.store32(argCountIncludingThisGPR
, JITCompiler::payloadFor(data
->machineCount
));
4278 callOperation(operationLoadVarargs
, data
->machineStart
.offset(), argumentsGPR
, data
->offset
, GPRInfo::returnValueGPR
, data
->mandatoryMinimum
);
4284 case ForwardVarargs
: {
4285 compileForwardVarargs(node
);
4289 case CreateActivation
: {
4290 compileCreateActivation(node
);
4294 case CreateDirectArguments
: {
4295 compileCreateDirectArguments(node
);
4299 case GetFromArguments
: {
4300 compileGetFromArguments(node
);
4304 case PutToArguments
: {
4305 compilePutToArguments(node
);
4309 case CreateScopedArguments
: {
4310 compileCreateScopedArguments(node
);
4314 case CreateClonedArguments
: {
4315 compileCreateClonedArguments(node
);
4320 compileNewFunction(node
);
4327 case CountExecution
:
4328 m_jit
.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node
->executionCounter()->address()));
4331 case ForceOSRExit
: {
4332 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
4336 case InvalidationPoint
:
4337 emitInvalidationPoint(node
);
4340 case CheckWatchdogTimer
:
4341 ASSERT(m_jit
.vm()->watchdog
);
4343 WatchdogTimerFired
, JSValueRegs(), 0,
4345 JITCompiler::NonZero
,
4346 JITCompiler::AbsoluteAddress(m_jit
.vm()->watchdog
->timerDidFireAddress())));
4351 DFG_NODE_DO_TO_CHILDREN(m_jit
.graph(), node
, speculate
);
4356 case ProfileWillCall
:
4357 case ProfileDidCall
:
4365 DFG_CRASH(m_jit
.graph(), node
, "Unexpected Unreachable node");
4368 case StoreBarrier
: {
4369 compileStoreBarrier(node
);
4373 case GetEnumerableLength
: {
4374 SpeculateCellOperand
enumerator(this, node
->child1());
4375 GPRFlushedCallResult
result(this);
4376 GPRReg resultGPR
= result
.gpr();
4378 m_jit
.load32(MacroAssembler::Address(enumerator
.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR
);
4379 int32Result(resultGPR
, node
);
4382 case HasGenericProperty
: {
4383 JSValueOperand
base(this, node
->child1());
4384 SpeculateCellOperand
property(this, node
->child2());
4385 GPRFlushedCallResult
result(this);
4386 GPRReg resultGPR
= result
.gpr();
4389 callOperation(operationHasGenericProperty
, resultGPR
, base
.gpr(), property
.gpr());
4390 jsValueResult(resultGPR
, node
, DataFormatJSBoolean
);
4393 case HasStructureProperty
: {
4394 JSValueOperand
base(this, node
->child1());
4395 SpeculateCellOperand
property(this, node
->child2());
4396 SpeculateCellOperand
enumerator(this, node
->child3());
4397 GPRTemporary
result(this);
4399 GPRReg baseGPR
= base
.gpr();
4400 GPRReg propertyGPR
= property
.gpr();
4401 GPRReg resultGPR
= result
.gpr();
4403 m_jit
.load32(MacroAssembler::Address(baseGPR
, JSCell::structureIDOffset()), resultGPR
);
4404 MacroAssembler::Jump wrongStructure
= m_jit
.branch32(MacroAssembler::NotEqual
,
4406 MacroAssembler::Address(enumerator
.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset()));
4408 moveTrueTo(resultGPR
);
4409 MacroAssembler::Jump done
= m_jit
.jump();
4413 addSlowPathGenerator(slowPathCall(wrongStructure
, this, operationHasGenericProperty
, resultGPR
, baseGPR
, propertyGPR
));
4414 jsValueResult(resultGPR
, node
, DataFormatJSBoolean
);
4417 case HasIndexedProperty
: {
4418 SpeculateCellOperand
base(this, node
->child1());
4419 SpeculateStrictInt32Operand
index(this, node
->child2());
4420 GPRTemporary
result(this);
4422 GPRReg baseGPR
= base
.gpr();
4423 GPRReg indexGPR
= index
.gpr();
4424 GPRReg resultGPR
= result
.gpr();
4426 MacroAssembler::JumpList slowCases
;
4427 ArrayMode mode
= node
->arrayMode();
4428 switch (mode
.type()) {
4430 case Array::Contiguous
: {
4431 ASSERT(!!node
->child3());
4432 StorageOperand
storage(this, node
->child3());
4433 GPRTemporary
scratch(this);
4435 GPRReg storageGPR
= storage
.gpr();
4436 GPRReg scratchGPR
= scratch
.gpr();
4438 MacroAssembler::Jump outOfBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
4439 if (mode
.isInBounds())
4440 speculationCheck(OutOfBounds
, JSValueRegs(), 0, outOfBounds
);
4442 slowCases
.append(outOfBounds
);
4444 m_jit
.load64(MacroAssembler::BaseIndex(storageGPR
, indexGPR
, MacroAssembler::TimesEight
), scratchGPR
);
4445 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, scratchGPR
));
4446 moveTrueTo(resultGPR
);
4449 case Array::Double
: {
4450 ASSERT(!!node
->child3());
4451 StorageOperand
storage(this, node
->child3());
4452 FPRTemporary
scratch(this);
4453 FPRReg scratchFPR
= scratch
.fpr();
4454 GPRReg storageGPR
= storage
.gpr();
4456 MacroAssembler::Jump outOfBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
4457 if (mode
.isInBounds())
4458 speculationCheck(OutOfBounds
, JSValueRegs(), 0, outOfBounds
);
4460 slowCases
.append(outOfBounds
);
4462 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageGPR
, indexGPR
, MacroAssembler::TimesEight
), scratchFPR
);
4463 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, scratchFPR
, scratchFPR
));
4464 moveTrueTo(resultGPR
);
4467 case Array::ArrayStorage
: {
4468 ASSERT(!!node
->child3());
4469 StorageOperand
storage(this, node
->child3());
4470 GPRTemporary
scratch(this);
4472 GPRReg storageGPR
= storage
.gpr();
4473 GPRReg scratchGPR
= scratch
.gpr();
4475 MacroAssembler::Jump outOfBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
4476 if (mode
.isInBounds())
4477 speculationCheck(OutOfBounds
, JSValueRegs(), 0, outOfBounds
);
4479 slowCases
.append(outOfBounds
);
4481 m_jit
.load64(MacroAssembler::BaseIndex(storageGPR
, indexGPR
, MacroAssembler::TimesEight
, ArrayStorage::vectorOffset()), scratchGPR
);
4482 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, scratchGPR
));
4483 moveTrueTo(resultGPR
);
4487 slowCases
.append(m_jit
.jump());
4492 addSlowPathGenerator(slowPathCall(slowCases
, this, operationHasIndexedProperty
, resultGPR
, baseGPR
, indexGPR
));
4494 jsValueResult(resultGPR
, node
, DataFormatJSBoolean
);
4497 case GetDirectPname
: {
4498 Edge
& baseEdge
= m_jit
.graph().varArgChild(node
, 0);
4499 Edge
& propertyEdge
= m_jit
.graph().varArgChild(node
, 1);
4500 Edge
& indexEdge
= m_jit
.graph().varArgChild(node
, 2);
4501 Edge
& enumeratorEdge
= m_jit
.graph().varArgChild(node
, 3);
4503 SpeculateCellOperand
base(this, baseEdge
);
4504 SpeculateCellOperand
property(this, propertyEdge
);
4505 SpeculateStrictInt32Operand
index(this, indexEdge
);
4506 SpeculateCellOperand
enumerator(this, enumeratorEdge
);
4507 GPRTemporary
result(this);
4508 GPRTemporary
scratch1(this);
4509 GPRTemporary
scratch2(this);
4511 GPRReg baseGPR
= base
.gpr();
4512 GPRReg propertyGPR
= property
.gpr();
4513 GPRReg indexGPR
= index
.gpr();
4514 GPRReg enumeratorGPR
= enumerator
.gpr();
4515 GPRReg resultGPR
= result
.gpr();
4516 GPRReg scratch1GPR
= scratch1
.gpr();
4517 GPRReg scratch2GPR
= scratch2
.gpr();
4519 // Check the structure
4520 m_jit
.load32(MacroAssembler::Address(baseGPR
, JSCell::structureIDOffset()), scratch1GPR
);
4521 MacroAssembler::Jump wrongStructure
= m_jit
.branch32(MacroAssembler::NotEqual
,
4522 scratch1GPR
, MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedStructureIDOffset()));
4524 // Compute the offset
4525 // If index is less than the enumerator's cached inline storage, then it's an inline access
4526 MacroAssembler::Jump outOfLineAccess
= m_jit
.branch32(MacroAssembler::AboveOrEqual
,
4527 indexGPR
, MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
4529 m_jit
.load64(MacroAssembler::BaseIndex(baseGPR
, indexGPR
, MacroAssembler::TimesEight
, JSObject::offsetOfInlineStorage()), resultGPR
);
4531 MacroAssembler::Jump done
= m_jit
.jump();
4533 // Otherwise it's out of line
4534 outOfLineAccess
.link(&m_jit
);
4535 m_jit
.loadPtr(MacroAssembler::Address(baseGPR
, JSObject::butterflyOffset()), scratch2GPR
);
4536 m_jit
.move(indexGPR
, scratch1GPR
);
4537 m_jit
.sub32(MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratch1GPR
);
4538 m_jit
.neg32(scratch1GPR
);
4539 m_jit
.signExtend32ToPtr(scratch1GPR
, scratch1GPR
);
4540 int32_t offsetOfFirstProperty
= static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset
)) * sizeof(EncodedJSValue
);
4541 m_jit
.load64(MacroAssembler::BaseIndex(scratch2GPR
, scratch1GPR
, MacroAssembler::TimesEight
, offsetOfFirstProperty
), resultGPR
);
4545 addSlowPathGenerator(slowPathCall(wrongStructure
, this, operationGetByVal
, resultGPR
, baseGPR
, propertyGPR
));
4547 jsValueResult(resultGPR
, node
);
4550 case GetPropertyEnumerator
: {
4551 SpeculateCellOperand
base(this, node
->child1());
4552 GPRFlushedCallResult
result(this);
4553 GPRReg resultGPR
= result
.gpr();
4556 callOperation(operationGetPropertyEnumerator
, resultGPR
, base
.gpr());
4557 cellResult(resultGPR
, node
);
4560 case GetEnumeratorStructurePname
:
4561 case GetEnumeratorGenericPname
: {
4562 SpeculateCellOperand
enumerator(this, node
->child1());
4563 SpeculateStrictInt32Operand
index(this, node
->child2());
4564 GPRTemporary
scratch1(this);
4565 GPRTemporary
result(this);
4567 GPRReg enumeratorGPR
= enumerator
.gpr();
4568 GPRReg indexGPR
= index
.gpr();
4569 GPRReg scratch1GPR
= scratch1
.gpr();
4570 GPRReg resultGPR
= result
.gpr();
4572 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, indexGPR
,
4573 MacroAssembler::Address(enumeratorGPR
, (op
== GetEnumeratorStructurePname
)
4574 ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset()
4575 : JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
4577 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsNull())), resultGPR
);
4579 MacroAssembler::Jump done
= m_jit
.jump();
4580 inBounds
.link(&m_jit
);
4582 m_jit
.loadPtr(MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratch1GPR
);
4583 m_jit
.load64(MacroAssembler::BaseIndex(scratch1GPR
, indexGPR
, MacroAssembler::TimesEight
), resultGPR
);
4586 jsValueResult(resultGPR
, node
);
4589 case ToIndexString
: {
4590 SpeculateInt32Operand
index(this, node
->child1());
4591 GPRFlushedCallResult
result(this);
4592 GPRReg resultGPR
= result
.gpr();
4595 callOperation(operationToIndexString
, resultGPR
, index
.gpr());
4596 cellResult(resultGPR
, node
);
4600 JSValueOperand
value(this, node
->child1());
4601 GPRTemporary
scratch1(this);
4602 GPRTemporary
scratch2(this);
4603 GPRTemporary
scratch3(this);
4605 GPRReg scratch1GPR
= scratch1
.gpr();
4606 GPRReg scratch2GPR
= scratch2
.gpr();
4607 GPRReg scratch3GPR
= scratch3
.gpr();
4608 GPRReg valueGPR
= value
.gpr();
4610 MacroAssembler::JumpList jumpToEnd
;
4612 TypeLocation
* cachedTypeLocation
= node
->typeLocation();
4613 // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
4614 // These typechecks are inlined to match those of the 64-bit JSValue type checks.
4615 if (cachedTypeLocation
->m_lastSeenType
== TypeUndefined
)
4616 jumpToEnd
.append(m_jit
.branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined()))));
4617 else if (cachedTypeLocation
->m_lastSeenType
== TypeNull
)
4618 jumpToEnd
.append(m_jit
.branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsNull()))));
4619 else if (cachedTypeLocation
->m_lastSeenType
== TypeBoolean
) {
4620 m_jit
.move(valueGPR
, scratch2GPR
);
4621 m_jit
.and64(TrustedImm32(~1), scratch2GPR
);
4622 jumpToEnd
.append(m_jit
.branch64(MacroAssembler::Equal
, scratch2GPR
, MacroAssembler::TrustedImm64(ValueFalse
)));
4623 } else if (cachedTypeLocation
->m_lastSeenType
== TypeMachineInt
)
4624 jumpToEnd
.append(m_jit
.branch64(MacroAssembler::AboveOrEqual
, valueGPR
, GPRInfo::tagTypeNumberRegister
));
4625 else if (cachedTypeLocation
->m_lastSeenType
== TypeNumber
)
4626 jumpToEnd
.append(m_jit
.branchTest64(MacroAssembler::NonZero
, valueGPR
, GPRInfo::tagTypeNumberRegister
));
4627 else if (cachedTypeLocation
->m_lastSeenType
== TypeString
) {
4628 MacroAssembler::Jump isNotCell
= m_jit
.branchIfNotCell(JSValueRegs(valueGPR
));
4629 jumpToEnd
.append(m_jit
.branchIfString(valueGPR
));
4630 isNotCell
.link(&m_jit
);
4633 // Load the TypeProfilerLog into Scratch2.
4634 TypeProfilerLog
* cachedTypeProfilerLog
= m_jit
.vm()->typeProfilerLog();
4635 m_jit
.move(TrustedImmPtr(cachedTypeProfilerLog
), scratch2GPR
);
4637 // Load the next LogEntry into Scratch1.
4638 m_jit
.loadPtr(MacroAssembler::Address(scratch2GPR
, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR
);
4640 // Store the JSValue onto the log entry.
4641 m_jit
.store64(valueGPR
, MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::valueOffset()));
4643 // Store the structureID of the cell if valueGPR is a cell, otherwise, store 0 on the log entry.
4644 MacroAssembler::Jump isNotCell
= m_jit
.branchIfNotCell(JSValueRegs(valueGPR
));
4645 m_jit
.load32(MacroAssembler::Address(valueGPR
, JSCell::structureIDOffset()), scratch3GPR
);
4646 m_jit
.store32(scratch3GPR
, MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::structureIDOffset()));
4647 MacroAssembler::Jump skipIsCell
= m_jit
.jump();
4648 isNotCell
.link(&m_jit
);
4649 m_jit
.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::structureIDOffset()));
4650 skipIsCell
.link(&m_jit
);
4652 // Store the typeLocation on the log entry.
4653 m_jit
.move(TrustedImmPtr(cachedTypeLocation
), scratch3GPR
);
4654 m_jit
.storePtr(scratch3GPR
, MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::locationOffset()));
4656 // Increment the current log entry.
4657 m_jit
.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry
)), scratch1GPR
);
4658 m_jit
.storePtr(scratch1GPR
, MacroAssembler::Address(scratch2GPR
, TypeProfilerLog::currentLogEntryOffset()));
4659 MacroAssembler::Jump clearLog
= m_jit
.branchPtr(MacroAssembler::Equal
, scratch1GPR
, TrustedImmPtr(cachedTypeProfilerLog
->logEndPtr()));
4660 addSlowPathGenerator(
4661 slowPathCall(clearLog
, this, operationProcessTypeProfilerLogDFG
, NoResult
));
4663 jumpToEnd
.link(&m_jit
);
4668 case ProfileControlFlow
: {
4669 BasicBlockLocation
* basicBlockLocation
= node
->basicBlockLocation();
4670 if (!basicBlockLocation
->hasExecuted()) {
4671 GPRTemporary
scratch1(this);
4672 basicBlockLocation
->emitExecuteCode(m_jit
, scratch1
.gpr());
4679 case CheckTierUpInLoop
: {
4680 MacroAssembler::Jump done
= m_jit
.branchAdd32(
4681 MacroAssembler::Signed
,
4682 TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
4683 MacroAssembler::AbsoluteAddress(&m_jit
.jitCode()->tierUpCounter
.m_counter
));
4685 silentSpillAllRegisters(InvalidGPRReg
);
4686 m_jit
.setupArgumentsExecState();
4687 appendCall(triggerTierUpNowInLoop
);
4688 silentFillAllRegisters(InvalidGPRReg
);
4694 case CheckTierUpAtReturn
: {
4695 MacroAssembler::Jump done
= m_jit
.branchAdd32(
4696 MacroAssembler::Signed
,
4697 TrustedImm32(Options::ftlTierUpCounterIncrementForReturn()),
4698 MacroAssembler::AbsoluteAddress(&m_jit
.jitCode()->tierUpCounter
.m_counter
));
4700 silentSpillAllRegisters(InvalidGPRReg
);
4701 m_jit
.setupArgumentsExecState();
4702 appendCall(triggerTierUpNow
);
4703 silentFillAllRegisters(InvalidGPRReg
);
4709 case CheckTierUpAndOSREnter
:
4710 case CheckTierUpWithNestedTriggerAndOSREnter
: {
4711 ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4713 GPRTemporary
temp(this);
4714 GPRReg tempGPR
= temp
.gpr();
4716 MacroAssembler::Jump forceOSREntry
;
4717 if (op
== CheckTierUpWithNestedTriggerAndOSREnter
)
4718 forceOSREntry
= m_jit
.branchTest8(MacroAssembler::NonZero
, MacroAssembler::AbsoluteAddress(&m_jit
.jitCode()->nestedTriggerIsSet
));
4720 MacroAssembler::Jump done
= m_jit
.branchAdd32(
4721 MacroAssembler::Signed
,
4722 TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
4723 MacroAssembler::AbsoluteAddress(&m_jit
.jitCode()->tierUpCounter
.m_counter
));
4725 if (forceOSREntry
.isSet())
4726 forceOSREntry
.link(&m_jit
);
4727 silentSpillAllRegisters(tempGPR
);
4728 m_jit
.setupArgumentsWithExecState(
4729 TrustedImm32(node
->origin
.semantic
.bytecodeIndex
),
4730 TrustedImm32(m_stream
->size()));
4731 appendCallSetResult(triggerOSREntryNow
, tempGPR
);
4732 MacroAssembler::Jump dontEnter
= m_jit
.branchTestPtr(MacroAssembler::Zero
, tempGPR
);
4733 m_jit
.jump(tempGPR
);
4734 dontEnter
.link(&m_jit
);
4735 silentFillAllRegisters(tempGPR
);
4740 #else // ENABLE(FTL_JIT)
4741 case CheckTierUpInLoop
:
4742 case CheckTierUpAtReturn
:
4743 case CheckTierUpAndOSREnter
:
4744 case CheckTierUpWithNestedTriggerAndOSREnter
:
4745 DFG_CRASH(m_jit
.graph(), node
, "Unexpected tier-up node");
4747 #endif // ENABLE(FTL_JIT)
4750 case NativeConstruct
:
4754 case ExtractOSREntryLocal
:
4757 case MultiGetByOffset
:
4758 case MultiPutByOffset
:
4762 case PhantomNewObject
:
4763 case PhantomNewFunction
:
4764 case PhantomCreateActivation
:
4765 case GetMyArgumentByVal
:
4767 case CheckStructureImmediate
:
4768 case MaterializeNewObject
:
4769 case MaterializeCreateActivation
:
4773 DFG_CRASH(m_jit
.graph(), node
, "Unexpected node");
4780 if (node
->hasResult() && node
->mustGenerate())
// Emits the generational write barrier for storing the value in valueGPR into
// the object held in ownerGPR. scratch1/scratch2 are clobbered by the buffer
// store. If the stored value is not statically known to be a cell, a runtime
// check skips the barrier for non-cell values.
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2)
{
    JITCompiler::Jump isNotCell;
    if (!isKnownCell(valueUse.node()))
        isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));

    // Owners already remembered (or in Eden) do not need to be re-added to
    // the write barrier buffer.
    JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
    storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
    ownerIsRememberedOrInEden.link(&m_jit);

    if (!isKnownCell(valueUse.node()))
        isNotCell.link(&m_jit);
}
4798 #endif // ENABLE(GGC)
// Loads the encoded JSValue for boolean true into gpr.
void SpeculativeJIT::moveTrueTo(GPRReg gpr)
{
    m_jit.move(TrustedImm32(ValueTrue), gpr);
}
// Loads the encoded JSValue for boolean false into gpr.
void SpeculativeJIT::moveFalseTo(GPRReg gpr)
{
    m_jit.move(TrustedImm32(ValueFalse), gpr);
}
// Turns a raw 0/1 machine boolean in gpr into a tagged JSValue boolean by
// OR-ing in the boolean tag (relies on ValueTrue being ValueFalse | 1 in the
// 64-bit JSValue encoding).
void SpeculativeJIT::blessBoolean(GPRReg gpr)
{
    m_jit.or32(TrustedImm32(ValueFalse), gpr);
}
// Converts the JSValue named by valueEdge into a sign-extended machine integer
// in resultGPR, speculating SpecInt32 | SpecInt52AsDouble. Boxed int32s take
// the inline fast path; boxed doubles call out to
// operationConvertBoxedDoubleToInt52, and the result is type-checked against
// the JSValue::notInt52 sentinel that the operation returns on failure.
// Speculation is manual: the caller is responsible for the edge's use kind.
void SpeculativeJIT::convertMachineInt(Edge valueEdge, GPRReg resultGPR)
{
    JSValueOperand value(this, valueEdge, ManualOperandSpeculation);
    GPRReg valueGPR = value.gpr();

    // In the 64-bit encoding, boxed int32s compare unsigned-above-or-equal to
    // the tag-type-number register; anything Below it is not an int32.
    JITCompiler::Jump notInt32 =
        m_jit.branch64(JITCompiler::Below, valueGPR, GPRInfo::tagTypeNumberRegister);

    // Fast path: low 32 bits already hold the int32 payload; sign-extend it.
    m_jit.signExtend32ToPtr(valueGPR, resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    // Slow path: spill, call the conversion operation, refill.
    notInt32.link(&m_jit);
    silentSpillAllRegisters(resultGPR);
    callOperation(operationConvertBoxedDoubleToInt52, resultGPR, valueGPR);
    silentFillAllRegisters(resultGPR);

    // Bail out if the conversion reported that the double was not an int52.
    DFG_TYPE_CHECK(
        JSValueRegs(valueGPR), valueEdge, SpecInt32 | SpecInt52AsDouble,
        m_jit.branch64(
            JITCompiler::Equal, resultGPR,
            JITCompiler::TrustedImm64(JSValue::notInt52)));

    done.link(&m_jit);
}
// Speculates that edge holds a machine int (SpecInt32 | SpecInt52AsDouble).
// The conversion result in the temporary is discarded; only the type-check
// side effect of convertMachineInt matters here.
void SpeculativeJIT::speculateMachineInt(Edge edge)
{
    // Nothing to do if abstract interpretation already proved the type.
    if (!needsTypeCheck(edge, SpecInt32 | SpecInt52AsDouble))
        return;

    GPRTemporary temp(this);
    convertMachineInt(edge, temp.gpr());
}
// Speculates that the double-rep value in edge is exactly representable as an
// int52 (SpecInt52AsDouble). Calls operationConvertDoubleToInt52 and bails
// out if it returns the JSValue::notInt52 sentinel.
void SpeculativeJIT::speculateDoubleRepMachineInt(Edge edge)
{
    // Nothing to do if abstract interpretation already proved the type.
    if (!needsTypeCheck(edge, SpecInt52AsDouble))
        return;

    SpeculateDoubleOperand value(this, edge);
    FPRReg valueFPR = value.fpr();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    // The conversion is an out-of-line call, so all registers must be flushed.
    flushRegisters();

    callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);

    // Bail out if the double was not exactly an int52.
    DFG_TYPE_CHECK(
        JSValueRegs(), edge, SpecInt52AsDouble,
        m_jit.branch64(
            JITCompiler::Equal, resultGPR,
            JITCompiler::TrustedImm64(JSValue::notInt52)));
}
4872 } } // namespace JSC::DFG