/*
 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "DFGSpeculativeJIT.h"
31 #include "Arguments.h"
32 #include "ArrayPrototype.h"
33 #include "DFGAbstractInterpreterInlines.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGOperations.h"
36 #include "DFGSlowPathGenerator.h"
38 #include "JSCInlines.h"
39 #include "ObjectPrototype.h"
40 #include "SpillRegistersMode.h"
42 namespace JSC
{ namespace DFG
{
46 void SpeculativeJIT::boxInt52(GPRReg sourceGPR
, GPRReg targetGPR
, DataFormat format
)
49 if (sourceGPR
== targetGPR
)
54 FPRReg fpr
= fprAllocate();
56 if (format
== DataFormatInt52
)
57 m_jit
.rshift64(TrustedImm32(JSValue::int52ShiftAmount
), sourceGPR
);
59 ASSERT(format
== DataFormatStrictInt52
);
61 m_jit
.boxInt52(sourceGPR
, targetGPR
, tempGPR
, fpr
);
63 if (format
== DataFormatInt52
&& sourceGPR
!= targetGPR
)
64 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), sourceGPR
);
66 if (tempGPR
!= targetGPR
)
72 GPRReg
SpeculativeJIT::fillJSValue(Edge edge
)
74 VirtualRegister virtualRegister
= edge
->virtualRegister();
75 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
77 switch (info
.registerFormat()) {
78 case DataFormatNone
: {
79 GPRReg gpr
= allocate();
81 if (edge
->hasConstant()) {
82 if (isInt32Constant(edge
.node())) {
83 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
84 JSValue jsValue
= jsNumber(valueOfInt32Constant(edge
.node()));
85 m_jit
.move(MacroAssembler::Imm64(JSValue::encode(jsValue
)), gpr
);
86 } else if (isNumberConstant(edge
.node())) {
87 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSDouble
);
88 JSValue
jsValue(JSValue::EncodeAsDouble
, valueOfNumberConstant(edge
.node()));
89 m_jit
.move(MacroAssembler::Imm64(JSValue::encode(jsValue
)), gpr
);
91 ASSERT(isJSConstant(edge
.node()));
92 JSValue jsValue
= valueOfJSConstant(edge
.node());
93 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
94 info
.fillJSValue(*m_stream
, gpr
, DataFormatJS
);
97 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
99 DataFormat spillFormat
= info
.spillFormat();
100 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
101 switch (spillFormat
) {
102 case DataFormatInt32
: {
103 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
104 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
105 spillFormat
= DataFormatJSInt32
;
110 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
111 RELEASE_ASSERT(spillFormat
& DataFormatJS
);
114 info
.fillJSValue(*m_stream
, gpr
, spillFormat
);
119 case DataFormatInt32
: {
120 GPRReg gpr
= info
.gpr();
121 // If the register has already been locked we need to take a copy.
122 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
123 if (m_gprs
.isLocked(gpr
)) {
124 GPRReg result
= allocate();
125 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
, result
);
129 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
130 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
135 // No retag required on JSVALUE64!
137 case DataFormatJSInt32
:
138 case DataFormatJSDouble
:
139 case DataFormatJSCell
:
140 case DataFormatJSBoolean
: {
141 GPRReg gpr
= info
.gpr();
146 case DataFormatBoolean
:
147 case DataFormatStorage
:
148 case DataFormatDouble
:
149 case DataFormatInt52
:
150 // this type currently never occurs
151 RELEASE_ASSERT_NOT_REACHED();
154 RELEASE_ASSERT_NOT_REACHED();
155 return InvalidGPRReg
;
159 void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg resultGPR
, unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
161 JITGetByIdGenerator
gen(
162 m_jit
.codeBlock(), codeOrigin
, usedRegisters(), JSValueRegs(baseGPR
),
163 JSValueRegs(resultGPR
), spillMode
);
164 gen
.generateFastPath(m_jit
);
166 JITCompiler::JumpList slowCases
;
167 if (slowPathTarget
.isSet())
168 slowCases
.append(slowPathTarget
);
169 slowCases
.append(gen
.slowPathJump());
171 OwnPtr
<SlowPathGenerator
> slowPath
= slowPathCall(
172 slowCases
, this, operationGetByIdOptimize
, resultGPR
, gen
.stubInfo(), baseGPR
,
173 identifierUID(identifierNumber
), spillMode
);
175 m_jit
.addGetById(gen
, slowPath
.get());
176 addSlowPathGenerator(slowPath
.release());
179 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg valueGPR
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind putKind
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
181 JITPutByIdGenerator
gen(
182 m_jit
.codeBlock(), codeOrigin
, usedRegisters(), JSValueRegs(baseGPR
),
183 JSValueRegs(valueGPR
), scratchGPR
, spillMode
, m_jit
.ecmaModeFor(codeOrigin
), putKind
);
185 gen
.generateFastPath(m_jit
);
187 JITCompiler::JumpList slowCases
;
188 if (slowPathTarget
.isSet())
189 slowCases
.append(slowPathTarget
);
190 slowCases
.append(gen
.slowPathJump());
192 OwnPtr
<SlowPathGenerator
> slowPath
= slowPathCall(
193 slowCases
, this, gen
.slowPathFunction(), NoResult
, gen
.stubInfo(), valueGPR
, baseGPR
,
194 identifierUID(identifierNumber
));
196 m_jit
.addPutById(gen
, slowPath
.get());
197 addSlowPathGenerator(slowPath
.release());
200 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
)
202 JSValueOperand
arg(this, operand
);
203 GPRReg argGPR
= arg
.gpr();
205 GPRTemporary
result(this, Reuse
, arg
);
206 GPRReg resultGPR
= result
.gpr();
208 JITCompiler::Jump notCell
;
210 JITCompiler::Jump notMasqueradesAsUndefined
;
211 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
212 if (!isKnownCell(operand
.node()))
213 notCell
= branchNotCell(JSValueRegs(argGPR
));
215 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultGPR
);
216 notMasqueradesAsUndefined
= m_jit
.jump();
218 GPRTemporary
localGlobalObject(this);
219 GPRTemporary
remoteGlobalObject(this);
220 GPRTemporary
scratch(this);
222 if (!isKnownCell(operand
.node()))
223 notCell
= branchNotCell(JSValueRegs(argGPR
));
225 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
226 JITCompiler::NonZero
,
227 JITCompiler::Address(argGPR
, JSCell::typeInfoFlagsOffset()),
228 JITCompiler::TrustedImm32(MasqueradesAsUndefined
));
230 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultGPR
);
231 notMasqueradesAsUndefined
= m_jit
.jump();
233 isMasqueradesAsUndefined
.link(&m_jit
);
234 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
235 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
236 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
237 m_jit
.emitLoadStructure(argGPR
, resultGPR
, scratch
.gpr());
238 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
239 m_jit
.comparePtr(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, resultGPR
);
242 if (!isKnownCell(operand
.node())) {
243 JITCompiler::Jump done
= m_jit
.jump();
245 notCell
.link(&m_jit
);
247 m_jit
.move(argGPR
, resultGPR
);
248 m_jit
.and64(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
249 m_jit
.compare64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm32(ValueNull
), resultGPR
);
254 notMasqueradesAsUndefined
.link(&m_jit
);
256 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
257 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
260 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand
, Node
* branchNode
, bool invert
)
262 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
263 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
265 if (taken
== nextBlock()) {
267 BasicBlock
* tmp
= taken
;
272 JSValueOperand
arg(this, operand
);
273 GPRReg argGPR
= arg
.gpr();
275 GPRTemporary
result(this, Reuse
, arg
);
276 GPRReg resultGPR
= result
.gpr();
278 JITCompiler::Jump notCell
;
280 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
281 if (!isKnownCell(operand
.node()))
282 notCell
= branchNotCell(JSValueRegs(argGPR
));
284 jump(invert
? taken
: notTaken
, ForceJump
);
286 GPRTemporary
localGlobalObject(this);
287 GPRTemporary
remoteGlobalObject(this);
288 GPRTemporary
scratch(this);
290 if (!isKnownCell(operand
.node()))
291 notCell
= branchNotCell(JSValueRegs(argGPR
));
293 branchTest8(JITCompiler::Zero
,
294 JITCompiler::Address(argGPR
, JSCell::typeInfoFlagsOffset()),
295 JITCompiler::TrustedImm32(MasqueradesAsUndefined
),
296 invert
? taken
: notTaken
);
298 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
299 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
300 m_jit
.move(TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
301 m_jit
.emitLoadStructure(argGPR
, resultGPR
, scratch
.gpr());
302 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
303 branchPtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, invert
? notTaken
: taken
);
306 if (!isKnownCell(operand
.node())) {
307 jump(notTaken
, ForceJump
);
309 notCell
.link(&m_jit
);
311 m_jit
.move(argGPR
, resultGPR
);
312 m_jit
.and64(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
313 branch64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm64(ValueNull
), taken
);
319 bool SpeculativeJIT::nonSpeculativeCompareNull(Node
* node
, Edge operand
, bool invert
)
321 unsigned branchIndexInBlock
= detectPeepHoleBranch();
322 if (branchIndexInBlock
!= UINT_MAX
) {
323 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
325 RELEASE_ASSERT(node
->adjustedRefCount() == 1);
327 nonSpeculativePeepholeBranchNull(operand
, branchNode
, invert
);
331 m_indexInBlock
= branchIndexInBlock
;
332 m_currentNode
= branchNode
;
337 nonSpeculativeNonPeepholeCompareNull(operand
, invert
);
342 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node
* node
, Node
* branchNode
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
344 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
345 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
347 JITCompiler::ResultCondition callResultCondition
= JITCompiler::NonZero
;
349 // The branch instruction will branch to the taken block.
350 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
351 if (taken
== nextBlock()) {
352 cond
= JITCompiler::invert(cond
);
353 callResultCondition
= JITCompiler::Zero
;
354 BasicBlock
* tmp
= taken
;
359 JSValueOperand
arg1(this, node
->child1());
360 JSValueOperand
arg2(this, node
->child2());
361 GPRReg arg1GPR
= arg1
.gpr();
362 GPRReg arg2GPR
= arg2
.gpr();
364 JITCompiler::JumpList slowPath
;
366 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
367 GPRResult
result(this);
368 GPRReg resultGPR
= result
.gpr();
374 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
376 branchTest32(callResultCondition
, resultGPR
, taken
);
378 GPRTemporary
result(this, Reuse
, arg2
);
379 GPRReg resultGPR
= result
.gpr();
384 if (!isKnownInteger(node
->child1().node()))
385 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
386 if (!isKnownInteger(node
->child2().node()))
387 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
389 branch32(cond
, arg1GPR
, arg2GPR
, taken
);
391 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
392 jump(notTaken
, ForceJump
);
394 slowPath
.link(&m_jit
);
396 silentSpillAllRegisters(resultGPR
);
397 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
398 silentFillAllRegisters(resultGPR
);
400 branchTest32(callResultCondition
, resultGPR
, taken
);
406 m_indexInBlock
= m_block
->size() - 1;
407 m_currentNode
= branchNode
;
410 template<typename JumpType
>
411 class CompareAndBoxBooleanSlowPathGenerator
412 : public CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
> {
414 CompareAndBoxBooleanSlowPathGenerator(
415 JumpType from
, SpeculativeJIT
* jit
,
416 S_JITOperation_EJJ function
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
417 : CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
>(
418 from
, jit
, function
, NeedToSpill
, result
)
425 virtual void generateInternal(SpeculativeJIT
* jit
) override
428 this->recordCall(jit
->callOperation(this->m_function
, this->m_result
, m_arg1
, m_arg2
));
429 jit
->m_jit
.and32(JITCompiler::TrustedImm32(1), this->m_result
);
430 jit
->m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), this->m_result
);
439 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node
* node
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
441 ASSERT(node
->isBinaryUseKind(UntypedUse
));
442 JSValueOperand
arg1(this, node
->child1());
443 JSValueOperand
arg2(this, node
->child2());
444 GPRReg arg1GPR
= arg1
.gpr();
445 GPRReg arg2GPR
= arg2
.gpr();
447 JITCompiler::JumpList slowPath
;
449 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
450 GPRResult
result(this);
451 GPRReg resultGPR
= result
.gpr();
457 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
459 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
460 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
462 GPRTemporary
result(this, Reuse
, arg2
);
463 GPRReg resultGPR
= result
.gpr();
468 if (!isKnownInteger(node
->child1().node()))
469 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
470 if (!isKnownInteger(node
->child2().node()))
471 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
473 m_jit
.compare32(cond
, arg1GPR
, arg2GPR
, resultGPR
);
474 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
476 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
477 addSlowPathGenerator(adoptPtr(
478 new CompareAndBoxBooleanSlowPathGenerator
<JITCompiler::JumpList
>(
479 slowPath
, this, helperFunction
, resultGPR
, arg1GPR
, arg2GPR
)));
482 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
486 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node
* node
, Node
* branchNode
, bool invert
)
488 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
489 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
491 // The branch instruction will branch to the taken block.
492 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
493 if (taken
== nextBlock()) {
495 BasicBlock
* tmp
= taken
;
500 JSValueOperand
arg1(this, node
->child1());
501 JSValueOperand
arg2(this, node
->child2());
502 GPRReg arg1GPR
= arg1
.gpr();
503 GPRReg arg2GPR
= arg2
.gpr();
505 GPRTemporary
result(this);
506 GPRReg resultGPR
= result
.gpr();
511 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
512 // see if we get lucky: if the arguments are cells and they reference the same
513 // cell, then they must be strictly equal.
514 branch64(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
516 silentSpillAllRegisters(resultGPR
);
517 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
518 silentFillAllRegisters(resultGPR
);
520 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
522 m_jit
.or64(arg1GPR
, arg2GPR
, resultGPR
);
524 JITCompiler::Jump twoCellsCase
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
526 JITCompiler::Jump leftOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
527 JITCompiler::Jump leftDouble
= m_jit
.branchTest64(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
529 JITCompiler::Jump rightOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
530 JITCompiler::Jump rightDouble
= m_jit
.branchTest64(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
531 rightOK
.link(&m_jit
);
533 branch64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, taken
);
534 jump(notTaken
, ForceJump
);
536 twoCellsCase
.link(&m_jit
);
537 branch64(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
539 leftDouble
.link(&m_jit
);
540 rightDouble
.link(&m_jit
);
542 silentSpillAllRegisters(resultGPR
);
543 callOperation(operationCompareStrictEq
, resultGPR
, arg1GPR
, arg2GPR
);
544 silentFillAllRegisters(resultGPR
);
546 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
552 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node
* node
, bool invert
)
554 JSValueOperand
arg1(this, node
->child1());
555 JSValueOperand
arg2(this, node
->child2());
556 GPRReg arg1GPR
= arg1
.gpr();
557 GPRReg arg2GPR
= arg2
.gpr();
559 GPRTemporary
result(this);
560 GPRReg resultGPR
= result
.gpr();
565 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
566 // see if we get lucky: if the arguments are cells and they reference the same
567 // cell, then they must be strictly equal.
568 // FIXME: this should flush registers instead of silent spill/fill.
569 JITCompiler::Jump notEqualCase
= m_jit
.branch64(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
);
571 m_jit
.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
573 JITCompiler::Jump done
= m_jit
.jump();
575 notEqualCase
.link(&m_jit
);
577 silentSpillAllRegisters(resultGPR
);
578 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
579 silentFillAllRegisters(resultGPR
);
581 m_jit
.and64(JITCompiler::TrustedImm32(1), resultGPR
);
582 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
586 m_jit
.or64(arg1GPR
, arg2GPR
, resultGPR
);
588 JITCompiler::JumpList slowPathCases
;
590 JITCompiler::Jump twoCellsCase
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
592 JITCompiler::Jump leftOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
593 slowPathCases
.append(m_jit
.branchTest64(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
595 JITCompiler::Jump rightOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
596 slowPathCases
.append(m_jit
.branchTest64(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
597 rightOK
.link(&m_jit
);
599 m_jit
.compare64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, resultGPR
);
600 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
602 JITCompiler::Jump done
= m_jit
.jump();
604 twoCellsCase
.link(&m_jit
);
605 slowPathCases
.append(m_jit
.branch64(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
));
607 m_jit
.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
609 addSlowPathGenerator(
611 new CompareAndBoxBooleanSlowPathGenerator
<MacroAssembler::JumpList
>(
612 slowPathCases
, this, operationCompareStrictEq
, resultGPR
, arg1GPR
,
618 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
621 void SpeculativeJIT::compileMiscStrictEq(Node
* node
)
623 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
624 JSValueOperand
op2(this, node
->child2(), ManualOperandSpeculation
);
625 GPRTemporary
result(this);
627 if (node
->child1().useKind() == MiscUse
)
628 speculateMisc(node
->child1(), op1
.jsValueRegs());
629 if (node
->child2().useKind() == MiscUse
)
630 speculateMisc(node
->child2(), op2
.jsValueRegs());
632 m_jit
.compare64(JITCompiler::Equal
, op1
.gpr(), op2
.gpr(), result
.gpr());
633 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
634 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
637 void SpeculativeJIT::emitCall(Node
* node
)
639 if (node
->op() != Call
)
640 RELEASE_ASSERT(node
->op() == Construct
);
642 // For constructors, the this argument is not passed but we have to make space
644 int dummyThisArgument
= node
->op() == Call
? 0 : 1;
646 CallLinkInfo::CallType callType
= node
->op() == Call
? CallLinkInfo::Call
: CallLinkInfo::Construct
;
648 Edge calleeEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild()];
649 JSValueOperand
callee(this, calleeEdge
);
650 GPRReg calleeGPR
= callee
.gpr();
653 // The call instruction's first child is the function; the subsequent children are the
655 int numPassedArgs
= node
->numChildren() - 1;
657 int numArgs
= numPassedArgs
+ dummyThisArgument
;
659 m_jit
.store32(MacroAssembler::TrustedImm32(numArgs
), calleeFramePayloadSlot(JSStack::ArgumentCount
));
660 m_jit
.store64(calleeGPR
, calleeFrameSlot(JSStack::Callee
));
662 for (int i
= 0; i
< numPassedArgs
; i
++) {
663 Edge argEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + 1 + i
];
664 JSValueOperand
arg(this, argEdge
);
665 GPRReg argGPR
= arg
.gpr();
668 m_jit
.store64(argGPR
, calleeArgumentSlot(i
+ dummyThisArgument
));
673 GPRResult
result(this);
674 GPRReg resultGPR
= result
.gpr();
676 JITCompiler::DataLabelPtr targetToCheck
;
677 JITCompiler::Jump slowPath
;
679 m_jit
.emitStoreCodeOrigin(node
->origin
.semantic
);
681 slowPath
= m_jit
.branchPtrWithPatch(MacroAssembler::NotEqual
, calleeGPR
, targetToCheck
, MacroAssembler::TrustedImmPtr(0));
683 m_jit
.loadPtr(MacroAssembler::Address(calleeGPR
, OBJECT_OFFSETOF(JSFunction
, m_scope
)), resultGPR
);
684 m_jit
.store64(resultGPR
, calleeFrameSlot(JSStack::ScopeChain
));
686 JITCompiler::Call fastCall
= m_jit
.nearCall();
688 JITCompiler::Jump done
= m_jit
.jump();
690 slowPath
.link(&m_jit
);
692 m_jit
.move(calleeGPR
, GPRInfo::regT0
); // Callee needs to be in regT0
693 CallLinkInfo
* callLinkInfo
= m_jit
.codeBlock()->addCallLinkInfo();
694 m_jit
.move(MacroAssembler::TrustedImmPtr(callLinkInfo
), GPRInfo::regT2
); // Link info needs to be in regT2
695 JITCompiler::Call slowCall
= m_jit
.nearCall();
699 m_jit
.move(GPRInfo::returnValueGPR
, resultGPR
);
701 jsValueResult(resultGPR
, m_currentNode
, DataFormatJS
, UseChildrenCalledExplicitly
);
703 callLinkInfo
->callType
= callType
;
704 callLinkInfo
->codeOrigin
= m_currentNode
->origin
.semantic
;
705 callLinkInfo
->calleeGPR
= calleeGPR
;
707 m_jit
.addJSCall(fastCall
, slowCall
, targetToCheck
, callLinkInfo
);
710 // Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it
711 // http://llvm.org/bugs/show_bug.cgi?id=18619
712 #if COMPILER(CLANG) && defined(__has_warning)
713 #pragma clang diagnostic push
714 #if __has_warning("-Wimplicit-fallthrough")
715 #pragma clang diagnostic ignored "-Wimplicit-fallthrough"
718 template<bool strict
>
719 GPRReg
SpeculativeJIT::fillSpeculateInt32Internal(Edge edge
, DataFormat
& returnFormat
)
721 AbstractValue
& value
= m_state
.forNode(edge
);
722 SpeculatedType type
= value
.m_type
;
723 ASSERT(edge
.useKind() != KnownInt32Use
|| !(value
.m_type
& ~SpecInt32
));
724 m_interpreter
.filter(value
, SpecInt32
);
725 VirtualRegister virtualRegister
= edge
->virtualRegister();
726 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
728 if (edge
->hasConstant() && !isInt32Constant(edge
.node())) {
729 // Protect the silent spill/fill logic by failing early. If we "speculate" on
730 // the constant then the silent filler may think that we have an int32 and a
731 // constant, so it will try to fill this as an int32 constant. Bad things will
733 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
734 returnFormat
= DataFormatInt32
;
738 switch (info
.registerFormat()) {
739 case DataFormatNone
: {
740 GPRReg gpr
= allocate();
742 if (edge
->hasConstant()) {
743 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
744 ASSERT(isInt32Constant(edge
.node()));
745 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
746 info
.fillInt32(*m_stream
, gpr
);
747 returnFormat
= DataFormatInt32
;
751 DataFormat spillFormat
= info
.spillFormat();
753 RELEASE_ASSERT((spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInt32
);
755 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
757 if (spillFormat
== DataFormatJSInt32
|| spillFormat
== DataFormatInt32
) {
758 // If we know this was spilled as an integer we can fill without checking.
760 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
761 info
.fillInt32(*m_stream
, gpr
);
762 returnFormat
= DataFormatInt32
;
765 if (spillFormat
== DataFormatInt32
) {
766 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
767 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
769 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
770 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
771 returnFormat
= DataFormatJSInt32
;
774 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
776 // Fill as JSValue, and fall through.
777 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
783 RELEASE_ASSERT(!(type
& SpecInt52
));
784 // Check the value is an integer.
785 GPRReg gpr
= info
.gpr();
787 if (type
& ~SpecInt32
)
788 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branch64(MacroAssembler::Below
, gpr
, GPRInfo::tagTypeNumberRegister
));
789 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInt32
);
790 // If !strict we're done, return.
792 returnFormat
= DataFormatJSInt32
;
795 // else fall through & handle as DataFormatJSInt32.
800 case DataFormatJSInt32
: {
801 // In a strict fill we need to strip off the value tag.
803 GPRReg gpr
= info
.gpr();
805 // If the register has already been locked we need to take a copy.
806 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
807 if (m_gprs
.isLocked(gpr
))
811 info
.fillInt32(*m_stream
, gpr
);
814 m_jit
.zeroExtend32ToPtr(gpr
, result
);
815 returnFormat
= DataFormatInt32
;
819 GPRReg gpr
= info
.gpr();
821 returnFormat
= DataFormatJSInt32
;
825 case DataFormatInt32
: {
826 GPRReg gpr
= info
.gpr();
828 returnFormat
= DataFormatInt32
;
832 case DataFormatJSDouble
:
834 case DataFormatBoolean
:
835 case DataFormatJSCell
:
836 case DataFormatJSBoolean
: {
837 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
838 returnFormat
= DataFormatInt32
;
842 case DataFormatDouble
:
843 case DataFormatStorage
:
844 case DataFormatInt52
:
845 case DataFormatStrictInt52
:
846 RELEASE_ASSERT_NOT_REACHED();
849 RELEASE_ASSERT_NOT_REACHED();
850 return InvalidGPRReg
;
853 #if COMPILER(CLANG) && defined(__has_warning)
854 #pragma clang diagnostic pop
857 GPRReg
SpeculativeJIT::fillSpeculateInt32(Edge edge
, DataFormat
& returnFormat
)
859 return fillSpeculateInt32Internal
<false>(edge
, returnFormat
);
862 GPRReg
SpeculativeJIT::fillSpeculateInt32Strict(Edge edge
)
864 DataFormat mustBeDataFormatInt32
;
865 GPRReg result
= fillSpeculateInt32Internal
<true>(edge
, mustBeDataFormatInt32
);
866 RELEASE_ASSERT(mustBeDataFormatInt32
== DataFormatInt32
);
870 GPRReg
SpeculativeJIT::fillSpeculateInt52(Edge edge
, DataFormat desiredFormat
)
872 ASSERT(desiredFormat
== DataFormatInt52
|| desiredFormat
== DataFormatStrictInt52
);
873 AbstractValue
& value
= m_state
.forNode(edge
);
874 m_interpreter
.filter(value
, SpecMachineInt
);
875 VirtualRegister virtualRegister
= edge
->virtualRegister();
876 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
878 switch (info
.registerFormat()) {
879 case DataFormatNone
: {
880 if ((edge
->hasConstant() && !valueOfJSConstant(edge
.node()).isMachineInt())) {
881 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
885 GPRReg gpr
= allocate();
887 if (edge
->hasConstant()) {
888 JSValue jsValue
= valueOfJSConstant(edge
.node());
889 ASSERT(jsValue
.isMachineInt());
890 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
891 int64_t value
= jsValue
.asMachineInt();
892 if (desiredFormat
== DataFormatInt52
)
893 value
= value
<< JSValue::int52ShiftAmount
;
894 m_jit
.move(MacroAssembler::Imm64(value
), gpr
);
895 info
.fillGPR(*m_stream
, gpr
, desiredFormat
);
899 DataFormat spillFormat
= info
.spillFormat();
901 RELEASE_ASSERT(spillFormat
== DataFormatInt52
|| spillFormat
== DataFormatStrictInt52
);
903 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
905 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
906 if (desiredFormat
== DataFormatStrictInt52
) {
907 if (spillFormat
== DataFormatInt52
)
908 m_jit
.rshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
909 info
.fillStrictInt52(*m_stream
, gpr
);
912 if (spillFormat
== DataFormatStrictInt52
)
913 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
914 info
.fillInt52(*m_stream
, gpr
);
918 case DataFormatStrictInt52
: {
919 GPRReg gpr
= info
.gpr();
920 bool wasLocked
= m_gprs
.isLocked(gpr
);
922 if (desiredFormat
== DataFormatStrictInt52
)
925 GPRReg result
= allocate();
926 m_jit
.move(gpr
, result
);
930 info
.fillInt52(*m_stream
, gpr
);
931 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
935 case DataFormatInt52
: {
936 GPRReg gpr
= info
.gpr();
937 bool wasLocked
= m_gprs
.isLocked(gpr
);
939 if (desiredFormat
== DataFormatInt52
)
942 GPRReg result
= allocate();
943 m_jit
.move(gpr
, result
);
947 info
.fillStrictInt52(*m_stream
, gpr
);
948 m_jit
.rshift64(TrustedImm32(JSValue::int52ShiftAmount
), gpr
);
953 RELEASE_ASSERT_NOT_REACHED();
954 return InvalidGPRReg
;
// Fills the double-typed value for |edge| into an FPR and returns it.
// Handles three register-file states: not-yet-filled (constant or spilled),
// and already filled as a double. On impossible speculation the current
// execution is terminated and a dummy FPR is returned.
// NOTE(review): this chunk's formatting was mangled by extraction (statements
// split across lines, original line numbers embedded, some closing lines
// dropped); code text is preserved byte-for-byte, comments only are added.
958 FPRReg
SpeculativeJIT::fillSpeculateDouble(Edge edge
)
// Only DoubleRep* use kinds may reach this fill path.
960 ASSERT(edge
.useKind() == DoubleRepUse
|| edge
.useKind() == DoubleRepRealUse
|| edge
.useKind() == DoubleRepMachineIntUse
);
961 ASSERT(edge
->hasDoubleResult());
962 VirtualRegister virtualRegister
= edge
->virtualRegister();
963 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
// Case: value not yet in a register.
965 if (info
.registerFormat() == DataFormatNone
) {
966 if (edge
->hasConstant()) {
967 GPRReg gpr
= allocate();
// Number constant: materialize its bit pattern in a GPR, then move to FPR.
969 if (isNumberConstant(edge
.node())) {
970 FPRReg fpr
= fprAllocate();
971 m_jit
.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(edge
.node()))), gpr
);
972 m_jit
.move64ToDouble(gpr
, fpr
);
975 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
976 info
.fillDouble(*m_stream
, fpr
);
// Non-number constant: the double speculation cannot hold.
979 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
980 return fprAllocate();
// Case: value was spilled; it must have been spilled as a double.
983 DataFormat spillFormat
= info
.spillFormat();
984 RELEASE_ASSERT(spillFormat
== DataFormatDouble
);
985 FPRReg fpr
= fprAllocate();
986 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
987 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
988 info
.fillDouble(*m_stream
, fpr
);
// Case: already filled — must already be in double format.
992 RELEASE_ASSERT(info
.registerFormat() == DataFormatDouble
);
993 FPRReg fpr
= info
.fpr();
// Fills the value for |edge| into a GPR while speculating that it is a cell.
// Dispatches on the value's current register format; emits a not-cell
// speculation check where the abstract value does not already prove cell-ness.
// NOTE(review): mangled extraction — statements are split across lines with
// embedded original line numbers and some lines dropped; only comments added.
998 GPRReg
SpeculativeJIT::fillSpeculateCell(Edge edge
)
1000 AbstractValue
& value
= m_state
.forNode(edge
);
1001 SpeculatedType type
= value
.m_type
;
// Known* use kinds promise the CFA already proved the value is a cell.
1002 ASSERT((edge
.useKind() != KnownCellUse
&& edge
.useKind() != KnownStringUse
) || !(value
.m_type
& ~SpecCell
));
// Narrow the abstract value to reflect the speculation we are about to emit.
1003 m_interpreter
.filter(value
, SpecCell
);
1004 VirtualRegister virtualRegister
= edge
->virtualRegister();
1005 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1007 switch (info
.registerFormat()) {
// Not yet in a register: fill from constant or from the spill slot.
1008 case DataFormatNone
: {
1009 GPRReg gpr
= allocate();
1011 if (edge
->hasConstant()) {
1012 JSValue jsValue
= valueOfJSConstant(edge
.node());
1013 if (jsValue
.isCell()) {
1014 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1015 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
1016 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSCell
);
// Non-cell constant: speculation is impossible.
1019 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
// A spill format without the JS tag cannot hold a cell.
1023 if (!(info
.spillFormat() & DataFormatJS
)) {
1024 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1028 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1029 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
1031 info
.fillJSValue(*m_stream
, gpr
, DataFormatJS
);
// Only check for cell-ness if the abstract value does not prove it.
1032 if (type
& ~SpecCell
)
1033 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, branchNotCell(JSValueRegs(gpr
)));
1034 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSCell
);
// Already known to be a cell: assert-only sanity check in debug builds.
1038 case DataFormatCell
:
1039 case DataFormatJSCell
: {
1040 GPRReg gpr
= info
.gpr();
1042 if (!ASSERT_DISABLED
) {
1043 MacroAssembler::Jump checkCell
= branchIsCell(JSValueRegs(gpr
));
1044 m_jit
.abortWithReason(DFGIsNotCell
);
1045 checkCell
.link(&m_jit
);
// Boxed JSValue in a register: add the not-cell check, then re-tag as cell.
1050 case DataFormatJS
: {
1051 GPRReg gpr
= info
.gpr();
1053 if (type
& ~SpecCell
)
1054 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, branchNotCell(JSValueRegs(gpr
)));
1055 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSCell
);
// Formats that provably cannot hold a cell: terminate.
1059 case DataFormatJSInt32
:
1060 case DataFormatInt32
:
1061 case DataFormatJSDouble
:
1062 case DataFormatJSBoolean
:
1063 case DataFormatBoolean
: {
1064 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
// Formats that should never reach a cell fill at all.
1068 case DataFormatDouble
:
1069 case DataFormatStorage
:
1070 case DataFormatInt52
:
1071 case DataFormatStrictInt52
:
1072 RELEASE_ASSERT_NOT_REACHED();
1075 RELEASE_ASSERT_NOT_REACHED();
1076 return InvalidGPRReg
;
// Fills the value for |edge| into a GPR while speculating that it is a
// boolean. The boolean check uses the xor-with-ValueFalse trick: after the
// xor, a boxed boolean becomes 0 or 1, so any bit outside the low bit means
// "not a boolean"; the second xor restores the original boxed value.
// NOTE(review): mangled extraction — code preserved byte-for-byte, comments only.
1080 GPRReg
SpeculativeJIT::fillSpeculateBoolean(Edge edge
)
1082 AbstractValue
& value
= m_state
.forNode(edge
);
1083 SpeculatedType type
= value
.m_type
;
1084 m_interpreter
.filter(value
, SpecBoolean
);
1085 VirtualRegister virtualRegister
= edge
->virtualRegister();
1086 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1088 switch (info
.registerFormat()) {
1089 case DataFormatNone
: {
// A raw int32 spill can never satisfy a boolean speculation.
1090 if (info
.spillFormat() == DataFormatInt32
) {
1091 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1095 GPRReg gpr
= allocate();
1097 if (edge
->hasConstant()) {
1098 JSValue jsValue
= valueOfJSConstant(edge
.node());
1099 if (jsValue
.isBoolean()) {
1100 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1101 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
1102 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSBoolean
);
1105 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
// Fill from the spill slot; must have been spilled as a boxed JSValue.
1108 RELEASE_ASSERT(info
.spillFormat() & DataFormatJS
);
1109 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1110 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
1112 info
.fillJSValue(*m_stream
, gpr
, DataFormatJS
);
1113 if (type
& ~SpecBoolean
) {
// xor ValueFalse: booleans map to 0/1; anything else keeps high bits set.
1114 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
// SpeculationRecovery re-applies the xor on the OSR-exit path.
1115 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branchTest64(MacroAssembler::NonZero
, gpr
, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck
, gpr
, InvalidGPRReg
));
1116 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1118 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSBoolean
);
// Already known boolean: nothing to check.
1122 case DataFormatBoolean
:
1123 case DataFormatJSBoolean
: {
1124 GPRReg gpr
= info
.gpr();
// Boxed JSValue in a register: same xor-check-xor dance as above.
1129 case DataFormatJS
: {
1130 GPRReg gpr
= info
.gpr();
1132 if (type
& ~SpecBoolean
) {
1133 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1134 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branchTest64(MacroAssembler::NonZero
, gpr
, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck
, gpr
, InvalidGPRReg
));
1135 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), gpr
);
1137 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSBoolean
);
// Formats that provably cannot hold a boolean: terminate.
1141 case DataFormatJSInt32
:
1142 case DataFormatInt32
:
1143 case DataFormatJSDouble
:
1144 case DataFormatJSCell
:
1145 case DataFormatCell
:
1146 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
// Formats that should never reach a boolean fill.
1149 case DataFormatDouble
:
1150 case DataFormatStorage
:
1151 case DataFormatInt52
:
1152 case DataFormatStrictInt52
:
1153 RELEASE_ASSERT_NOT_REACHED();
1156 RELEASE_ASSERT_NOT_REACHED();
1157 return InvalidGPRReg
;
1161 void SpeculativeJIT::compileBaseValueStoreBarrier(Edge
& baseEdge
, Edge
& valueEdge
)
1164 ASSERT(!isKnownNotCell(valueEdge
.node()));
1166 SpeculateCellOperand
base(this, baseEdge
);
1167 JSValueOperand
value(this, valueEdge
);
1168 GPRTemporary
scratch1(this);
1169 GPRTemporary
scratch2(this);
1171 writeBarrier(base
.gpr(), value
.gpr(), valueEdge
, scratch1
.gpr(), scratch2
.gpr());
1173 UNUSED_PARAM(baseEdge
);
1174 UNUSED_PARAM(valueEdge
);
// Compiles Node as a strict pointer-equality comparison of two values
// speculated to be non-string objects, producing a boxed JS boolean.
// When the masquerades-as-undefined watchpoint holds, only the "is not a
// string" structure checks are needed; otherwise each operand is also
// checked for the MasqueradesAsUndefined type-info flag.
// NOTE(review): mangled extraction — the DFG_TYPE_CHECK( opener lines were
// dropped before each structure-check argument list; code kept byte-for-byte.
1178 void SpeculativeJIT::compileObjectEquality(Node
* node
)
1180 SpeculateCellOperand
op1(this, node
->child1());
1181 SpeculateCellOperand
op2(this, node
->child2());
1182 GPRTemporary
result(this, Reuse
, op1
);
1184 GPRReg op1GPR
= op1
.gpr();
1185 GPRReg op2GPR
= op2
.gpr();
1186 GPRReg resultGPR
= result
.gpr();
// Fast path: watchpoint valid — only rule out string structures.
1188 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1190 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchStructurePtr(
1191 MacroAssembler::Equal
,
1192 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1193 m_jit
.vm()->stringStructure
.get()));
1195 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchStructurePtr(
1196 MacroAssembler::Equal
,
1197 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1198 m_jit
.vm()->stringStructure
.get()));
// Slow path: also speculate neither operand masquerades as undefined.
1201 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchStructurePtr(
1202 MacroAssembler::Equal
,
1203 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1204 m_jit
.vm()->stringStructure
.get()));
1205 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1207 MacroAssembler::NonZero
,
1208 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1209 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1212 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchStructurePtr(
1213 MacroAssembler::Equal
,
1214 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1215 m_jit
.vm()->stringStructure
.get()));
1216 speculationCheck(BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1218 MacroAssembler::NonZero
,
1219 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1220 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
// Pointer comparison, then box the result as ValueTrue / ValueFalse.
1223 MacroAssembler::Jump falseCase
= m_jit
.branch64(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1224 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1225 MacroAssembler::Jump done
= m_jit
.jump();
1226 falseCase
.link(&m_jit
);
1227 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1230 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
// Compiles == where the left child is speculated to be an object and the
// right child either an object or null/undefined ("other"). Object==object
// is a pointer comparison; object==other is always false. Result is a boxed
// JS boolean.
// NOTE(review): mangled extraction — DFG_TYPE_CHECK( opener lines dropped;
// code kept byte-for-byte, comments only added.
1233 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
)
1235 SpeculateCellOperand
op1(this, leftChild
);
1236 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1237 GPRTemporary
result(this);
1239 GPRReg op1GPR
= op1
.gpr();
1240 GPRReg op2GPR
= op2
.gpr();
1241 GPRReg resultGPR
= result
.gpr();
1243 bool masqueradesAsUndefinedWatchpointValid
=
1244 masqueradesAsUndefinedWatchpointIsStillValid();
// Speculate the left operand is a non-string object (plus the
// MasqueradesAsUndefined flag check when the watchpoint is invalid).
1246 if (masqueradesAsUndefinedWatchpointValid
) {
1248 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchStructurePtr(
1249 MacroAssembler::Equal
,
1250 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1251 m_jit
.vm()->stringStructure
.get()));
1254 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchStructurePtr(
1255 MacroAssembler::Equal
,
1256 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1257 m_jit
.vm()->stringStructure
.get()));
1258 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1260 MacroAssembler::NonZero
,
1261 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1262 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1265 // It seems that most of the time when programs do a == b where b may be either null/undefined
1266 // or an object, b is usually an object. Balance the branches to make that case fast.
1267 MacroAssembler::Jump rightNotCell
= branchNotCell(JSValueRegs(op2GPR
));
1269 // We know that within this branch, rightChild must be a cell.
1270 if (masqueradesAsUndefinedWatchpointValid
) {
1272 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1273 MacroAssembler::Equal
,
1274 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1275 m_jit
.vm()->stringStructure
.get()));
1278 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1279 MacroAssembler::Equal
,
1280 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1281 m_jit
.vm()->stringStructure
.get()));
1282 speculationCheck(BadType
, JSValueRegs(op2GPR
), rightChild
,
1284 MacroAssembler::NonZero
,
1285 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1286 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1289 // At this point we know that we can perform a straight-forward equality comparison on pointer
1290 // values because both left and right are pointers to objects that have no special equality
1292 MacroAssembler::Jump falseCase
= m_jit
.branch64(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1293 MacroAssembler::Jump trueCase
= m_jit
.jump();
1295 rightNotCell
.link(&m_jit
);
1297 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1298 // prove that it is either null or undefined.
1299 if (needsTypeCheck(rightChild
, SpecCell
| SpecOther
)) {
// Mask off the undefined tag bit so null and undefined both compare
// equal to ValueNull.
1300 m_jit
.move(op2GPR
, resultGPR
);
1301 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1304 JSValueRegs(op2GPR
), rightChild
, SpecCell
| SpecOther
,
1306 MacroAssembler::NotEqual
, resultGPR
,
1307 MacroAssembler::TrustedImm64(ValueNull
)));
// Box the outcome: object vs null/undefined is false; identical objects true.
1310 falseCase
.link(&m_jit
);
1311 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1312 MacroAssembler::Jump done
= m_jit
.jump();
1313 trueCase
.link(&m_jit
);
1314 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1317 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
// Peephole form of object == object-or-other equality: instead of producing
// a boxed boolean, branches directly to the taken/notTaken blocks of the
// fused Branch node. Speculation structure mirrors
// compileObjectToObjectOrOtherEquality above.
// NOTE(review): mangled extraction — DFG_TYPE_CHECK( openers dropped; code
// kept byte-for-byte, comments only added.
1320 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
, Node
* branchNode
)
1322 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1323 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1325 SpeculateCellOperand
op1(this, leftChild
);
1326 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1327 GPRTemporary
result(this);
1329 GPRReg op1GPR
= op1
.gpr();
1330 GPRReg op2GPR
= op2
.gpr();
1331 GPRReg resultGPR
= result
.gpr();
1333 bool masqueradesAsUndefinedWatchpointValid
=
1334 masqueradesAsUndefinedWatchpointIsStillValid();
// Left operand: speculate non-string object (+ masquerades flag when needed).
1336 if (masqueradesAsUndefinedWatchpointValid
) {
1338 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchStructurePtr(
1339 MacroAssembler::Equal
,
1340 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1341 m_jit
.vm()->stringStructure
.get()));
1344 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchStructurePtr(
1345 MacroAssembler::Equal
,
1346 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1347 m_jit
.vm()->stringStructure
.get()));
1348 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1350 MacroAssembler::NonZero
,
1351 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1352 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1355 // It seems that most of the time when programs do a == b where b may be either null/undefined
1356 // or an object, b is usually an object. Balance the branches to make that case fast.
1357 MacroAssembler::Jump rightNotCell
= branchNotCell(JSValueRegs(op2GPR
));
1359 // We know that within this branch, rightChild must be a cell.
1360 if (masqueradesAsUndefinedWatchpointValid
) {
1362 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1363 MacroAssembler::Equal
,
1364 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1365 m_jit
.vm()->stringStructure
.get()));
1368 JSValueRegs(op2GPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1369 MacroAssembler::Equal
,
1370 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1371 m_jit
.vm()->stringStructure
.get()));
1372 speculationCheck(BadType
, JSValueRegs(op2GPR
), rightChild
,
1374 MacroAssembler::NonZero
,
1375 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1376 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1379 // At this point we know that we can perform a straight-forward equality comparison on pointer
1380 // values because both left and right are pointers to objects that have no special equality
1382 branch64(MacroAssembler::Equal
, op1GPR
, op2GPR
, taken
);
1384 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1385 // prove that it is either null or undefined.
// If no type check is needed, the not-cell case is trivially "other",
// so it can join the fall-through to notTaken directly.
1386 if (!needsTypeCheck(rightChild
, SpecCell
| SpecOther
))
1387 rightNotCell
.link(&m_jit
);
1389 jump(notTaken
, ForceJump
);
// Otherwise verify the not-cell value really is null/undefined.
1391 rightNotCell
.link(&m_jit
);
1392 m_jit
.move(op2GPR
, resultGPR
);
1393 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1396 JSValueRegs(op2GPR
), rightChild
, SpecCell
| SpecOther
, m_jit
.branch64(
1397 MacroAssembler::NotEqual
, resultGPR
,
1398 MacroAssembler::TrustedImm64(ValueNull
)));
1404 void SpeculativeJIT::compileInt32Compare(Node
* node
, MacroAssembler::RelationalCondition condition
)
1406 SpeculateInt32Operand
op1(this, node
->child1());
1407 SpeculateInt32Operand
op2(this, node
->child2());
1408 GPRTemporary
result(this, Reuse
, op1
, op2
);
1410 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), result
.gpr());
1412 // If we add a DataFormatBool, we should use it here.
1413 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1414 jsValueResult(result
.gpr(), m_currentNode
, DataFormatJSBoolean
);
1417 void SpeculativeJIT::compileInt52Compare(Node
* node
, MacroAssembler::RelationalCondition condition
)
1419 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
1420 SpeculateWhicheverInt52Operand
op2(this, node
->child2(), op1
);
1421 GPRTemporary
result(this, Reuse
, op1
, op2
);
1423 m_jit
.compare64(condition
, op1
.gpr(), op2
.gpr(), result
.gpr());
1425 // If we add a DataFormatBool, we should use it here.
1426 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1427 jsValueResult(result
.gpr(), m_currentNode
, DataFormatJSBoolean
);
1430 void SpeculativeJIT::compilePeepHoleInt52Branch(Node
* node
, Node
* branchNode
, JITCompiler::RelationalCondition condition
)
1432 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1433 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1435 // The branch instruction will branch to the taken block.
1436 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1437 if (taken
== nextBlock()) {
1438 condition
= JITCompiler::invert(condition
);
1439 BasicBlock
* tmp
= taken
;
1444 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
1445 SpeculateWhicheverInt52Operand
op2(this, node
->child2(), op1
);
1447 branch64(condition
, op1
.gpr(), op2
.gpr(), taken
);
1451 void SpeculativeJIT::compileDoubleCompare(Node
* node
, MacroAssembler::DoubleCondition condition
)
1453 SpeculateDoubleOperand
op1(this, node
->child1());
1454 SpeculateDoubleOperand
op2(this, node
->child2());
1455 GPRTemporary
result(this);
1457 m_jit
.move(TrustedImm32(ValueTrue
), result
.gpr());
1458 MacroAssembler::Jump trueCase
= m_jit
.branchDouble(condition
, op1
.fpr(), op2
.fpr());
1459 m_jit
.xor64(TrustedImm32(true), result
.gpr());
1460 trueCase
.link(&m_jit
);
1462 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
// Compiles LogicalNot for a value speculated to be an object or
// null/undefined: objects (that don't masquerade as undefined) negate to
// false, null/undefined negate to true. Result is a boxed JS boolean.
// NOTE(review): mangled extraction — DFG_TYPE_CHECK(/branchTest8( openers
// dropped; code kept byte-for-byte, comments only added.
1465 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse
)
1467 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1468 GPRTemporary
result(this);
1469 GPRReg valueGPR
= value
.gpr();
1470 GPRReg resultGPR
= result
.gpr();
// Deferred temporaries: only allocated when the masquerades-as-undefined
// slow path (below) actually needs them.
1471 GPRTemporary structure
;
1472 GPRReg structureGPR
= InvalidGPRReg
;
1473 GPRTemporary scratch
;
1474 GPRReg scratchGPR
= InvalidGPRReg
;
1476 bool masqueradesAsUndefinedWatchpointValid
=
1477 masqueradesAsUndefinedWatchpointIsStillValid();
1479 if (!masqueradesAsUndefinedWatchpointValid
) {
1480 // The masquerades as undefined case will use the structure register, so allocate it here.
1481 // Do this at the top of the function to avoid branching around a register allocation.
1482 GPRTemporary
realStructure(this);
1483 GPRTemporary
realScratch(this);
1484 structure
.adopt(realStructure
);
1485 scratch
.adopt(realScratch
);
1486 structureGPR
= structure
.gpr();
1487 scratchGPR
= scratch
.gpr();
1490 MacroAssembler::Jump notCell
= branchNotCell(JSValueRegs(valueGPR
));
// Cell path: speculate non-string object; with the watchpoint invalid,
// also handle cells flagged MasqueradesAsUndefined.
1491 if (masqueradesAsUndefinedWatchpointValid
) {
1493 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1494 MacroAssembler::Equal
,
1495 MacroAssembler::Address(valueGPR
, JSCell::structureIDOffset()),
1496 m_jit
.vm()->stringStructure
.get()));
1499 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1500 MacroAssembler::Equal
,
1501 MacroAssembler::Address(valueGPR
, JSCell::structureIDOffset()),
1502 m_jit
.vm()->stringStructure
.get()));
1504 MacroAssembler::Jump isNotMasqueradesAsUndefined
=
1506 MacroAssembler::Zero
,
1507 MacroAssembler::Address(valueGPR
, JSCell::typeInfoFlagsOffset()),
1508 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
));
// A masquerading cell only counts as undefined for its own global
// object; speculation fails if it belongs to the current one.
1510 m_jit
.emitLoadStructure(valueGPR
, structureGPR
, scratchGPR
);
1511 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
,
1513 MacroAssembler::Equal
,
1514 MacroAssembler::Address(structureGPR
, Structure::globalObjectOffset()),
1515 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1517 isNotMasqueradesAsUndefined
.link(&m_jit
);
// Object: !object is false.
1519 m_jit
.move(TrustedImm32(ValueFalse
), resultGPR
);
1520 MacroAssembler::Jump done
= m_jit
.jump();
1522 notCell
.link(&m_jit
);
// Non-cell path: verify null/undefined if not already proven.
1524 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1525 m_jit
.move(valueGPR
, resultGPR
);
1526 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), resultGPR
);
1528 JSValueRegs(valueGPR
), nodeUse
, SpecCell
| SpecOther
, m_jit
.branch64(
1529 MacroAssembler::NotEqual
,
1531 MacroAssembler::TrustedImm64(ValueNull
)));
// null/undefined: !value is true.
1533 m_jit
.move(TrustedImm32(ValueTrue
), resultGPR
);
1537 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
// Compiles the LogicalNot node, dispatching on the speculated use kind of
// its child: object-or-other, int32, double, boolean, and the generic
// JSValue path with a slow-path call to operationConvertJSValueToBoolean.
// NOTE(review): mangled extraction — several case labels and break/return
// lines were dropped between the visible sections; code kept byte-for-byte.
1540 void SpeculativeJIT::compileLogicalNot(Node
* node
)
1542 switch (node
->child1().useKind()) {
1543 case ObjectOrOtherUse
: {
1544 compileObjectOrOtherLogicalNot(node
->child1());
// Int32 path: !x is (x == 0), boxed via or32 with ValueFalse.
1549 SpeculateInt32Operand
value(this, node
->child1());
1550 GPRTemporary
result(this, Reuse
, value
);
1551 m_jit
.compare32(MacroAssembler::Equal
, value
.gpr(), MacroAssembler::TrustedImm32(0), result
.gpr());
1552 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
1553 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
// Double path: start from boxed false, flip to true when the value is zero.
1557 case DoubleRepUse
: {
1558 SpeculateDoubleOperand
value(this, node
->child1());
1559 FPRTemporary
scratch(this);
1560 GPRTemporary
result(this);
1561 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
1562 MacroAssembler::Jump nonZero
= m_jit
.branchDoubleNonZero(value
.fpr(), scratch
.fpr());
1563 m_jit
.xor32(TrustedImm32(true), result
.gpr());
1564 nonZero
.link(&m_jit
);
1565 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
// Boolean path, already proven boolean: just flip the low bit.
1570 if (!needsTypeCheck(node
->child1(), SpecBoolean
)) {
1571 SpeculateBooleanOperand
value(this, node
->child1());
1572 GPRTemporary
result(this, Reuse
, value
);
1574 m_jit
.move(value
.gpr(), result
.gpr());
1575 m_jit
.xor64(TrustedImm32(true), result
.gpr());
1577 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
// Boolean path with a type check: xor-with-ValueFalse trick (see
// fillSpeculateBoolean) validates boolean-ness before flipping.
1581 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
1582 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
1584 m_jit
.move(value
.gpr(), result
.gpr());
1585 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
1587 JSValueRegs(value
.gpr()), node
->child1(), SpecBoolean
, m_jit
.branchTest64(
1588 JITCompiler::NonZero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1589 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue
)), result
.gpr());
1591 // If we add a DataFormatBool, we should use it here.
1592 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
// Generic JSValue path: fast inline flip for booleans, slow-path call
// to the toBoolean operation for everything else.
1597 JSValueOperand
arg1(this, node
->child1());
1598 GPRTemporary
result(this);
1600 GPRReg arg1GPR
= arg1
.gpr();
1601 GPRReg resultGPR
= result
.gpr();
1605 m_jit
.move(arg1GPR
, resultGPR
);
1606 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), resultGPR
);
1607 JITCompiler::Jump slowCase
= m_jit
.branchTest64(JITCompiler::NonZero
, resultGPR
, TrustedImm32(static_cast<int32_t>(~1)));
1609 addSlowPathGenerator(
1610 slowPathCall(slowCase
, this, operationConvertJSValueToBoolean
, resultGPR
, arg1GPR
));
1612 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue
)), resultGPR
);
1613 jsValueResult(resultGPR
, node
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
// String path delegates to the shared string-length-is-zero helper.
1617 return compileStringZeroLength(node
);
1620 RELEASE_ASSERT_NOT_REACHED();
// Emits a branch on a value speculated to be object-or-other: objects go to
// |taken| (unless they masquerade as undefined for this global object),
// null/undefined fall through to |notTaken|.
// NOTE(review): mangled extraction — DFG_TYPE_CHECK( openers and the final
// jump(notTaken) dropped; code kept byte-for-byte, comments only added.
1625 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse
, BasicBlock
* taken
, BasicBlock
* notTaken
)
1627 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1628 GPRTemporary
scratch(this);
// Deferred temporary: only allocated when the masquerades slow path needs it.
1629 GPRTemporary structure
;
1630 GPRReg valueGPR
= value
.gpr();
1631 GPRReg scratchGPR
= scratch
.gpr();
1632 GPRReg structureGPR
= InvalidGPRReg
;
1634 if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
1635 GPRTemporary
realStructure(this);
1636 structure
.adopt(realStructure
);
1637 structureGPR
= structure
.gpr();
1640 MacroAssembler::Jump notCell
= branchNotCell(JSValueRegs(valueGPR
));
// Cell path: speculate non-string object, handling MasqueradesAsUndefined
// when the watchpoint no longer holds.
1641 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1643 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1644 MacroAssembler::Equal
,
1645 MacroAssembler::Address(valueGPR
, JSCell::structureIDOffset()),
1646 m_jit
.vm()->stringStructure
.get()));
1649 JSValueRegs(valueGPR
), nodeUse
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
1650 MacroAssembler::Equal
,
1651 MacroAssembler::Address(valueGPR
, JSCell::structureIDOffset()),
1652 m_jit
.vm()->stringStructure
.get()));
1654 JITCompiler::Jump isNotMasqueradesAsUndefined
= m_jit
.branchTest8(
1656 MacroAssembler::Address(valueGPR
, JSCell::typeInfoFlagsOffset()),
1657 TrustedImm32(MasqueradesAsUndefined
));
// Masquerading cells from the current global object fail the speculation.
1659 m_jit
.emitLoadStructure(valueGPR
, structureGPR
, scratchGPR
);
1660 speculationCheck(BadType
, JSValueRegs(valueGPR
), nodeUse
,
1662 MacroAssembler::Equal
,
1663 MacroAssembler::Address(structureGPR
, Structure::globalObjectOffset()),
1664 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1666 isNotMasqueradesAsUndefined
.link(&m_jit
);
// A genuine object is truthy: branch to taken.
1668 jump(taken
, ForceJump
);
1670 notCell
.link(&m_jit
);
// Non-cell path: verify null/undefined if not already proven.
1672 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1673 m_jit
.move(valueGPR
, scratchGPR
);
1674 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), scratchGPR
);
1676 JSValueRegs(valueGPR
), nodeUse
, SpecCell
| SpecOther
, m_jit
.branch64(
1677 MacroAssembler::NotEqual
, scratchGPR
, MacroAssembler::TrustedImm64(ValueNull
)));
1681 noResult(m_currentNode
);
// Compiles the Branch node: dispatches on the condition's speculated use
// kind (object-or-other, int32, double, boolean, generic JSValue with a
// slow-path toBoolean call) and emits control flow into taken/notTaken.
// NOTE(review): mangled extraction — case labels, swap bodies, jump(notTaken)
// lines and break statements were dropped; code kept byte-for-byte.
1684 void SpeculativeJIT::emitBranch(Node
* node
)
1686 BasicBlock
* taken
= node
->branchData()->taken
.block
;
1687 BasicBlock
* notTaken
= node
->branchData()->notTaken
.block
;
1689 switch (node
->child1().useKind()) {
1690 case ObjectOrOtherUse
: {
1691 emitObjectOrOtherBranch(node
->child1(), taken
, notTaken
);
// Numeric paths (int32 / double).
1696 case DoubleRepUse
: {
1697 if (node
->child1().useKind() == Int32Use
) {
// Invert and swap targets when the taken block is the fall-through.
1698 bool invert
= false;
1700 if (taken
== nextBlock()) {
1702 BasicBlock
* tmp
= taken
;
1707 SpeculateInt32Operand
value(this, node
->child1());
1708 branchTest32(invert
? MacroAssembler::Zero
: MacroAssembler::NonZero
, value
.gpr(), taken
);
1710 SpeculateDoubleOperand
value(this, node
->child1());
1711 FPRTemporary
scratch(this);
1712 branchDoubleNonZero(value
.fpr(), scratch
.fpr(), taken
);
// Generic boxed-value path (also covers BooleanUse).
1723 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
1724 GPRReg valueGPR
= value
.gpr();
1726 if (node
->child1().useKind() == BooleanUse
) {
// Proven boolean: test the low bit directly.
1727 if (!needsTypeCheck(node
->child1(), SpecBoolean
)) {
1728 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
1730 if (taken
== nextBlock()) {
1731 condition
= MacroAssembler::Zero
;
1732 BasicBlock
* tmp
= taken
;
1737 branchTest32(condition
, valueGPR
, TrustedImm32(true), taken
);
// Unproven boolean: compare against both boxed booleans, then fail the
// speculation if neither matched.
1740 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken
);
1741 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken
);
1743 typeCheck(JSValueRegs(valueGPR
), node
->child1(), SpecBoolean
, m_jit
.jump());
// Untyped path: fast checks keyed off the prediction, then a call to
// the generic toBoolean operation.
1747 GPRTemporary
result(this);
1748 GPRReg resultGPR
= result
.gpr();
// Int32 prediction: zero is falsy; any int32 (>= tagTypeNumber) is truthy.
1750 if (node
->child1()->prediction() & SpecInt32
) {
1751 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken
);
1752 branch64(MacroAssembler::AboveOrEqual
, valueGPR
, GPRInfo::tagTypeNumberRegister
, taken
);
1755 if (node
->child1()->prediction() & SpecBoolean
) {
1756 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken
);
1757 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken
);
// Fallback: spill everything and call out to convert to boolean.
1762 silentSpillAllRegisters(resultGPR
);
1763 callOperation(operationConvertJSValueToBoolean
, resultGPR
, valueGPR
);
1764 silentFillAllRegisters(resultGPR
);
1766 branchTest32(MacroAssembler::NonZero
, resultGPR
, taken
);
1770 noResult(node
, UseChildrenCalledExplicitly
);
1775 RELEASE_ASSERT_NOT_REACHED();
1779 void SpeculativeJIT::compile(Node
* node
)
1781 NodeType op
= node
->op();
1783 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1784 m_jit
.clearRegisterAllocationOffsets();
1789 case DoubleConstant
:
1791 initConstantInfo(node
);
1794 case PhantomArguments
:
1795 initConstantInfo(node
);
1798 case WeakJSConstant
:
1799 m_jit
.addWeakReference(node
->weakConstant());
1800 initConstantInfo(node
);
1804 // CSE should always eliminate this.
1805 RELEASE_ASSERT_NOT_REACHED();
1810 AbstractValue
& value
= m_state
.variables().operand(node
->local());
1812 // If the CFA is tracking this variable and it found that the variable
1813 // cannot have been assigned, then don't attempt to proceed.
1814 if (value
.isClear()) {
1815 // FIXME: We should trap instead.
1816 // https://bugs.webkit.org/show_bug.cgi?id=110383
1817 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
1821 switch (node
->variableAccessData()->flushFormat()) {
1822 case FlushedDouble
: {
1823 FPRTemporary
result(this);
1824 m_jit
.loadDouble(JITCompiler::addressFor(node
->machineLocal()), result
.fpr());
1825 VirtualRegister virtualRegister
= node
->virtualRegister();
1826 m_fprs
.retain(result
.fpr(), virtualRegister
, SpillOrderDouble
);
1827 generationInfoFromVirtualRegister(virtualRegister
).initDouble(node
, node
->refCount(), result
.fpr());
1831 case FlushedInt32
: {
1832 GPRTemporary
result(this);
1833 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1835 // Like int32Result, but don't useChildren - our children are phi nodes,
1836 // and don't represent values within this dataflow with virtual registers.
1837 VirtualRegister virtualRegister
= node
->virtualRegister();
1838 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
1839 generationInfoFromVirtualRegister(virtualRegister
).initInt32(node
, node
->refCount(), result
.gpr());
1843 case FlushedInt52
: {
1844 GPRTemporary
result(this);
1845 m_jit
.load64(JITCompiler::addressFor(node
->machineLocal()), result
.gpr());
1847 VirtualRegister virtualRegister
= node
->virtualRegister();
1848 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1849 generationInfoFromVirtualRegister(virtualRegister
).initInt52(node
, node
->refCount(), result
.gpr());
1854 GPRTemporary
result(this);
1855 m_jit
.load64(JITCompiler::addressFor(node
->machineLocal()), result
.gpr());
1857 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1858 // and don't represent values within this dataflow with virtual registers.
1859 VirtualRegister virtualRegister
= node
->virtualRegister();
1860 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1863 if (isCellSpeculation(value
.m_type
))
1864 format
= DataFormatJSCell
;
1865 else if (isBooleanSpeculation(value
.m_type
))
1866 format
= DataFormatJSBoolean
;
1868 format
= DataFormatJS
;
1870 generationInfoFromVirtualRegister(virtualRegister
).initJSValue(node
, node
->refCount(), result
.gpr(), format
);
1876 case GetLocalUnlinked
: {
1877 GPRTemporary
result(this);
1879 m_jit
.load64(JITCompiler::addressFor(node
->unlinkedMachineLocal()), result
.gpr());
1881 jsValueResult(result
.gpr(), node
);
1888 RELEASE_ASSERT_NOT_REACHED();
1893 switch (node
->variableAccessData()->flushFormat()) {
1894 case FlushedDouble
: {
1895 SpeculateDoubleOperand
value(this, node
->child1());
1896 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
->machineLocal()));
1898 // Indicate that it's no longer necessary to retrieve the value of
1899 // this bytecode variable from registers or other locations in the stack,
1900 // but that it is stored as a double.
1901 recordSetLocal(DataFormatDouble
);
1905 case FlushedInt32
: {
1906 SpeculateInt32Operand
value(this, node
->child1());
1907 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->machineLocal()));
1909 recordSetLocal(DataFormatInt32
);
1913 case FlushedInt52
: {
1914 SpeculateInt52Operand
value(this, node
->child1());
1915 m_jit
.store64(value
.gpr(), JITCompiler::addressFor(node
->machineLocal()));
1917 recordSetLocal(DataFormatInt52
);
1922 SpeculateCellOperand
cell(this, node
->child1());
1923 GPRReg cellGPR
= cell
.gpr();
1924 m_jit
.store64(cellGPR
, JITCompiler::addressFor(node
->machineLocal()));
1926 recordSetLocal(DataFormatCell
);
1930 case FlushedBoolean
: {
1931 SpeculateBooleanOperand
boolean(this, node
->child1());
1932 m_jit
.store64(boolean
.gpr(), JITCompiler::addressFor(node
->machineLocal()));
1934 recordSetLocal(DataFormatBoolean
);
1938 case FlushedJSValue
:
1939 case FlushedArguments
: {
1940 JSValueOperand
value(this, node
->child1());
1941 m_jit
.store64(value
.gpr(), JITCompiler::addressFor(node
->machineLocal()));
1943 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
1948 RELEASE_ASSERT_NOT_REACHED();
1956 // This is a no-op; it just marks the fact that the argument is being used.
1957 // But it may be profitable to use this as a hook to run speculation checks
1958 // on arguments, thereby allowing us to trivially eliminate such checks if
1959 // the argument is not used.
1960 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
1966 if (isInt32Constant(node
->child1().node())) {
1967 SpeculateInt32Operand
op2(this, node
->child2());
1968 GPRTemporary
result(this, Reuse
, op2
);
1970 bitOp(op
, valueOfInt32Constant(node
->child1().node()), op2
.gpr(), result
.gpr());
1972 int32Result(result
.gpr(), node
);
1973 } else if (isInt32Constant(node
->child2().node())) {
1974 SpeculateInt32Operand
op1(this, node
->child1());
1975 GPRTemporary
result(this, Reuse
, op1
);
1977 bitOp(op
, valueOfInt32Constant(node
->child2().node()), op1
.gpr(), result
.gpr());
1979 int32Result(result
.gpr(), node
);
1981 SpeculateInt32Operand
op1(this, node
->child1());
1982 SpeculateInt32Operand
op2(this, node
->child2());
1983 GPRTemporary
result(this, Reuse
, op1
, op2
);
1985 GPRReg reg1
= op1
.gpr();
1986 GPRReg reg2
= op2
.gpr();
1987 bitOp(op
, reg1
, reg2
, result
.gpr());
1989 int32Result(result
.gpr(), node
);
1996 if (isInt32Constant(node
->child2().node())) {
1997 SpeculateInt32Operand
op1(this, node
->child1());
1998 GPRTemporary
result(this, Reuse
, op1
);
2000 shiftOp(op
, op1
.gpr(), valueOfInt32Constant(node
->child2().node()) & 0x1f, result
.gpr());
2002 int32Result(result
.gpr(), node
);
2004 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
2005 SpeculateInt32Operand
op1(this, node
->child1());
2006 SpeculateInt32Operand
op2(this, node
->child2());
2007 GPRTemporary
result(this, Reuse
, op1
);
2009 GPRReg reg1
= op1
.gpr();
2010 GPRReg reg2
= op2
.gpr();
2011 shiftOp(op
, reg1
, reg2
, result
.gpr());
2013 int32Result(result
.gpr(), node
);
2017 case UInt32ToNumber
: {
2018 compileUInt32ToNumber(node
);
2022 case DoubleAsInt32
: {
2023 compileDoubleAsInt32(node
);
2027 case ValueToInt32
: {
2028 compileValueToInt32(node
);
2033 compileDoubleRep(node
);
2038 compileValueRep(node
);
2043 switch (node
->child1().useKind()) {
2045 SpeculateInt32Operand
operand(this, node
->child1());
2046 GPRTemporary
result(this, Reuse
, operand
);
2048 m_jit
.signExtend32ToPtr(operand
.gpr(), result
.gpr());
2050 strictInt52Result(result
.gpr(), node
);
2054 case MachineIntUse
: {
2055 GPRResult
result(this);
2056 GPRReg resultGPR
= result
.gpr();
2058 convertMachineInt(node
->child1(), resultGPR
);
2060 strictInt52Result(resultGPR
, node
);
2064 case DoubleRepMachineIntUse
: {
2065 SpeculateDoubleOperand
value(this, node
->child1());
2066 FPRReg valueFPR
= value
.fpr();
2068 GPRResult
result(this);
2069 GPRReg resultGPR
= result
.gpr();
2073 callOperation(operationConvertDoubleToInt52
, resultGPR
, valueFPR
);
2076 JSValueRegs(), node
->child1(), SpecInt52AsDouble
,
2078 JITCompiler::Equal
, resultGPR
,
2079 JITCompiler::TrustedImm64(JSValue::notInt52
)));
2081 strictInt52Result(resultGPR
, node
);
2086 RELEASE_ASSERT_NOT_REACHED();
2092 JSValueOperand
op1(this, node
->child1());
2093 JSValueOperand
op2(this, node
->child2());
2095 GPRReg op1GPR
= op1
.gpr();
2096 GPRReg op2GPR
= op2
.gpr();
2100 GPRResult
result(this);
2101 if (isKnownNotNumber(node
->child1().node()) || isKnownNotNumber(node
->child2().node()))
2102 callOperation(operationValueAddNotNumber
, result
.gpr(), op1GPR
, op2GPR
);
2104 callOperation(operationValueAdd
, result
.gpr(), op1GPR
, op2GPR
);
2106 jsValueResult(result
.gpr(), node
);
2115 compileMakeRope(node
);
2119 compileArithSub(node
);
2123 compileArithNegate(node
);
2127 compileArithMul(node
);
2131 compileArithDiv(node
);
2136 compileArithMod(node
);
2141 switch (node
->child1().useKind()) {
2143 SpeculateStrictInt32Operand
op1(this, node
->child1());
2144 GPRTemporary
result(this);
2145 GPRTemporary
scratch(this);
2147 m_jit
.move(op1
.gpr(), result
.gpr());
2148 m_jit
.rshift32(result
.gpr(), MacroAssembler::TrustedImm32(31), scratch
.gpr());
2149 m_jit
.add32(scratch
.gpr(), result
.gpr());
2150 m_jit
.xor32(scratch
.gpr(), result
.gpr());
2151 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, result
.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2152 int32Result(result
.gpr(), node
);
2156 case DoubleRepUse
: {
2157 SpeculateDoubleOperand
op1(this, node
->child1());
2158 FPRTemporary
result(this);
2160 m_jit
.absDouble(op1
.fpr(), result
.fpr());
2161 doubleResult(result
.fpr(), node
);
2166 RELEASE_ASSERT_NOT_REACHED();
2174 switch (node
->binaryUseKind()) {
2176 SpeculateStrictInt32Operand
op1(this, node
->child1());
2177 SpeculateStrictInt32Operand
op2(this, node
->child2());
2178 GPRTemporary
result(this, Reuse
, op1
);
2180 MacroAssembler::Jump op1Less
= m_jit
.branch32(op
== ArithMin
? MacroAssembler::LessThan
: MacroAssembler::GreaterThan
, op1
.gpr(), op2
.gpr());
2181 m_jit
.move(op2
.gpr(), result
.gpr());
2182 if (op1
.gpr() != result
.gpr()) {
2183 MacroAssembler::Jump done
= m_jit
.jump();
2184 op1Less
.link(&m_jit
);
2185 m_jit
.move(op1
.gpr(), result
.gpr());
2188 op1Less
.link(&m_jit
);
2190 int32Result(result
.gpr(), node
);
2194 case DoubleRepUse
: {
2195 SpeculateDoubleOperand
op1(this, node
->child1());
2196 SpeculateDoubleOperand
op2(this, node
->child2());
2197 FPRTemporary
result(this, op1
);
2199 FPRReg op1FPR
= op1
.fpr();
2200 FPRReg op2FPR
= op2
.fpr();
2201 FPRReg resultFPR
= result
.fpr();
2203 MacroAssembler::JumpList done
;
2205 MacroAssembler::Jump op1Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleLessThan
: MacroAssembler::DoubleGreaterThan
, op1FPR
, op2FPR
);
2207 // op2 is either the lesser one, or one of them is NaN
2208 MacroAssembler::Jump op2Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleGreaterThanOrEqual
: MacroAssembler::DoubleLessThanOrEqual
, op1FPR
, op2FPR
);
2210 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2211 // op1 + op2 and putting it into result.
2212 m_jit
.addDouble(op1FPR
, op2FPR
, resultFPR
);
2213 done
.append(m_jit
.jump());
2215 op2Less
.link(&m_jit
);
2216 m_jit
.moveDouble(op2FPR
, resultFPR
);
2218 if (op1FPR
!= resultFPR
) {
2219 done
.append(m_jit
.jump());
2221 op1Less
.link(&m_jit
);
2222 m_jit
.moveDouble(op1FPR
, resultFPR
);
2224 op1Less
.link(&m_jit
);
2228 doubleResult(resultFPR
, node
);
2233 RELEASE_ASSERT_NOT_REACHED();
2240 SpeculateDoubleOperand
op1(this, node
->child1());
2241 FPRTemporary
result(this, op1
);
2243 m_jit
.sqrtDouble(op1
.fpr(), result
.fpr());
2245 doubleResult(result
.fpr(), node
);
2250 SpeculateDoubleOperand
op1(this, node
->child1());
2251 FPRTemporary
result(this, op1
);
2253 m_jit
.convertDoubleToFloat(op1
.fpr(), result
.fpr());
2254 m_jit
.convertFloatToDouble(result
.fpr(), result
.fpr());
2256 doubleResult(result
.fpr(), node
);
2261 SpeculateDoubleOperand
op1(this, node
->child1());
2262 FPRReg op1FPR
= op1
.fpr();
2266 FPRResult
result(this);
2267 callOperation(sin
, result
.fpr(), op1FPR
);
2268 doubleResult(result
.fpr(), node
);
2273 SpeculateDoubleOperand
op1(this, node
->child1());
2274 FPRReg op1FPR
= op1
.fpr();
2278 FPRResult
result(this);
2279 callOperation(cos
, result
.fpr(), op1FPR
);
2280 doubleResult(result
.fpr(), node
);
2285 compileLogicalNot(node
);
2289 if (compare(node
, JITCompiler::LessThan
, JITCompiler::DoubleLessThan
, operationCompareLess
))
2294 if (compare(node
, JITCompiler::LessThanOrEqual
, JITCompiler::DoubleLessThanOrEqual
, operationCompareLessEq
))
2298 case CompareGreater
:
2299 if (compare(node
, JITCompiler::GreaterThan
, JITCompiler::DoubleGreaterThan
, operationCompareGreater
))
2303 case CompareGreaterEq
:
2304 if (compare(node
, JITCompiler::GreaterThanOrEqual
, JITCompiler::DoubleGreaterThanOrEqual
, operationCompareGreaterEq
))
2308 case CompareEqConstant
:
2309 ASSERT(isNullConstant(node
->child2().node()));
2310 if (nonSpeculativeCompareNull(node
, node
->child1()))
2315 if (compare(node
, JITCompiler::Equal
, JITCompiler::DoubleEqual
, operationCompareEq
))
2319 case CompareStrictEq
:
2320 if (compileStrictEq(node
))
2324 case StringCharCodeAt
: {
2325 compileGetCharCodeAt(node
);
2329 case StringCharAt
: {
2330 // Relies on StringCharAt node having same basic layout as GetByVal
2331 compileGetByValOnString(node
);
2335 case StringFromCharCode
: {
2336 compileFromCharCode(node
);
2346 case ArrayifyToStructure
: {
2352 switch (node
->arrayMode().type()) {
2353 case Array::SelectUsingPredictions
:
2354 case Array::ForceExit
:
2355 RELEASE_ASSERT_NOT_REACHED();
2356 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2358 case Array::Generic
: {
2359 JSValueOperand
base(this, node
->child1());
2360 JSValueOperand
property(this, node
->child2());
2361 GPRReg baseGPR
= base
.gpr();
2362 GPRReg propertyGPR
= property
.gpr();
2365 GPRResult
result(this);
2366 callOperation(operationGetByVal
, result
.gpr(), baseGPR
, propertyGPR
);
2368 jsValueResult(result
.gpr(), node
);
2372 case Array::Contiguous
: {
2373 if (node
->arrayMode().isInBounds()) {
2374 SpeculateStrictInt32Operand
property(this, node
->child2());
2375 StorageOperand
storage(this, node
->child3());
2377 GPRReg propertyReg
= property
.gpr();
2378 GPRReg storageReg
= storage
.gpr();
2383 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2385 GPRTemporary
result(this);
2386 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.gpr());
2387 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchTest64(MacroAssembler::Zero
, result
.gpr()));
2388 jsValueResult(result
.gpr(), node
, node
->arrayMode().type() == Array::Int32
? DataFormatJSInt32
: DataFormatJS
);
2392 SpeculateCellOperand
base(this, node
->child1());
2393 SpeculateStrictInt32Operand
property(this, node
->child2());
2394 StorageOperand
storage(this, node
->child3());
2396 GPRReg baseReg
= base
.gpr();
2397 GPRReg propertyReg
= property
.gpr();
2398 GPRReg storageReg
= storage
.gpr();
2403 GPRTemporary
result(this);
2404 GPRReg resultReg
= result
.gpr();
2406 MacroAssembler::JumpList slowCases
;
2408 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2410 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), resultReg
);
2411 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, resultReg
));
2413 addSlowPathGenerator(
2415 slowCases
, this, operationGetByValArrayInt
,
2416 result
.gpr(), baseReg
, propertyReg
));
2418 jsValueResult(resultReg
, node
);
2422 case Array::Double
: {
2423 if (node
->arrayMode().isInBounds()) {
2424 SpeculateStrictInt32Operand
property(this, node
->child2());
2425 StorageOperand
storage(this, node
->child3());
2427 GPRReg propertyReg
= property
.gpr();
2428 GPRReg storageReg
= storage
.gpr();
2433 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2435 FPRTemporary
result(this);
2436 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.fpr());
2437 if (!node
->arrayMode().isSaneChain())
2438 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, result
.fpr(), result
.fpr()));
2439 doubleResult(result
.fpr(), node
);
2443 SpeculateCellOperand
base(this, node
->child1());
2444 SpeculateStrictInt32Operand
property(this, node
->child2());
2445 StorageOperand
storage(this, node
->child3());
2447 GPRReg baseReg
= base
.gpr();
2448 GPRReg propertyReg
= property
.gpr();
2449 GPRReg storageReg
= storage
.gpr();
2454 GPRTemporary
result(this);
2455 FPRTemporary
temp(this);
2456 GPRReg resultReg
= result
.gpr();
2457 FPRReg tempReg
= temp
.fpr();
2459 MacroAssembler::JumpList slowCases
;
2461 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2463 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), tempReg
);
2464 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempReg
, tempReg
));
2465 boxDouble(tempReg
, resultReg
);
2467 addSlowPathGenerator(
2469 slowCases
, this, operationGetByValArrayInt
,
2470 result
.gpr(), baseReg
, propertyReg
));
2472 jsValueResult(resultReg
, node
);
2476 case Array::ArrayStorage
:
2477 case Array::SlowPutArrayStorage
: {
2478 if (node
->arrayMode().isInBounds()) {
2479 SpeculateStrictInt32Operand
property(this, node
->child2());
2480 StorageOperand
storage(this, node
->child3());
2482 GPRReg propertyReg
= property
.gpr();
2483 GPRReg storageReg
= storage
.gpr();
2488 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2490 GPRTemporary
result(this);
2491 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), result
.gpr());
2492 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchTest64(MacroAssembler::Zero
, result
.gpr()));
2494 jsValueResult(result
.gpr(), node
);
2498 SpeculateCellOperand
base(this, node
->child1());
2499 SpeculateStrictInt32Operand
property(this, node
->child2());
2500 StorageOperand
storage(this, node
->child3());
2502 GPRReg baseReg
= base
.gpr();
2503 GPRReg propertyReg
= property
.gpr();
2504 GPRReg storageReg
= storage
.gpr();
2509 GPRTemporary
result(this);
2510 GPRReg resultReg
= result
.gpr();
2512 MacroAssembler::JumpList slowCases
;
2514 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2516 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), resultReg
);
2517 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, resultReg
));
2519 addSlowPathGenerator(
2521 slowCases
, this, operationGetByValArrayInt
,
2522 result
.gpr(), baseReg
, propertyReg
));
2524 jsValueResult(resultReg
, node
);
2528 compileGetByValOnString(node
);
2530 case Array::Arguments
:
2531 compileGetByValOnArguments(node
);
2534 TypedArrayType type
= node
->arrayMode().typedArrayType();
2536 compileGetByValOnIntTypedArray(node
, type
);
2538 compileGetByValOnFloatTypedArray(node
, type
);
2543 case PutByValDirect
:
2545 case PutByValAlias
: {
2546 Edge child1
= m_jit
.graph().varArgChild(node
, 0);
2547 Edge child2
= m_jit
.graph().varArgChild(node
, 1);
2548 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
2549 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
2551 ArrayMode arrayMode
= node
->arrayMode().modeForPut();
2552 bool alreadyHandled
= false;
2554 switch (arrayMode
.type()) {
2555 case Array::SelectUsingPredictions
:
2556 case Array::ForceExit
:
2557 RELEASE_ASSERT_NOT_REACHED();
2558 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2559 alreadyHandled
= true;
2561 case Array::Generic
: {
2562 RELEASE_ASSERT(node
->op() == PutByVal
);
2564 JSValueOperand
arg1(this, child1
);
2565 JSValueOperand
arg2(this, child2
);
2566 JSValueOperand
arg3(this, child3
);
2567 GPRReg arg1GPR
= arg1
.gpr();
2568 GPRReg arg2GPR
= arg2
.gpr();
2569 GPRReg arg3GPR
= arg3
.gpr();
2571 if (node
->op() == PutByValDirect
)
2572 callOperation(m_jit
.isStrictModeFor(node
->origin
.semantic
) ? operationPutByValDirectStrict
: operationPutByValDirectNonStrict
, arg1GPR
, arg2GPR
, arg3GPR
);
2574 callOperation(m_jit
.isStrictModeFor(node
->origin
.semantic
) ? operationPutByValStrict
: operationPutByValNonStrict
, arg1GPR
, arg2GPR
, arg3GPR
);
2577 alreadyHandled
= true;
2587 // FIXME: the base may not be necessary for some array access modes. But we have to
2588 // keep it alive to this point, so it's likely to be in a register anyway. Likely
2589 // no harm in locking it here.
2590 SpeculateCellOperand
base(this, child1
);
2591 SpeculateStrictInt32Operand
property(this, child2
);
2593 GPRReg baseReg
= base
.gpr();
2594 GPRReg propertyReg
= property
.gpr();
2596 switch (arrayMode
.type()) {
2598 case Array::Contiguous
: {
2599 JSValueOperand
value(this, child3
, ManualOperandSpeculation
);
2601 GPRReg valueReg
= value
.gpr();
2606 if (arrayMode
.type() == Array::Int32
) {
2608 JSValueRegs(valueReg
), child3
, SpecInt32
,
2610 MacroAssembler::Below
, valueReg
, GPRInfo::tagTypeNumberRegister
));
2613 StorageOperand
storage(this, child4
);
2614 GPRReg storageReg
= storage
.gpr();
2616 if (node
->op() == PutByValAlias
) {
2617 // Store the value to the array.
2618 GPRReg propertyReg
= property
.gpr();
2619 GPRReg valueReg
= value
.gpr();
2620 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2626 GPRTemporary temporary
;
2627 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
2629 MacroAssembler::Jump slowCase
;
2631 if (arrayMode
.isInBounds()) {
2633 OutOfBounds
, JSValueRegs(), 0,
2634 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2636 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2638 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
2640 if (!arrayMode
.isOutOfBounds())
2641 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
2643 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
2644 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2646 inBounds
.link(&m_jit
);
2649 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2656 if (arrayMode
.isOutOfBounds()) {
2657 if (node
->op() == PutByValDirect
) {
2658 addSlowPathGenerator(slowPathCall(
2660 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
2661 NoResult
, baseReg
, propertyReg
, valueReg
));
2663 addSlowPathGenerator(slowPathCall(
2665 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2666 NoResult
, baseReg
, propertyReg
, valueReg
));
2670 noResult(node
, UseChildrenCalledExplicitly
);
2674 case Array::Double
: {
2675 compileDoublePutByVal(node
, base
, property
);
2679 case Array::ArrayStorage
:
2680 case Array::SlowPutArrayStorage
: {
2681 JSValueOperand
value(this, child3
);
2683 GPRReg valueReg
= value
.gpr();
2688 StorageOperand
storage(this, child4
);
2689 GPRReg storageReg
= storage
.gpr();
2691 if (node
->op() == PutByValAlias
) {
2692 // Store the value to the array.
2693 GPRReg propertyReg
= property
.gpr();
2694 GPRReg valueReg
= value
.gpr();
2695 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2701 GPRTemporary temporary
;
2702 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
2704 MacroAssembler::JumpList slowCases
;
2706 MacroAssembler::Jump beyondArrayBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2707 if (!arrayMode
.isOutOfBounds())
2708 speculationCheck(OutOfBounds
, JSValueRegs(), 0, beyondArrayBounds
);
2710 slowCases
.append(beyondArrayBounds
);
2712 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2713 if (arrayMode
.isInBounds()) {
2715 StoreToHole
, JSValueRegs(), 0,
2716 m_jit
.branchTest64(MacroAssembler::Zero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]))));
2718 MacroAssembler::Jump notHoleValue
= m_jit
.branchTest64(MacroAssembler::NonZero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2719 if (arrayMode
.isSlowPut()) {
2720 // This is sort of strange. If we wanted to optimize this code path, we would invert
2721 // the above branch. But it's simply not worth it since this only happens if we're
2722 // already having a bad time.
2723 slowCases
.append(m_jit
.jump());
2725 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, ArrayStorage::numValuesInVectorOffset()));
2727 // If we're writing to a hole we might be growing the array;
2728 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2729 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
2730 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2732 lengthDoesNotNeedUpdate
.link(&m_jit
);
2734 notHoleValue
.link(&m_jit
);
2737 // Store the value to the array.
2738 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2745 if (!slowCases
.empty()) {
2746 if (node
->op() == PutByValDirect
) {
2747 addSlowPathGenerator(slowPathCall(
2749 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
2750 NoResult
, baseReg
, propertyReg
, valueReg
));
2752 addSlowPathGenerator(slowPathCall(
2754 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2755 NoResult
, baseReg
, propertyReg
, valueReg
));
2759 noResult(node
, UseChildrenCalledExplicitly
);
2763 case Array::Arguments
: {
2764 JSValueOperand
value(this, child3
);
2765 GPRTemporary
scratch(this);
2766 GPRTemporary
scratch2(this);
2768 GPRReg valueReg
= value
.gpr();
2769 GPRReg scratchReg
= scratch
.gpr();
2770 GPRReg scratch2Reg
= scratch2
.gpr();
2775 // Two really lame checks.
2777 Uncountable
, JSValueSource(), 0,
2779 MacroAssembler::AboveOrEqual
, propertyReg
,
2780 MacroAssembler::Address(baseReg
, Arguments::offsetOfNumArguments())));
2782 Uncountable
, JSValueSource(), 0,
2783 m_jit
.branchTestPtr(
2784 MacroAssembler::NonZero
,
2785 MacroAssembler::Address(
2786 baseReg
, Arguments::offsetOfSlowArgumentData())));
2788 m_jit
.move(propertyReg
, scratch2Reg
);
2789 m_jit
.signExtend32ToPtr(scratch2Reg
, scratch2Reg
);
2791 MacroAssembler::Address(baseReg
, Arguments::offsetOfRegisters()),
2796 MacroAssembler::BaseIndex(
2797 scratchReg
, scratch2Reg
, MacroAssembler::TimesEight
,
2798 CallFrame::thisArgumentOffset() * sizeof(Register
) + sizeof(Register
)));
2805 TypedArrayType type
= arrayMode
.typedArrayType();
2807 compilePutByValForIntTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2809 compilePutByValForFloatTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2816 if (compileRegExpExec(node
))
2818 if (!node
->adjustedRefCount()) {
2819 SpeculateCellOperand
base(this, node
->child1());
2820 SpeculateCellOperand
argument(this, node
->child2());
2821 GPRReg baseGPR
= base
.gpr();
2822 GPRReg argumentGPR
= argument
.gpr();
2825 GPRResult
result(this);
2826 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2828 // Must use jsValueResult because otherwise we screw up register
2829 // allocation, which thinks that this node has a result.
2830 jsValueResult(result
.gpr(), node
);
2834 SpeculateCellOperand
base(this, node
->child1());
2835 SpeculateCellOperand
argument(this, node
->child2());
2836 GPRReg baseGPR
= base
.gpr();
2837 GPRReg argumentGPR
= argument
.gpr();
2840 GPRResult
result(this);
2841 callOperation(operationRegExpExec
, result
.gpr(), baseGPR
, argumentGPR
);
2843 jsValueResult(result
.gpr(), node
);
2848 SpeculateCellOperand
base(this, node
->child1());
2849 SpeculateCellOperand
argument(this, node
->child2());
2850 GPRReg baseGPR
= base
.gpr();
2851 GPRReg argumentGPR
= argument
.gpr();
2854 GPRResult
result(this);
2855 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2857 // If we add a DataFormatBool, we should use it here.
2858 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
2859 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
2864 ASSERT(node
->arrayMode().isJSArray());
2866 SpeculateCellOperand
base(this, node
->child1());
2867 GPRTemporary
storageLength(this);
2869 GPRReg baseGPR
= base
.gpr();
2870 GPRReg storageLengthGPR
= storageLength
.gpr();
2872 StorageOperand
storage(this, node
->child3());
2873 GPRReg storageGPR
= storage
.gpr();
2875 switch (node
->arrayMode().type()) {
2877 case Array::Contiguous
: {
2878 JSValueOperand
value(this, node
->child2(), ManualOperandSpeculation
);
2879 GPRReg valueGPR
= value
.gpr();
2881 if (node
->arrayMode().type() == Array::Int32
) {
2883 JSValueRegs(valueGPR
), node
->child2(), SpecInt32
,
2885 MacroAssembler::Below
, valueGPR
, GPRInfo::tagTypeNumberRegister
));
2888 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2889 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2890 m_jit
.store64(valueGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
2891 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2892 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2893 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
2895 addSlowPathGenerator(
2897 slowPath
, this, operationArrayPush
, storageLengthGPR
,
2898 valueGPR
, baseGPR
));
2900 jsValueResult(storageLengthGPR
, node
);
2904 case Array::Double
: {
2905 SpeculateDoubleOperand
value(this, node
->child2());
2906 FPRReg valueFPR
= value
.fpr();
2909 JSValueRegs(), node
->child2(), SpecDoubleReal
,
2910 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, valueFPR
, valueFPR
));
2912 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2913 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2914 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
2915 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2916 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2917 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
2919 addSlowPathGenerator(
2921 slowPath
, this, operationArrayPushDouble
, storageLengthGPR
,
2922 valueFPR
, baseGPR
));
2924 jsValueResult(storageLengthGPR
, node
);
2928 case Array::ArrayStorage
: {
2929 JSValueOperand
value(this, node
->child2());
2930 GPRReg valueGPR
= value
.gpr();
2932 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
2934 // Refuse to handle bizarre lengths.
2935 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Above
, storageLengthGPR
, TrustedImm32(0x7ffffffe)));
2937 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
2939 m_jit
.store64(valueGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2941 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2942 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
2943 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
2944 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
2946 addSlowPathGenerator(
2948 slowPath
, this, operationArrayPush
, NoResult
, storageLengthGPR
,
2949 valueGPR
, baseGPR
));
2951 jsValueResult(storageLengthGPR
, node
);
2963 ASSERT(node
->arrayMode().isJSArray());
2965 SpeculateCellOperand
base(this, node
->child1());
2966 StorageOperand
storage(this, node
->child2());
2967 GPRTemporary
value(this);
2968 GPRTemporary
storageLength(this);
2969 FPRTemporary
temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().
2971 GPRReg baseGPR
= base
.gpr();
2972 GPRReg storageGPR
= storage
.gpr();
2973 GPRReg valueGPR
= value
.gpr();
2974 GPRReg storageLengthGPR
= storageLength
.gpr();
2975 FPRReg tempFPR
= temp
.fpr();
2977 switch (node
->arrayMode().type()) {
2980 case Array::Contiguous
: {
2982 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2983 MacroAssembler::Jump undefinedCase
=
2984 m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
);
2985 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
2987 storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2988 MacroAssembler::Jump slowCase
;
2989 if (node
->arrayMode().type() == Array::Double
) {
2991 MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
),
2993 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
2994 // length and the new length.
2996 MacroAssembler::TrustedImm64(bitwise_cast
<int64_t>(PNaN
)), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
2997 slowCase
= m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempFPR
, tempFPR
);
2998 boxDouble(tempFPR
, valueGPR
);
3001 MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
),
3003 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3004 // length and the new length.
3006 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3007 slowCase
= m_jit
.branchTest64(MacroAssembler::Zero
, valueGPR
);
3010 addSlowPathGenerator(
3012 undefinedCase
, this,
3013 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR
));
3014 addSlowPathGenerator(
3016 slowCase
, this, operationArrayPopAndRecoverLength
, valueGPR
, baseGPR
));
3018 // We can't know for sure that the result is an int because of the slow paths. :-/
3019 jsValueResult(valueGPR
, node
);
3023 case Array::ArrayStorage
: {
3024 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3026 JITCompiler::Jump undefinedCase
=
3027 m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
);
3029 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
3031 JITCompiler::JumpList slowCases
;
3032 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset())));
3034 m_jit
.load64(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), valueGPR
);
3035 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, valueGPR
));
3037 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3039 m_jit
.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
3040 m_jit
.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3042 addSlowPathGenerator(
3044 undefinedCase
, this,
3045 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR
));
3047 addSlowPathGenerator(
3049 slowCases
, this, operationArrayPop
, valueGPR
, baseGPR
));
3051 jsValueResult(valueGPR
, node
);
3063 jump(node
->targetBlock());
3077 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT1
);
3078 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
3079 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
3081 // Return the result in returnValueGPR.
3082 JSValueOperand
op1(this, node
->child1());
3083 m_jit
.move(op1
.gpr(), GPRInfo::returnValueGPR
);
3085 m_jit
.emitFunctionEpilogue();
3093 case ThrowReferenceError
: {
3094 // We expect that throw statements are rare and are intended to exit the code block
3095 // anyway, so we just OSR back to the old JIT for now.
3096 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
3100 case BooleanToNumber
: {
3101 switch (node
->child1().useKind()) {
3103 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
3104 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
3106 m_jit
.move(value
.gpr(), result
.gpr());
3107 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
3109 JSValueRegs(value
.gpr()), node
->child1(), SpecBoolean
, m_jit
.branchTest64(
3110 JITCompiler::NonZero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
3112 int32Result(result
.gpr(), node
);
3117 JSValueOperand
value(this, node
->child1());
3118 GPRTemporary
result(this);
3120 m_jit
.move(value
.gpr(), result
.gpr());
3121 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), result
.gpr());
3122 JITCompiler::Jump isBoolean
= m_jit
.branchTest64(
3123 JITCompiler::Zero
, result
.gpr(), TrustedImm32(static_cast<int32_t>(~1)));
3124 m_jit
.move(value
.gpr(), result
.gpr());
3125 JITCompiler::Jump done
= m_jit
.jump();
3126 isBoolean
.link(&m_jit
);
3127 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, result
.gpr());
3130 jsValueResult(result
.gpr(), node
);
3135 RELEASE_ASSERT_NOT_REACHED();
3142 RELEASE_ASSERT(node
->child1().useKind() == UntypedUse
);
3143 JSValueOperand
op1(this, node
->child1());
3144 GPRTemporary
result(this, Reuse
, op1
);
3146 GPRReg op1GPR
= op1
.gpr();
3147 GPRReg resultGPR
= result
.gpr();
3151 MacroAssembler::Jump alreadyPrimitive
= branchNotCell(JSValueRegs(op1GPR
));
3152 MacroAssembler::Jump notPrimitive
= m_jit
.branchStructurePtr(
3153 MacroAssembler::NotEqual
,
3154 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
3155 m_jit
.vm()->stringStructure
.get());
3157 alreadyPrimitive
.link(&m_jit
);
3158 m_jit
.move(op1GPR
, resultGPR
);
3160 addSlowPathGenerator(
3161 slowPathCall(notPrimitive
, this, operationToPrimitive
, resultGPR
, op1GPR
));
3163 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3168 if (node
->child1().useKind() == UntypedUse
) {
3169 JSValueOperand
op1(this, node
->child1());
3170 GPRReg op1GPR
= op1
.gpr();
3172 GPRResult
result(this);
3173 GPRReg resultGPR
= result
.gpr();
3177 JITCompiler::Jump done
;
3178 if (node
->child1()->prediction() & SpecString
) {
3179 JITCompiler::Jump slowPath1
= branchNotCell(JSValueRegs(op1GPR
));
3180 JITCompiler::Jump slowPath2
= m_jit
.branchStructurePtr(
3181 JITCompiler::NotEqual
,
3182 JITCompiler::Address(op1GPR
, JSCell::structureIDOffset()),
3183 m_jit
.vm()->stringStructure
.get());
3184 m_jit
.move(op1GPR
, resultGPR
);
3185 done
= m_jit
.jump();
3186 slowPath1
.link(&m_jit
);
3187 slowPath2
.link(&m_jit
);
3189 callOperation(operationToString
, resultGPR
, op1GPR
);
3192 cellResult(resultGPR
, node
);
3196 compileToStringOnCell(node
);
3200 case NewStringObject
: {
3201 compileNewStringObject(node
);
3206 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3207 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3208 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3209 RELEASE_ASSERT(structure
->indexingType() == node
->indexingType());
3211 hasUndecided(structure
->indexingType())
3212 || hasInt32(structure
->indexingType())
3213 || hasDouble(structure
->indexingType())
3214 || hasContiguous(structure
->indexingType()));
3216 unsigned numElements
= node
->numChildren();
3218 GPRTemporary
result(this);
3219 GPRTemporary
storage(this);
3221 GPRReg resultGPR
= result
.gpr();
3222 GPRReg storageGPR
= storage
.gpr();
3224 emitAllocateJSArray(resultGPR
, structure
, storageGPR
, numElements
);
3226 // At this point, one way or another, resultGPR and storageGPR have pointers to
3227 // the JSArray and the Butterfly, respectively.
3229 ASSERT(!hasUndecided(structure
->indexingType()) || !node
->numChildren());
3231 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3232 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3233 switch (node
->indexingType()) {
3234 case ALL_BLANK_INDEXING_TYPES
:
3235 case ALL_UNDECIDED_INDEXING_TYPES
:
3238 case ALL_DOUBLE_INDEXING_TYPES
: {
3239 SpeculateDoubleOperand
operand(this, use
);
3240 FPRReg opFPR
= operand
.fpr();
3242 JSValueRegs(), use
, SpecDoubleReal
,
3244 MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3245 m_jit
.storeDouble(opFPR
, MacroAssembler::Address(storageGPR
, sizeof(double) * operandIdx
));
3248 case ALL_INT32_INDEXING_TYPES
:
3249 case ALL_CONTIGUOUS_INDEXING_TYPES
: {
3250 JSValueOperand
operand(this, use
, ManualOperandSpeculation
);
3251 GPRReg opGPR
= operand
.gpr();
3252 if (hasInt32(node
->indexingType())) {
3254 JSValueRegs(opGPR
), use
, SpecInt32
,
3256 MacroAssembler::Below
, opGPR
, GPRInfo::tagTypeNumberRegister
));
3258 m_jit
.store64(opGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
));
3267 // Yuck, we should *really* have a way of also returning the storageGPR. But
3268 // that's the least of what's wrong with this code. We really shouldn't be
3269 // allocating the array after having computed - and probably spilled to the
3270 // stack - all of the things that will go into the array. The solution to that
3271 // bigger problem will also likely fix the redundancy in reloading the storage
3272 // pointer that we currently have.
3274 cellResult(resultGPR
, node
);
3278 if (!node
->numChildren()) {
3280 GPRResult
result(this);
3281 callOperation(operationNewEmptyArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()));
3282 cellResult(result
.gpr(), node
);
3286 size_t scratchSize
= sizeof(EncodedJSValue
) * node
->numChildren();
3287 ScratchBuffer
* scratchBuffer
= m_jit
.vm()->scratchBufferForSize(scratchSize
);
3288 EncodedJSValue
* buffer
= scratchBuffer
? static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer()) : 0;
3290 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3291 // Need to perform the speculations that this node promises to perform. If we're
3292 // emitting code here and the indexing type is not array storage then there is
3293 // probably something hilarious going on and we're already failing at all the
3294 // things, but at least we're going to be sound.
3295 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3296 switch (node
->indexingType()) {
3297 case ALL_BLANK_INDEXING_TYPES
:
3298 case ALL_UNDECIDED_INDEXING_TYPES
:
3301 case ALL_DOUBLE_INDEXING_TYPES
: {
3302 SpeculateDoubleOperand
operand(this, use
);
3303 GPRTemporary
scratch(this);
3304 FPRReg opFPR
= operand
.fpr();
3305 GPRReg scratchGPR
= scratch
.gpr();
3307 JSValueRegs(), use
, SpecDoubleReal
,
3309 MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3310 m_jit
.boxDouble(opFPR
, scratchGPR
);
3311 m_jit
.store64(scratchGPR
, buffer
+ operandIdx
);
3314 case ALL_INT32_INDEXING_TYPES
: {
3315 JSValueOperand
operand(this, use
, ManualOperandSpeculation
);
3316 GPRReg opGPR
= operand
.gpr();
3317 if (hasInt32(node
->indexingType())) {
3319 JSValueRegs(opGPR
), use
, SpecInt32
,
3321 MacroAssembler::Below
, opGPR
, GPRInfo::tagTypeNumberRegister
));
3323 m_jit
.store64(opGPR
, buffer
+ operandIdx
);
3326 case ALL_CONTIGUOUS_INDEXING_TYPES
:
3327 case ALL_ARRAY_STORAGE_INDEXING_TYPES
: {
3328 JSValueOperand
operand(this, use
);
3329 GPRReg opGPR
= operand
.gpr();
3330 m_jit
.store64(opGPR
, buffer
+ operandIdx
);
3340 switch (node
->indexingType()) {
3341 case ALL_DOUBLE_INDEXING_TYPES
:
3342 case ALL_INT32_INDEXING_TYPES
:
3352 GPRTemporary
scratch(this);
3354 // Tell GC mark phase how much of the scratch buffer is active during call.
3355 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3356 m_jit
.storePtr(TrustedImmPtr(scratchSize
), scratch
.gpr());
3359 GPRResult
result(this);
3362 operationNewArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3363 static_cast<void*>(buffer
), node
->numChildren());
3366 GPRTemporary
scratch(this);
3368 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3369 m_jit
.storePtr(TrustedImmPtr(0), scratch
.gpr());
3372 cellResult(result
.gpr(), node
, UseChildrenCalledExplicitly
);
3376 case NewArrayWithSize
: {
3377 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3378 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3379 SpeculateStrictInt32Operand
size(this, node
->child1());
3380 GPRTemporary
result(this);
3381 GPRTemporary
storage(this);
3382 GPRTemporary
scratch(this);
3383 GPRTemporary
scratch2(this);
3385 GPRReg sizeGPR
= size
.gpr();
3386 GPRReg resultGPR
= result
.gpr();
3387 GPRReg storageGPR
= storage
.gpr();
3388 GPRReg scratchGPR
= scratch
.gpr();
3389 GPRReg scratch2GPR
= scratch2
.gpr();
3391 MacroAssembler::JumpList slowCases
;
3392 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
)));
3394 ASSERT((1 << 3) == sizeof(JSValue
));
3395 m_jit
.move(sizeGPR
, scratchGPR
);
3396 m_jit
.lshift32(TrustedImm32(3), scratchGPR
);
3397 m_jit
.add32(TrustedImm32(sizeof(IndexingHeader
)), scratchGPR
, resultGPR
);
3399 emitAllocateBasicStorage(resultGPR
, storageGPR
));
3400 m_jit
.subPtr(scratchGPR
, storageGPR
);
3401 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3402 emitAllocateJSObject
<JSArray
>(resultGPR
, TrustedImmPtr(structure
), storageGPR
, scratchGPR
, scratch2GPR
, slowCases
);
3404 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3405 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3407 if (hasDouble(node
->indexingType())) {
3408 m_jit
.move(TrustedImm64(bitwise_cast
<int64_t>(PNaN
)), scratchGPR
);
3409 m_jit
.move(sizeGPR
, scratch2GPR
);
3410 MacroAssembler::Jump done
= m_jit
.branchTest32(MacroAssembler::Zero
, scratch2GPR
);
3411 MacroAssembler::Label loop
= m_jit
.label();
3412 m_jit
.sub32(TrustedImm32(1), scratch2GPR
);
3413 m_jit
.store64(scratchGPR
, MacroAssembler::BaseIndex(storageGPR
, scratch2GPR
, MacroAssembler::TimesEight
));
3414 m_jit
.branchTest32(MacroAssembler::NonZero
, scratch2GPR
).linkTo(loop
, &m_jit
);
3418 addSlowPathGenerator(adoptPtr(
3419 new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
3420 slowCases
, this, operationNewArrayWithSize
, resultGPR
,
3421 globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3422 globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
),
3425 cellResult(resultGPR
, node
);
3429 SpeculateStrictInt32Operand
size(this, node
->child1());
3430 GPRReg sizeGPR
= size
.gpr();
3432 GPRResult
result(this);
3433 GPRReg resultGPR
= result
.gpr();
3434 GPRReg structureGPR
= selectScratchGPR(sizeGPR
);
3435 MacroAssembler::Jump bigLength
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
));
3436 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType())), structureGPR
);
3437 MacroAssembler::Jump done
= m_jit
.jump();
3438 bigLength
.link(&m_jit
);
3439 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
)), structureGPR
);
3441 callOperation(operationNewArrayWithSize
, resultGPR
, structureGPR
, sizeGPR
);
3442 cellResult(resultGPR
, node
);
3446 case NewArrayBuffer
: {
3447 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3448 IndexingType indexingType
= node
->indexingType();
3449 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(indexingType
)) {
3450 unsigned numElements
= node
->numConstants();
3452 GPRTemporary
result(this);
3453 GPRTemporary
storage(this);
3455 GPRReg resultGPR
= result
.gpr();
3456 GPRReg storageGPR
= storage
.gpr();
3458 emitAllocateJSArray(resultGPR
, globalObject
->arrayStructureForIndexingTypeDuringAllocation(indexingType
), storageGPR
, numElements
);
3460 RELEASE_ASSERT(indexingType
& IsArray
);
3461 JSValue
* data
= m_jit
.codeBlock()->constantBuffer(node
->startConstant());
3462 if (indexingType
== ArrayWithDouble
) {
3463 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3464 double value
= data
[index
].asNumber();
3466 Imm64(bitwise_cast
<int64_t>(value
)),
3467 MacroAssembler::Address(storageGPR
, sizeof(double) * index
));
3470 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3472 Imm64(JSValue::encode(data
[index
])),
3473 MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * index
));
3477 cellResult(resultGPR
, node
);
3482 GPRResult
result(this);
3484 callOperation(operationNewArrayBuffer
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()), node
->startConstant(), node
->numConstants());
3486 cellResult(result
.gpr(), node
);
3490 case NewTypedArray
: {
3491 switch (node
->child1().useKind()) {
3493 compileNewTypedArray(node
);
3496 JSValueOperand
argument(this, node
->child1());
3497 GPRReg argumentGPR
= argument
.gpr();
3501 GPRResult
result(this);
3502 GPRReg resultGPR
= result
.gpr();
3504 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3506 operationNewTypedArrayWithOneArgumentForType(node
->typedArrayType()),
3507 resultGPR
, globalObject
->typedArrayStructure(node
->typedArrayType()),
3510 cellResult(resultGPR
, node
);
3514 RELEASE_ASSERT_NOT_REACHED();
3522 GPRResult
result(this);
3524 callOperation(operationNewRegexp
, result
.gpr(), m_jit
.codeBlock()->regexp(node
->regexpIndex()));
3526 cellResult(result
.gpr(), node
);
3531 ASSERT(node
->child1().useKind() == UntypedUse
);
3532 JSValueOperand
thisValue(this, node
->child1());
3533 GPRTemporary
temp(this);
3534 GPRReg thisValueGPR
= thisValue
.gpr();
3535 GPRReg tempGPR
= temp
.gpr();
3537 MacroAssembler::JumpList slowCases
;
3538 slowCases
.append(branchNotCell(JSValueRegs(thisValueGPR
)));
3539 slowCases
.append(m_jit
.branch8(
3540 MacroAssembler::NotEqual
,
3541 MacroAssembler::Address(thisValueGPR
, JSCell::typeInfoTypeOffset()),
3542 TrustedImm32(FinalObjectType
)));
3543 m_jit
.move(thisValueGPR
, tempGPR
);
3544 J_JITOperation_EJ function
;
3545 if (m_jit
.graph().executableFor(node
->origin
.semantic
)->isStrictMode())
3546 function
= operationToThisStrict
;
3548 function
= operationToThis
;
3549 addSlowPathGenerator(
3550 slowPathCall(slowCases
, this, function
, tempGPR
, thisValueGPR
));
3552 jsValueResult(tempGPR
, node
);
3557 // Note that there is not so much profit to speculate here. The only things we
3558 // speculate on are (1) that it's a cell, since that eliminates cell checks
3559 // later if the proto is reused, and (2) if we have a FinalObject prediction
3560 // then we speculate because we want to get recompiled if it isn't (since
3561 // otherwise we'd start taking slow path a lot).
3563 SpeculateCellOperand
callee(this, node
->child1());
3564 GPRTemporary
result(this);
3565 GPRTemporary
allocator(this);
3566 GPRTemporary
structure(this);
3567 GPRTemporary
scratch(this);
3569 GPRReg calleeGPR
= callee
.gpr();
3570 GPRReg resultGPR
= result
.gpr();
3571 GPRReg allocatorGPR
= allocator
.gpr();
3572 GPRReg structureGPR
= structure
.gpr();
3573 GPRReg scratchGPR
= scratch
.gpr();
3575 MacroAssembler::JumpList slowPath
;
3577 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR
);
3578 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR
);
3579 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, allocatorGPR
));
3580 emitAllocateJSObject(resultGPR
, allocatorGPR
, structureGPR
, TrustedImmPtr(0), scratchGPR
, slowPath
);
3582 addSlowPathGenerator(slowPathCall(slowPath
, this, operationCreateThis
, resultGPR
, calleeGPR
, node
->inlineCapacity()));
3584 cellResult(resultGPR
, node
);
3588 case AllocationProfileWatchpoint
:
3589 case TypedArrayWatchpoint
: {
3595 GPRTemporary
result(this);
3596 GPRTemporary
allocator(this);
3597 GPRTemporary
scratch(this);
3599 GPRReg resultGPR
= result
.gpr();
3600 GPRReg allocatorGPR
= allocator
.gpr();
3601 GPRReg scratchGPR
= scratch
.gpr();
3603 MacroAssembler::JumpList slowPath
;
3605 Structure
* structure
= node
->structure();
3606 size_t allocationSize
= JSFinalObject::allocationSize(structure
->inlineCapacity());
3607 MarkedAllocator
* allocatorPtr
= &m_jit
.vm()->heap
.allocatorForObjectWithoutDestructor(allocationSize
);
3609 m_jit
.move(TrustedImmPtr(allocatorPtr
), allocatorGPR
);
3610 emitAllocateJSObject(resultGPR
, allocatorGPR
, TrustedImmPtr(structure
), TrustedImmPtr(0), scratchGPR
, slowPath
);
3612 addSlowPathGenerator(slowPathCall(slowPath
, this, operationNewObject
, resultGPR
, structure
));
3614 cellResult(resultGPR
, node
);
3619 GPRTemporary
result(this);
3620 m_jit
.loadPtr(JITCompiler::addressFor(JSStack::Callee
), result
.gpr());
3621 cellResult(result
.gpr(), node
);
3626 SpeculateCellOperand
function(this, node
->child1());
3627 GPRTemporary
result(this, Reuse
, function
);
3628 m_jit
.loadPtr(JITCompiler::Address(function
.gpr(), JSFunction::offsetOfScopeChain()), result
.gpr());
3629 cellResult(result
.gpr(), node
);
3634 GPRTemporary
result(this);
3635 GPRReg resultGPR
= result
.gpr();
3637 m_jit
.loadPtr(JITCompiler::addressFor(JSStack::ScopeChain
), resultGPR
);
3638 cellResult(resultGPR
, node
);
3642 case SkipTopScope
: {
3643 SpeculateCellOperand
scope(this, node
->child1());
3644 GPRTemporary
result(this, Reuse
, scope
);
3645 GPRReg resultGPR
= result
.gpr();
3646 m_jit
.move(scope
.gpr(), resultGPR
);
3647 JITCompiler::Jump activationNotCreated
=
3650 JITCompiler::addressFor(
3651 static_cast<VirtualRegister
>(m_jit
.graph().machineActivationRegister())));
3652 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, JSScope::offsetOfNext()), resultGPR
);
3653 activationNotCreated
.link(&m_jit
);
3654 cellResult(resultGPR
, node
);
3659 SpeculateCellOperand
scope(this, node
->child1());
3660 GPRTemporary
result(this, Reuse
, scope
);
3661 m_jit
.loadPtr(JITCompiler::Address(scope
.gpr(), JSScope::offsetOfNext()), result
.gpr());
3662 cellResult(result
.gpr(), node
);
3666 case GetClosureRegisters
: {
3667 if (WriteBarrierBase
<Unknown
>* registers
= m_jit
.graph().tryGetRegisters(node
->child1().node())) {
3668 GPRTemporary
result(this);
3669 GPRReg resultGPR
= result
.gpr();
3670 m_jit
.move(TrustedImmPtr(registers
), resultGPR
);
3671 storageResult(resultGPR
, node
);
3675 SpeculateCellOperand
scope(this, node
->child1());
3676 GPRTemporary
result(this);
3677 GPRReg scopeGPR
= scope
.gpr();
3678 GPRReg resultGPR
= result
.gpr();
3680 m_jit
.loadPtr(JITCompiler::Address(scopeGPR
, JSVariableObject::offsetOfRegisters()), resultGPR
);
3681 storageResult(resultGPR
, node
);
3684 case GetClosureVar
: {
3685 StorageOperand
registers(this, node
->child1());
3686 GPRTemporary
result(this);
3687 GPRReg registersGPR
= registers
.gpr();
3688 GPRReg resultGPR
= result
.gpr();
3690 m_jit
.load64(JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
)), resultGPR
);
3691 jsValueResult(resultGPR
, node
);
3694 case PutClosureVar
: {
3695 StorageOperand
registers(this, node
->child2());
3696 JSValueOperand
value(this, node
->child3());
3698 GPRReg registersGPR
= registers
.gpr();
3699 GPRReg valueGPR
= value
.gpr();
3701 speculate(node
, node
->child1());
3703 m_jit
.store64(valueGPR
, JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
)));
3708 ASSERT(node
->prediction());
3710 switch (node
->child1().useKind()) {
3712 SpeculateCellOperand
base(this, node
->child1());
3713 GPRTemporary
result(this, Reuse
, base
);
3715 GPRReg baseGPR
= base
.gpr();
3716 GPRReg resultGPR
= result
.gpr();
3720 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber());
3722 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3727 JSValueOperand
base(this, node
->child1());
3728 GPRTemporary
result(this, Reuse
, base
);
3730 GPRReg baseGPR
= base
.gpr();
3731 GPRReg resultGPR
= result
.gpr();
3735 JITCompiler::Jump notCell
= branchNotCell(JSValueRegs(baseGPR
));
3737 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber(), notCell
);
3739 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3744 RELEASE_ASSERT_NOT_REACHED();
3750 case GetByIdFlush
: {
3751 if (!node
->prediction()) {
3752 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3756 switch (node
->child1().useKind()) {
3758 SpeculateCellOperand
base(this, node
->child1());
3759 GPRReg baseGPR
= base
.gpr();
3761 GPRResult
result(this);
3763 GPRReg resultGPR
= result
.gpr();
3769 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber(), JITCompiler::Jump(), DontSpill
);
3771 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3776 JSValueOperand
base(this, node
->child1());
3777 GPRReg baseGPR
= base
.gpr();
3779 GPRResult
result(this);
3780 GPRReg resultGPR
= result
.gpr();
3785 JITCompiler::Jump notCell
= branchNotCell(JSValueRegs(baseGPR
));
3787 cachedGetById(node
->origin
.semantic
, baseGPR
, resultGPR
, node
->identifierNumber(), notCell
, DontSpill
);
3789 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3794 RELEASE_ASSERT_NOT_REACHED();
3800 case GetArrayLength
:
3801 compileGetArrayLength(node
);
3804 case CheckFunction
: {
3805 SpeculateCellOperand
function(this, node
->child1());
3806 speculationCheck(BadFunction
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, function
.gpr(), node
->function()));
3811 case CheckExecutable
: {
3812 SpeculateCellOperand
function(this, node
->child1());
3813 speculationCheck(BadExecutable
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, JITCompiler::Address(function
.gpr(), JSFunction::offsetOfExecutable()), node
->executable()));
3818 case CheckStructure
: {
3819 SpeculateCellOperand
base(this, node
->child1());
3821 ASSERT(node
->structureSet().size());
3824 if (node
->child1()->op() == WeakJSConstant
)
3825 exitKind
= BadWeakConstantCache
;
3827 exitKind
= BadCache
;
3829 if (node
->structureSet().size() == 1) {
3831 exitKind
, JSValueSource::unboxedCell(base
.gpr()), 0,
3832 m_jit
.branchWeakStructure(
3833 JITCompiler::NotEqual
,
3834 JITCompiler::Address(base
.gpr(), JSCell::structureIDOffset()),
3835 node
->structureSet()[0]));
3837 JITCompiler::JumpList done
;
3839 for (size_t i
= 0; i
< node
->structureSet().size() - 1; ++i
)
3840 done
.append(m_jit
.branchWeakStructure(JITCompiler::Equal
, MacroAssembler::Address(base
.gpr(), JSCell::structureIDOffset()), node
->structureSet()[i
]));
3843 exitKind
, JSValueSource::unboxedCell(base
.gpr()), 0,
3844 m_jit
.branchWeakStructure(
3845 JITCompiler::NotEqual
, MacroAssembler::Address(base
.gpr(), JSCell::structureIDOffset()), node
->structureSet().last()));
3854 case StructureTransitionWatchpoint
: {
3855 // There is a fascinating question here of what to do about array profiling.
3856 // We *could* try to tell the OSR exit about where the base of the access is.
3857 // The DFG will have kept it alive, though it may not be in a register, and
3858 // we shouldn't really load it since that could be a waste. For now though,
3859 // we'll just rely on the fact that when a watchpoint fires then that's
3860 // quite a hint already.
3862 m_jit
.addWeakReference(node
->structure());
3864 #if !ASSERT_DISABLED
3865 SpeculateCellOperand
op1(this, node
->child1());
3866 JITCompiler::Jump isOK
= m_jit
.branchStructurePtr(
3868 JITCompiler::Address(op1
.gpr(), JSCell::structureIDOffset()),
3870 m_jit
.abortWithReason(DFGIneffectiveWatchpoint
);
3873 speculateCell(node
->child1());
3880 case PhantomPutStructure
: {
3881 ASSERT(isKnownCell(node
->child1().node()));
3882 m_jit
.jitCode()->common
.notifyCompilingStructureTransition(m_jit
.graph().m_plan
, m_jit
.codeBlock(), node
);
3887 case PutStructure
: {
3888 Structure
* oldStructure
= node
->structureTransitionData().previousStructure
;
3889 Structure
* newStructure
= node
->structureTransitionData().newStructure
;
3891 m_jit
.jitCode()->common
.notifyCompilingStructureTransition(m_jit
.graph().m_plan
, m_jit
.codeBlock(), node
);
3893 SpeculateCellOperand
base(this, node
->child1());
3894 GPRReg baseGPR
= base
.gpr();
3896 ASSERT_UNUSED(oldStructure
, oldStructure
->indexingType() == newStructure
->indexingType());
3897 ASSERT(oldStructure
->typeInfo().type() == newStructure
->typeInfo().type());
3898 ASSERT(oldStructure
->typeInfo().inlineTypeFlags() == newStructure
->typeInfo().inlineTypeFlags());
3899 m_jit
.store32(MacroAssembler::TrustedImm32(newStructure
->id()), MacroAssembler::Address(baseGPR
, JSCell::structureIDOffset()));
3905 case AllocatePropertyStorage
:
3906 compileAllocatePropertyStorage(node
);
3909 case ReallocatePropertyStorage
:
3910 compileReallocatePropertyStorage(node
);
3913 case GetButterfly
: {
3914 SpeculateCellOperand
base(this, node
->child1());
3915 GPRTemporary
result(this, Reuse
, base
);
3917 GPRReg baseGPR
= base
.gpr();
3918 GPRReg resultGPR
= result
.gpr();
3920 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), resultGPR
);
3922 storageResult(resultGPR
, node
);
3926 case GetIndexedPropertyStorage
: {
3927 compileGetIndexedPropertyStorage(node
);
3931 case ConstantStoragePointer
: {
3932 compileConstantStoragePointer(node
);
3936 case GetTypedArrayByteOffset
: {
3937 compileGetTypedArrayByteOffset(node
);
3942 StorageOperand
storage(this, node
->child1());
3943 GPRTemporary
result(this, Reuse
, storage
);
3945 GPRReg storageGPR
= storage
.gpr();
3946 GPRReg resultGPR
= result
.gpr();
3948 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
3950 m_jit
.load64(JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
)), resultGPR
);
3952 jsValueResult(resultGPR
, node
);
3957 StorageOperand
storage(this, node
->child1());
3958 JSValueOperand
value(this, node
->child3());
3959 GPRTemporary
scratch1(this);
3960 GPRTemporary
scratch2(this);
3962 GPRReg storageGPR
= storage
.gpr();
3963 GPRReg valueGPR
= value
.gpr();
3965 speculate(node
, node
->child2());
3967 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
3969 m_jit
.store64(valueGPR
, JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
)));
3975 case PutByIdFlush
: {
3976 SpeculateCellOperand
base(this, node
->child1());
3977 JSValueOperand
value(this, node
->child2());
3978 GPRTemporary
scratch(this);
3980 GPRReg baseGPR
= base
.gpr();
3981 GPRReg valueGPR
= value
.gpr();
3982 GPRReg scratchGPR
= scratch
.gpr();
3985 cachedPutById(node
->origin
.semantic
, baseGPR
, valueGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
, MacroAssembler::Jump(), DontSpill
);
3992 SpeculateCellOperand
base(this, node
->child1());
3993 JSValueOperand
value(this, node
->child2());
3994 GPRTemporary
scratch(this);
3996 GPRReg baseGPR
= base
.gpr();
3997 GPRReg valueGPR
= value
.gpr();
3998 GPRReg scratchGPR
= scratch
.gpr();
4000 cachedPutById(node
->origin
.semantic
, baseGPR
, valueGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
);
4006 case PutByIdDirect
: {
4007 SpeculateCellOperand
base(this, node
->child1());
4008 JSValueOperand
value(this, node
->child2());
4009 GPRTemporary
scratch(this);
4011 GPRReg baseGPR
= base
.gpr();
4012 GPRReg valueGPR
= value
.gpr();
4013 GPRReg scratchGPR
= scratch
.gpr();
4015 cachedPutById(node
->origin
.semantic
, baseGPR
, valueGPR
, scratchGPR
, node
->identifierNumber(), Direct
);
4021 case GetGlobalVar
: {
4022 GPRTemporary
result(this);
4024 m_jit
.load64(node
->registerPointer(), result
.gpr());
4026 jsValueResult(result
.gpr(), node
);
4030 case PutGlobalVar
: {
4031 JSValueOperand
value(this, node
->child1());
4033 m_jit
.store64(value
.gpr(), node
->registerPointer());
4040 VariableWatchpointSet
* set
= node
->variableWatchpointSet();
4042 JSValueOperand
value(this, node
->child1());
4043 GPRReg valueGPR
= value
.gpr();
4045 GPRTemporary
temp(this);
4046 GPRReg tempGPR
= temp
.gpr();
4048 m_jit
.load8(set
->addressOfState(), tempGPR
);
4050 JITCompiler::Jump isDone
=
4051 m_jit
.branch32(JITCompiler::Equal
, tempGPR
, TrustedImm32(IsInvalidated
));
4052 JITCompiler::Jump slowCase
= m_jit
.branch64(JITCompiler::NotEqual
,
4053 JITCompiler::AbsoluteAddress(set
->addressOfInferredValue()), valueGPR
);
4054 isDone
.link(&m_jit
);
4056 addSlowPathGenerator(
4057 slowPathCall(slowCase
, this, operationNotifyWrite
, NoResult
, set
, valueGPR
));
4063 case VarInjectionWatchpoint
:
4064 case VariableWatchpoint
: {
4069 case CheckHasInstance
: {
4070 SpeculateCellOperand
base(this, node
->child1());
4071 GPRTemporary
structure(this);
4073 // Speculate that base 'ImplementsDefaultHasInstance'.
4074 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branchTest8(
4075 MacroAssembler::Zero
,
4076 MacroAssembler::Address(base
.gpr(), JSCell::typeInfoFlagsOffset()),
4077 MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance
)));
4084 compileInstanceOf(node
);
4089 JSValueOperand
value(this, node
->child1());
4090 GPRTemporary
result(this);
4091 GPRTemporary
localGlobalObject(this);
4092 GPRTemporary
remoteGlobalObject(this);
4093 GPRTemporary
scratch(this);
4095 JITCompiler::Jump isCell
= branchIsCell(value
.jsValueRegs());
4097 m_jit
.compare64(JITCompiler::Equal
, value
.gpr(), TrustedImm32(ValueUndefined
), result
.gpr());
4098 JITCompiler::Jump done
= m_jit
.jump();
4100 isCell
.link(&m_jit
);
4101 JITCompiler::Jump notMasqueradesAsUndefined
;
4102 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
4103 m_jit
.move(TrustedImm32(0), result
.gpr());
4104 notMasqueradesAsUndefined
= m_jit
.jump();
4106 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
4107 JITCompiler::NonZero
,
4108 JITCompiler::Address(value
.gpr(), JSCell::typeInfoFlagsOffset()),
4109 TrustedImm32(MasqueradesAsUndefined
));
4110 m_jit
.move(TrustedImm32(0), result
.gpr());
4111 notMasqueradesAsUndefined
= m_jit
.jump();
4113 isMasqueradesAsUndefined
.link(&m_jit
);
4114 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
4115 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
4116 m_jit
.move(TrustedImmPtr(m_jit
.globalObjectFor(node
->origin
.semantic
)), localGlobalObjectGPR
);
4117 m_jit
.emitLoadStructure(value
.gpr(), result
.gpr(), scratch
.gpr());
4118 m_jit
.loadPtr(JITCompiler::Address(result
.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
4119 m_jit
.comparePtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, result
.gpr());
4122 notMasqueradesAsUndefined
.link(&m_jit
);
4124 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4125 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4130 JSValueOperand
value(this, node
->child1());
4131 GPRTemporary
result(this, Reuse
, value
);
4133 m_jit
.move(value
.gpr(), result
.gpr());
4134 m_jit
.xor64(JITCompiler::TrustedImm32(ValueFalse
), result
.gpr());
4135 m_jit
.test64(JITCompiler::Zero
, result
.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result
.gpr());
4136 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4137 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4142 JSValueOperand
value(this, node
->child1());
4143 GPRTemporary
result(this, Reuse
, value
);
4145 m_jit
.test64(JITCompiler::NonZero
, value
.gpr(), GPRInfo::tagTypeNumberRegister
, result
.gpr());
4146 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4147 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4152 JSValueOperand
value(this, node
->child1());
4153 GPRTemporary
result(this, Reuse
, value
);
4155 JITCompiler::Jump isNotCell
= branchNotCell(value
.jsValueRegs());
4157 m_jit
.compare8(JITCompiler::Equal
,
4158 JITCompiler::Address(value
.gpr(), JSCell::typeInfoTypeOffset()),
4159 TrustedImm32(StringType
),
4161 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4162 JITCompiler::Jump done
= m_jit
.jump();
4164 isNotCell
.link(&m_jit
);
4165 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
4168 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4173 JSValueOperand
value(this, node
->child1());
4174 GPRReg valueGPR
= value
.gpr();
4175 GPRResult
result(this);
4176 GPRReg resultGPR
= result
.gpr();
4178 callOperation(operationIsObject
, resultGPR
, valueGPR
);
4179 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
4180 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4185 JSValueOperand
value(this, node
->child1());
4186 GPRReg valueGPR
= value
.gpr();
4187 GPRResult
result(this);
4188 GPRReg resultGPR
= result
.gpr();
4190 callOperation(operationIsFunction
, resultGPR
, valueGPR
);
4191 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
4192 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4197 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
4198 GPRReg valueGPR
= value
.gpr();
4199 GPRResult
result(this);
4200 GPRReg resultGPR
= result
.gpr();
4201 JITCompiler::JumpList doneJumps
;
4205 ASSERT(node
->child1().useKind() == UntypedUse
|| node
->child1().useKind() == CellUse
|| node
->child1().useKind() == StringUse
);
4207 JITCompiler::Jump isNotCell
= branchNotCell(JSValueRegs(valueGPR
));
4208 if (node
->child1().useKind() != UntypedUse
)
4209 DFG_TYPE_CHECK(JSValueSource(valueGPR
), node
->child1(), SpecCell
, isNotCell
);
4211 if (!node
->child1()->shouldSpeculateObject() || node
->child1().useKind() == StringUse
) {
4212 JITCompiler::Jump notString
= m_jit
.branch8(
4213 JITCompiler::NotEqual
,
4214 JITCompiler::Address(valueGPR
, JSCell::typeInfoTypeOffset()),
4215 TrustedImm32(StringType
));
4216 if (node
->child1().useKind() == StringUse
)
4217 DFG_TYPE_CHECK(JSValueSource(valueGPR
), node
->child1(), SpecString
, notString
);
4218 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.stringString()), resultGPR
);
4219 doneJumps
.append(m_jit
.jump());
4220 if (node
->child1().useKind() != StringUse
) {
4221 notString
.link(&m_jit
);
4222 callOperation(operationTypeOf
, resultGPR
, valueGPR
);
4223 doneJumps
.append(m_jit
.jump());
4226 callOperation(operationTypeOf
, resultGPR
, valueGPR
);
4227 doneJumps
.append(m_jit
.jump());
4230 if (node
->child1().useKind() == UntypedUse
) {
4231 isNotCell
.link(&m_jit
);
4232 JITCompiler::Jump notNumber
= m_jit
.branchTest64(JITCompiler::Zero
, valueGPR
, GPRInfo::tagTypeNumberRegister
);
4233 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.numberString()), resultGPR
);
4234 doneJumps
.append(m_jit
.jump());
4235 notNumber
.link(&m_jit
);
4237 JITCompiler::Jump notUndefined
= m_jit
.branch64(JITCompiler::NotEqual
, valueGPR
, JITCompiler::TrustedImm64(ValueUndefined
));
4238 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.undefinedString()), resultGPR
);
4239 doneJumps
.append(m_jit
.jump());
4240 notUndefined
.link(&m_jit
);
4242 JITCompiler::Jump notNull
= m_jit
.branch64(JITCompiler::NotEqual
, valueGPR
, JITCompiler::TrustedImm64(ValueNull
));
4243 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.objectString()), resultGPR
);
4244 doneJumps
.append(m_jit
.jump());
4245 notNull
.link(&m_jit
);
4247 // Only boolean left
4248 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.booleanString()), resultGPR
);
4250 doneJumps
.link(&m_jit
);
4251 cellResult(resultGPR
, node
);
4263 case CreateActivation
: {
4264 RELEASE_ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4266 JSValueOperand
value(this, node
->child1());
4267 GPRTemporary
result(this, Reuse
, value
);
4269 GPRReg valueGPR
= value
.gpr();
4270 GPRReg resultGPR
= result
.gpr();
4272 m_jit
.move(valueGPR
, resultGPR
);
4274 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
);
4276 addSlowPathGenerator(
4278 notCreated
, this, operationCreateActivation
, resultGPR
,
4279 framePointerOffsetToGetActivationRegisters()));
4281 cellResult(resultGPR
, node
);
4285 case FunctionReentryWatchpoint
: {
4290 case CreateArguments
: {
4291 JSValueOperand
value(this, node
->child1());
4292 GPRTemporary
scratch1(this);
4293 GPRTemporary
scratch2(this);
4294 GPRTemporary
result(this, Reuse
, value
);
4296 GPRReg valueGPR
= value
.gpr();
4297 GPRReg scratchGPR1
= scratch1
.gpr();
4298 GPRReg scratchGPR2
= scratch2
.gpr();
4299 GPRReg resultGPR
= result
.gpr();
4301 m_jit
.move(valueGPR
, resultGPR
);
4303 if (node
->origin
.semantic
.inlineCallFrame
) {
4304 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
);
4305 addSlowPathGenerator(
4307 notCreated
, this, operationCreateInlinedArguments
, resultGPR
,
4308 node
->origin
.semantic
.inlineCallFrame
));
4309 cellResult(resultGPR
, node
);
4313 FunctionExecutable
* executable
= jsCast
<FunctionExecutable
*>(m_jit
.graph().executableFor(node
->origin
.semantic
));
4314 if (m_jit
.codeBlock()->hasSlowArguments()
4315 || executable
->isStrictMode()
4316 || !executable
->parameterCount()) {
4317 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
);
4318 addSlowPathGenerator(
4319 slowPathCall(notCreated
, this, operationCreateArguments
, resultGPR
));
4320 cellResult(resultGPR
, node
);
4324 JITCompiler::Jump alreadyCreated
= m_jit
.branchTest64(JITCompiler::NonZero
, resultGPR
);
4326 MacroAssembler::JumpList slowPaths
;
4327 emitAllocateArguments(resultGPR
, scratchGPR1
, scratchGPR2
, slowPaths
);
4328 addSlowPathGenerator(
4329 slowPathCall(slowPaths
, this, operationCreateArguments
, resultGPR
));
4331 alreadyCreated
.link(&m_jit
);
4332 cellResult(resultGPR
, node
);
4336 case TearOffActivation
: {
4337 RELEASE_ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4339 JSValueOperand
activationValue(this, node
->child1());
4340 GPRTemporary
scratch(this);
4341 GPRReg activationValueGPR
= activationValue
.gpr();
4342 GPRReg scratchGPR
= scratch
.gpr();
4344 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, activationValueGPR
);
4346 SymbolTable
* symbolTable
= m_jit
.symbolTableFor(node
->origin
.semantic
);
4347 int registersOffset
= JSActivation::registersOffset(symbolTable
);
4349 int bytecodeCaptureStart
= symbolTable
->captureStart();
4350 int machineCaptureStart
= m_jit
.graph().m_machineCaptureStart
;
4351 for (int i
= symbolTable
->captureCount(); i
--;) {
4353 JITCompiler::Address(
4354 GPRInfo::callFrameRegister
,
4355 (machineCaptureStart
- i
) * sizeof(Register
)),
4359 JITCompiler::Address(
4361 registersOffset
+ (bytecodeCaptureStart
- i
) * sizeof(Register
)));
4363 m_jit
.addPtr(TrustedImm32(registersOffset
), activationValueGPR
, scratchGPR
);
4364 m_jit
.storePtr(scratchGPR
, JITCompiler::Address(activationValueGPR
, JSActivation::offsetOfRegisters()));
4366 notCreated
.link(&m_jit
);
4371 case TearOffArguments
: {
4372 JSValueOperand
unmodifiedArgumentsValue(this, node
->child1());
4373 JSValueOperand
activationValue(this, node
->child2());
4374 GPRReg unmodifiedArgumentsValueGPR
= unmodifiedArgumentsValue
.gpr();
4375 GPRReg activationValueGPR
= activationValue
.gpr();
4377 JITCompiler::Jump created
= m_jit
.branchTest64(JITCompiler::NonZero
, unmodifiedArgumentsValueGPR
);
4379 if (node
->origin
.semantic
.inlineCallFrame
) {
4380 addSlowPathGenerator(
4382 created
, this, operationTearOffInlinedArguments
, NoResult
,
4383 unmodifiedArgumentsValueGPR
, activationValueGPR
, node
->origin
.semantic
.inlineCallFrame
));
4385 addSlowPathGenerator(
4387 created
, this, operationTearOffArguments
, NoResult
, unmodifiedArgumentsValueGPR
, activationValueGPR
));
4394 case GetMyArgumentsLength
: {
4395 GPRTemporary
result(this);
4396 GPRReg resultGPR
= result
.gpr();
4398 if (!isEmptySpeculation(
4399 m_state
.variables().operand(
4400 m_jit
.graph().argumentsRegisterFor(node
->origin
.semantic
)).m_type
)) {
4402 ArgumentsEscaped
, JSValueRegs(), 0,
4404 JITCompiler::NonZero
,
4405 JITCompiler::addressFor(
4406 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
))));
4409 RELEASE_ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4410 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultGPR
);
4411 m_jit
.sub32(TrustedImm32(1), resultGPR
);
4412 int32Result(resultGPR
, node
);
4416 case GetMyArgumentsLengthSafe
: {
4417 GPRTemporary
result(this);
4418 GPRReg resultGPR
= result
.gpr();
4420 JITCompiler::Jump created
= m_jit
.branchTest64(
4421 JITCompiler::NonZero
,
4422 JITCompiler::addressFor(
4423 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
)));
4425 if (node
->origin
.semantic
.inlineCallFrame
) {
4427 Imm64(JSValue::encode(jsNumber(node
->origin
.semantic
.inlineCallFrame
->arguments
.size() - 1))),
4430 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultGPR
);
4431 m_jit
.sub32(TrustedImm32(1), resultGPR
);
4432 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, resultGPR
);
4435 // FIXME: the slow path generator should perform a forward speculation that the
4436 // result is an integer. For now we postpone the speculation by having this return
4439 addSlowPathGenerator(
4441 created
, this, operationGetArgumentsLength
, resultGPR
,
4442 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
).offset()));
4444 jsValueResult(resultGPR
, node
);
4448 case GetMyArgumentByVal
: {
4449 SpeculateStrictInt32Operand
index(this, node
->child1());
4450 GPRTemporary
result(this);
4451 GPRReg indexGPR
= index
.gpr();
4452 GPRReg resultGPR
= result
.gpr();
4454 if (!isEmptySpeculation(
4455 m_state
.variables().operand(
4456 m_jit
.graph().argumentsRegisterFor(node
->origin
.semantic
)).m_type
)) {
4458 ArgumentsEscaped
, JSValueRegs(), 0,
4460 JITCompiler::NonZero
,
4461 JITCompiler::addressFor(
4462 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
))));
4465 m_jit
.add32(TrustedImm32(1), indexGPR
, resultGPR
);
4466 if (node
->origin
.semantic
.inlineCallFrame
) {
4468 Uncountable
, JSValueRegs(), 0,
4470 JITCompiler::AboveOrEqual
,
4472 Imm32(node
->origin
.semantic
.inlineCallFrame
->arguments
.size())));
4475 Uncountable
, JSValueRegs(), 0,
4477 JITCompiler::AboveOrEqual
,
4479 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4482 JITCompiler::JumpList slowArgument
;
4483 JITCompiler::JumpList slowArgumentOutOfBounds
;
4484 if (m_jit
.symbolTableFor(node
->origin
.semantic
)->slowArguments()) {
4485 RELEASE_ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4486 const SlowArgument
* slowArguments
= m_jit
.graph().m_slowArguments
.get();
4488 slowArgumentOutOfBounds
.append(
4490 JITCompiler::AboveOrEqual
, indexGPR
,
4491 Imm32(m_jit
.symbolTableFor(node
->origin
.semantic
)->parameterCount())));
4493 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4494 m_jit
.move(ImmPtr(slowArguments
), resultGPR
);
4496 JITCompiler::BaseIndex(
4497 resultGPR
, indexGPR
, JITCompiler::TimesEight
,
4498 OBJECT_OFFSETOF(SlowArgument
, index
)),
4500 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4502 JITCompiler::BaseIndex(
4503 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
),
4505 slowArgument
.append(m_jit
.jump());
4507 slowArgumentOutOfBounds
.link(&m_jit
);
4509 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4512 JITCompiler::BaseIndex(
4513 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
, m_jit
.offsetOfArgumentsIncludingThis(node
->origin
.semantic
)),
4516 slowArgument
.link(&m_jit
);
4517 jsValueResult(resultGPR
, node
);
4521 case GetMyArgumentByValSafe
: {
4522 SpeculateStrictInt32Operand
index(this, node
->child1());
4523 GPRTemporary
result(this);
4524 GPRReg indexGPR
= index
.gpr();
4525 GPRReg resultGPR
= result
.gpr();
4527 JITCompiler::JumpList slowPath
;
4530 JITCompiler::NonZero
,
4531 JITCompiler::addressFor(
4532 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
))));
4534 m_jit
.add32(TrustedImm32(1), indexGPR
, resultGPR
);
4535 if (node
->origin
.semantic
.inlineCallFrame
) {
4538 JITCompiler::AboveOrEqual
,
4540 Imm32(node
->origin
.semantic
.inlineCallFrame
->arguments
.size())));
4544 JITCompiler::AboveOrEqual
,
4546 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4549 JITCompiler::JumpList slowArgument
;
4550 JITCompiler::JumpList slowArgumentOutOfBounds
;
4551 if (m_jit
.symbolTableFor(node
->origin
.semantic
)->slowArguments()) {
4552 RELEASE_ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4553 const SlowArgument
* slowArguments
= m_jit
.graph().m_slowArguments
.get();
4555 slowArgumentOutOfBounds
.append(
4557 JITCompiler::AboveOrEqual
, indexGPR
,
4558 Imm32(m_jit
.symbolTableFor(node
->origin
.semantic
)->parameterCount())));
4560 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4561 m_jit
.move(ImmPtr(slowArguments
), resultGPR
);
4563 JITCompiler::BaseIndex(
4564 resultGPR
, indexGPR
, JITCompiler::TimesEight
,
4565 OBJECT_OFFSETOF(SlowArgument
, index
)),
4567 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4569 JITCompiler::BaseIndex(
4570 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
),
4572 slowArgument
.append(m_jit
.jump());
4574 slowArgumentOutOfBounds
.link(&m_jit
);
4576 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4579 JITCompiler::BaseIndex(
4580 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
, m_jit
.offsetOfArgumentsIncludingThis(node
->origin
.semantic
)),
4583 if (node
->origin
.semantic
.inlineCallFrame
) {
4584 addSlowPathGenerator(
4586 slowPath
, this, operationGetInlinedArgumentByVal
, resultGPR
,
4587 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
).offset(),
4588 node
->origin
.semantic
.inlineCallFrame
,
4591 addSlowPathGenerator(
4593 slowPath
, this, operationGetArgumentByVal
, resultGPR
,
4594 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
).offset(),
4598 slowArgument
.link(&m_jit
);
4599 jsValueResult(resultGPR
, node
);
4603 case CheckArgumentsNotCreated
: {
4604 ASSERT(!isEmptySpeculation(
4605 m_state
.variables().operand(
4606 m_jit
.graph().argumentsRegisterFor(node
->origin
.semantic
)).m_type
));
4608 ArgumentsEscaped
, JSValueRegs(), 0,
4610 JITCompiler::NonZero
,
4611 JITCompiler::addressFor(
4612 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
))));
4617 case NewFunctionNoCheck
:
4618 compileNewFunctionNoCheck(node
);
4622 JSValueOperand
value(this, node
->child1());
4623 GPRTemporary
result(this, Reuse
, value
);
4625 GPRReg valueGPR
= value
.gpr();
4626 GPRReg resultGPR
= result
.gpr();
4628 m_jit
.move(valueGPR
, resultGPR
);
4630 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
);
4632 addSlowPathGenerator(
4634 notCreated
, this, operationNewFunction
,
4635 resultGPR
, m_jit
.codeBlock()->functionDecl(node
->functionDeclIndex())));
4637 jsValueResult(resultGPR
, node
);
4641 case NewFunctionExpression
:
4642 compileNewFunctionExpression(node
);
4649 case CountExecution
:
4650 m_jit
.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node
->executionCounter()->address()));
4653 case ForceOSRExit
: {
4654 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
4658 case InvalidationPoint
:
4659 emitInvalidationPoint(node
);
4662 case CheckWatchdogTimer
:
4663 ASSERT(m_jit
.vm()->watchdog
);
4665 WatchdogTimerFired
, JSValueRegs(), 0,
4667 JITCompiler::NonZero
,
4668 JITCompiler::AbsoluteAddress(m_jit
.vm()->watchdog
->timerDidFireAddress())));
4673 DFG_NODE_DO_TO_CHILDREN(m_jit
.graph(), node
, speculate
);
4678 case ProfileWillCall
:
4679 case ProfileDidCall
:
4687 RELEASE_ASSERT_NOT_REACHED();
4691 case StoreBarrierWithNullCheck
: {
4692 compileStoreBarrier(node
);
4697 case CheckTierUpInLoop
: {
4698 MacroAssembler::Jump done
= m_jit
.branchAdd32(
4699 MacroAssembler::Signed
,
4700 TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
4701 MacroAssembler::AbsoluteAddress(&m_jit
.jitCode()->tierUpCounter
.m_counter
));
4703 silentSpillAllRegisters(InvalidGPRReg
);
4704 m_jit
.setupArgumentsExecState();
4705 appendCall(triggerTierUpNow
);
4706 silentFillAllRegisters(InvalidGPRReg
);
4712 case CheckTierUpAtReturn
: {
4713 MacroAssembler::Jump done
= m_jit
.branchAdd32(
4714 MacroAssembler::Signed
,
4715 TrustedImm32(Options::ftlTierUpCounterIncrementForReturn()),
4716 MacroAssembler::AbsoluteAddress(&m_jit
.jitCode()->tierUpCounter
.m_counter
));
4718 silentSpillAllRegisters(InvalidGPRReg
);
4719 m_jit
.setupArgumentsExecState();
4720 appendCall(triggerTierUpNow
);
4721 silentFillAllRegisters(InvalidGPRReg
);
4727 case CheckTierUpAndOSREnter
: {
4728 ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4730 GPRTemporary
temp(this);
4731 GPRReg tempGPR
= temp
.gpr();
4733 MacroAssembler::Jump done
= m_jit
.branchAdd32(
4734 MacroAssembler::Signed
,
4735 TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
4736 MacroAssembler::AbsoluteAddress(&m_jit
.jitCode()->tierUpCounter
.m_counter
));
4738 silentSpillAllRegisters(tempGPR
);
4739 m_jit
.setupArgumentsWithExecState(
4740 TrustedImm32(node
->origin
.semantic
.bytecodeIndex
),
4741 TrustedImm32(m_stream
->size()));
4742 appendCallSetResult(triggerOSREntryNow
, tempGPR
);
4743 MacroAssembler::Jump dontEnter
= m_jit
.branchTestPtr(MacroAssembler::Zero
, tempGPR
);
4744 m_jit
.jump(tempGPR
);
4745 dontEnter
.link(&m_jit
);
4746 silentFillAllRegisters(tempGPR
);
4751 #else // ENABLE(FTL_JIT)
4752 case CheckTierUpInLoop
:
4753 case CheckTierUpAtReturn
:
4754 case CheckTierUpAndOSREnter
:
4755 RELEASE_ASSERT_NOT_REACHED();
4757 #endif // ENABLE(FTL_JIT)
4763 case ExtractOSREntryLocal
:
4766 case MultiGetByOffset
:
4767 case MultiPutByOffset
:
4769 RELEASE_ASSERT_NOT_REACHED();
4776 if (node
->hasResult() && node
->mustGenerate())
4781 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR
, GPRReg valueGPR
, Edge valueUse
, GPRReg scratch1
, GPRReg scratch2
)
4783 JITCompiler::Jump isNotCell
;
4784 if (!isKnownCell(valueUse
.node()))
4785 isNotCell
= branchNotCell(JSValueRegs(valueGPR
));
4787 JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered
= m_jit
.checkMarkByte(ownerGPR
);
4788 storeToWriteBarrierBuffer(ownerGPR
, scratch1
, scratch2
);
4789 ownerNotMarkedOrAlreadyRemembered
.link(&m_jit
);
4791 if (!isKnownCell(valueUse
.node()))
4792 isNotCell
.link(&m_jit
);
4795 void SpeculativeJIT::writeBarrier(JSCell
* owner
, GPRReg valueGPR
, Edge valueUse
, GPRReg scratch1
, GPRReg scratch2
)
4797 JITCompiler::Jump isNotCell
;
4798 if (!isKnownCell(valueUse
.node()))
4799 isNotCell
= branchNotCell(JSValueRegs(valueGPR
));
4801 JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered
= m_jit
.checkMarkByte(owner
);
4802 storeToWriteBarrierBuffer(owner
, scratch1
, scratch2
);
4803 ownerNotMarkedOrAlreadyRemembered
.link(&m_jit
);
4805 if (!isKnownCell(valueUse
.node()))
4806 isNotCell
.link(&m_jit
);
4808 #endif // ENABLE(GGC)
4810 JITCompiler::Jump
SpeculativeJIT::branchIsCell(JSValueRegs regs
)
4812 return m_jit
.branchTest64(MacroAssembler::Zero
, regs
.gpr(), GPRInfo::tagMaskRegister
);
4815 JITCompiler::Jump
SpeculativeJIT::branchNotCell(JSValueRegs regs
)
4817 return m_jit
.branchTest64(MacroAssembler::NonZero
, regs
.gpr(), GPRInfo::tagMaskRegister
);
4820 JITCompiler::Jump
SpeculativeJIT::branchIsOther(JSValueRegs regs
, GPRReg tempGPR
)
4822 m_jit
.move(regs
.gpr(), tempGPR
);
4823 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), tempGPR
);
4824 return m_jit
.branch64(
4825 MacroAssembler::Equal
, tempGPR
,
4826 MacroAssembler::TrustedImm64(ValueNull
));
4829 JITCompiler::Jump
SpeculativeJIT::branchNotOther(JSValueRegs regs
, GPRReg tempGPR
)
4831 m_jit
.move(regs
.gpr(), tempGPR
);
4832 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), tempGPR
);
4833 return m_jit
.branch64(
4834 MacroAssembler::NotEqual
, tempGPR
,
4835 MacroAssembler::TrustedImm64(ValueNull
));
4838 void SpeculativeJIT::moveTrueTo(GPRReg gpr
)
4840 m_jit
.move(TrustedImm32(ValueTrue
), gpr
);
4843 void SpeculativeJIT::moveFalseTo(GPRReg gpr
)
4845 m_jit
.move(TrustedImm32(ValueFalse
), gpr
);
4848 void SpeculativeJIT::blessBoolean(GPRReg gpr
)
4850 m_jit
.or32(TrustedImm32(ValueFalse
), gpr
);
4853 void SpeculativeJIT::convertMachineInt(Edge valueEdge
, GPRReg resultGPR
)
4855 JSValueOperand
value(this, valueEdge
, ManualOperandSpeculation
);
4856 GPRReg valueGPR
= value
.gpr();
4858 JITCompiler::Jump notInt32
=
4859 m_jit
.branch64(JITCompiler::Below
, valueGPR
, GPRInfo::tagTypeNumberRegister
);
4861 m_jit
.signExtend32ToPtr(valueGPR
, resultGPR
);
4862 JITCompiler::Jump done
= m_jit
.jump();
4864 notInt32
.link(&m_jit
);
4865 silentSpillAllRegisters(resultGPR
);
4866 callOperation(operationConvertBoxedDoubleToInt52
, resultGPR
, valueGPR
);
4867 silentFillAllRegisters(resultGPR
);
4870 JSValueRegs(valueGPR
), valueEdge
, SpecInt32
| SpecInt52AsDouble
,
4872 JITCompiler::Equal
, resultGPR
,
4873 JITCompiler::TrustedImm64(JSValue::notInt52
)));
4877 void SpeculativeJIT::speculateMachineInt(Edge edge
)
4879 if (!needsTypeCheck(edge
, SpecInt32
| SpecInt52AsDouble
))
4882 GPRTemporary
temp(this);
4883 convertMachineInt(edge
, temp
.gpr());
4886 void SpeculativeJIT::speculateDoubleRepMachineInt(Edge edge
)
4888 if (!needsTypeCheck(edge
, SpecInt52AsDouble
))
4891 SpeculateDoubleOperand
value(this, edge
);
4892 FPRReg valueFPR
= value
.fpr();
4894 GPRResult
result(this);
4895 GPRReg resultGPR
= result
.gpr();
4899 callOperation(operationConvertDoubleToInt52
, resultGPR
, valueFPR
);
4902 JSValueRegs(), edge
, SpecInt52AsDouble
,
4904 JITCompiler::Equal
, resultGPR
,
4905 JITCompiler::TrustedImm64(JSValue::notInt52
)));
4910 } } // namespace JSC::DFG