2 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "Arguments.h"
32 #include "ArrayPrototype.h"
33 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
34 #include "DFGSlowPathGenerator.h"
35 #include "JSCJSValueInlines.h"
36 #include "ObjectPrototype.h"
38 namespace JSC
{ namespace DFG
{
42 GPRReg
SpeculativeJIT::fillInteger(Edge edge
, DataFormat
& returnFormat
)
44 ASSERT(!needsTypeCheck(edge
, SpecInt32
));
46 VirtualRegister virtualRegister
= edge
->virtualRegister();
47 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
49 if (info
.registerFormat() == DataFormatNone
) {
50 GPRReg gpr
= allocate();
52 if (edge
->hasConstant()) {
53 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
54 if (isInt32Constant(edge
.node())) {
55 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
56 info
.fillInteger(*m_stream
, gpr
);
57 returnFormat
= DataFormatInteger
;
60 if (isNumberConstant(edge
.node())) {
61 JSValue jsValue
= jsNumber(valueOfNumberConstant(edge
.node()));
62 m_jit
.move(MacroAssembler::Imm64(JSValue::encode(jsValue
)), gpr
);
64 ASSERT(isJSConstant(edge
.node()));
65 JSValue jsValue
= valueOfJSConstant(edge
.node());
66 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
68 } else if (info
.spillFormat() == DataFormatInteger
) {
69 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
70 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
71 // Tag it, since fillInteger() is used when we want a boxed integer.
72 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
74 RELEASE_ASSERT(info
.spillFormat() == DataFormatJS
|| info
.spillFormat() == DataFormatJSInteger
);
75 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
76 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
79 // Since we statically know that we're filling an integer, and values
80 // in the JSStack are boxed, this must be DataFormatJSInteger.
81 // We will check this with a jitAssert below.
82 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInteger
);
86 switch (info
.registerFormat()) {
88 // Should have filled, above.
89 case DataFormatJSDouble
:
90 case DataFormatDouble
:
93 case DataFormatJSCell
:
94 case DataFormatBoolean
:
95 case DataFormatJSBoolean
:
96 case DataFormatStorage
:
97 // Should only be calling this function if we know this operand to be integer.
98 RELEASE_ASSERT_NOT_REACHED();
100 case DataFormatJSInteger
: {
101 GPRReg gpr
= info
.gpr();
103 m_jit
.jitAssertIsJSInt32(gpr
);
104 returnFormat
= DataFormatJSInteger
;
108 case DataFormatInteger
: {
109 GPRReg gpr
= info
.gpr();
111 m_jit
.jitAssertIsInt32(gpr
);
112 returnFormat
= DataFormatInteger
;
117 RELEASE_ASSERT_NOT_REACHED();
118 return InvalidGPRReg
;
122 GPRReg
SpeculativeJIT::fillJSValue(Edge edge
)
124 VirtualRegister virtualRegister
= edge
->virtualRegister();
125 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
127 switch (info
.registerFormat()) {
128 case DataFormatNone
: {
129 GPRReg gpr
= allocate();
131 if (edge
->hasConstant()) {
132 if (isInt32Constant(edge
.node())) {
133 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInteger
);
134 JSValue jsValue
= jsNumber(valueOfInt32Constant(edge
.node()));
135 m_jit
.move(MacroAssembler::Imm64(JSValue::encode(jsValue
)), gpr
);
136 } else if (isNumberConstant(edge
.node())) {
137 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSDouble
);
138 JSValue
jsValue(JSValue::EncodeAsDouble
, valueOfNumberConstant(edge
.node()));
139 m_jit
.move(MacroAssembler::Imm64(JSValue::encode(jsValue
)), gpr
);
141 ASSERT(isJSConstant(edge
.node()));
142 JSValue jsValue
= valueOfJSConstant(edge
.node());
143 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue
)), gpr
);
144 info
.fillJSValue(*m_stream
, gpr
, DataFormatJS
);
147 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
149 DataFormat spillFormat
= info
.spillFormat();
150 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
151 if (spillFormat
== DataFormatInteger
) {
152 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
153 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
154 spillFormat
= DataFormatJSInteger
;
156 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
157 if (spillFormat
== DataFormatDouble
) {
158 // Need to box the double, since we want a JSValue.
159 m_jit
.sub64(GPRInfo::tagTypeNumberRegister
, gpr
);
160 spillFormat
= DataFormatJSDouble
;
162 RELEASE_ASSERT(spillFormat
& DataFormatJS
);
164 info
.fillJSValue(*m_stream
, gpr
, spillFormat
);
169 case DataFormatInteger
: {
170 GPRReg gpr
= info
.gpr();
171 // If the register has already been locked we need to take a copy.
172 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
173 if (m_gprs
.isLocked(gpr
)) {
174 GPRReg result
= allocate();
175 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
, result
);
179 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
180 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInteger
);
184 case DataFormatDouble
: {
185 FPRReg fpr
= info
.fpr();
186 GPRReg gpr
= boxDouble(fpr
);
189 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSDouble
);
191 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderJS
);
197 // No retag required on JSVALUE64!
199 case DataFormatJSInteger
:
200 case DataFormatJSDouble
:
201 case DataFormatJSCell
:
202 case DataFormatJSBoolean
: {
203 GPRReg gpr
= info
.gpr();
208 case DataFormatBoolean
:
209 case DataFormatStorage
:
210 // this type currently never occurs
211 RELEASE_ASSERT_NOT_REACHED();
214 RELEASE_ASSERT_NOT_REACHED();
215 return InvalidGPRReg
;
219 void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node
* node
)
221 IntegerOperand
op1(this, node
->child1());
222 FPRTemporary
boxer(this);
223 GPRTemporary
result(this, op1
);
225 JITCompiler::Jump positive
= m_jit
.branch32(MacroAssembler::GreaterThanOrEqual
, op1
.gpr(), TrustedImm32(0));
227 m_jit
.convertInt32ToDouble(op1
.gpr(), boxer
.fpr());
228 m_jit
.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32
), boxer
.fpr());
230 boxDouble(boxer
.fpr(), result
.gpr());
232 JITCompiler::Jump done
= m_jit
.jump();
234 positive
.link(&m_jit
);
236 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, op1
.gpr(), result
.gpr());
240 jsValueResult(result
.gpr(), m_currentNode
);
243 void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg resultGPR
, unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
245 JITCompiler::DataLabelPtr structureToCompare
;
246 JITCompiler::PatchableJump structureCheck
= m_jit
.patchableBranchPtrWithPatch(JITCompiler::NotEqual
, JITCompiler::Address(baseGPR
, JSCell::structureOffset()), structureToCompare
, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer
)));
248 JITCompiler::ConvertibleLoadLabel propertyStorageLoad
=
249 m_jit
.convertibleLoadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), resultGPR
);
250 JITCompiler::DataLabelCompact loadWithPatch
= m_jit
.load64WithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR
, 0), resultGPR
);
252 JITCompiler::Label doneLabel
= m_jit
.label();
254 OwnPtr
<SlowPathGenerator
> slowPath
;
255 if (!slowPathTarget
.isSet()) {
256 slowPath
= slowPathCall(
257 structureCheck
.m_jump
, this, operationGetByIdOptimize
, resultGPR
, baseGPR
,
258 identifier(identifierNumber
), spillMode
);
260 JITCompiler::JumpList slowCases
;
261 slowCases
.append(structureCheck
.m_jump
);
262 slowCases
.append(slowPathTarget
);
263 slowPath
= slowPathCall(
264 slowCases
, this, operationGetByIdOptimize
, resultGPR
, baseGPR
,
265 identifier(identifierNumber
), spillMode
);
267 m_jit
.addPropertyAccess(
268 PropertyAccessRecord(
269 codeOrigin
, structureToCompare
, structureCheck
, propertyStorageLoad
, loadWithPatch
,
270 slowPath
.get(), doneLabel
, safeCast
<int8_t>(baseGPR
), safeCast
<int8_t>(resultGPR
),
272 spillMode
== NeedToSpill
? PropertyAccessRecord::RegistersInUse
: PropertyAccessRecord::RegistersFlushed
));
273 addSlowPathGenerator(slowPath
.release());
276 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin
, GPRReg baseGPR
, GPRReg valueGPR
, Edge valueUse
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind putKind
, JITCompiler::Jump slowPathTarget
)
279 JITCompiler::DataLabelPtr structureToCompare
;
280 JITCompiler::PatchableJump structureCheck
= m_jit
.patchableBranchPtrWithPatch(JITCompiler::NotEqual
, JITCompiler::Address(baseGPR
, JSCell::structureOffset()), structureToCompare
, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer
)));
282 writeBarrier(baseGPR
, valueGPR
, valueUse
, WriteBarrierForPropertyAccess
, scratchGPR
);
284 JITCompiler::ConvertibleLoadLabel propertyStorageLoad
=
285 m_jit
.convertibleLoadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), scratchGPR
);
286 JITCompiler::DataLabel32 storeWithPatch
= m_jit
.store64WithAddressOffsetPatch(valueGPR
, JITCompiler::Address(scratchGPR
, 0));
288 JITCompiler::Label doneLabel
= m_jit
.label();
290 V_DFGOperation_EJCI optimizedCall
;
291 if (m_jit
.strictModeFor(m_currentNode
->codeOrigin
)) {
292 if (putKind
== Direct
)
293 optimizedCall
= operationPutByIdDirectStrictOptimize
;
295 optimizedCall
= operationPutByIdStrictOptimize
;
297 if (putKind
== Direct
)
298 optimizedCall
= operationPutByIdDirectNonStrictOptimize
;
300 optimizedCall
= operationPutByIdNonStrictOptimize
;
302 OwnPtr
<SlowPathGenerator
> slowPath
;
303 if (!slowPathTarget
.isSet()) {
304 slowPath
= slowPathCall(
305 structureCheck
.m_jump
, this, optimizedCall
, NoResult
, valueGPR
, baseGPR
,
306 identifier(identifierNumber
));
308 JITCompiler::JumpList slowCases
;
309 slowCases
.append(structureCheck
.m_jump
);
310 slowCases
.append(slowPathTarget
);
311 slowPath
= slowPathCall(
312 slowCases
, this, optimizedCall
, NoResult
, valueGPR
, baseGPR
,
313 identifier(identifierNumber
));
315 RegisterSet currentlyUsedRegisters
= usedRegisters();
316 currentlyUsedRegisters
.clear(scratchGPR
);
317 ASSERT(currentlyUsedRegisters
.get(baseGPR
));
318 ASSERT(currentlyUsedRegisters
.get(valueGPR
));
319 m_jit
.addPropertyAccess(
320 PropertyAccessRecord(
321 codeOrigin
, structureToCompare
, structureCheck
, propertyStorageLoad
,
322 JITCompiler::DataLabelCompact(storeWithPatch
.label()), slowPath
.get(), doneLabel
,
323 safeCast
<int8_t>(baseGPR
), safeCast
<int8_t>(valueGPR
), currentlyUsedRegisters
));
324 addSlowPathGenerator(slowPath
.release());
327 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
)
329 JSValueOperand
arg(this, operand
);
330 GPRReg argGPR
= arg
.gpr();
332 GPRTemporary
result(this, arg
);
333 GPRReg resultGPR
= result
.gpr();
335 JITCompiler::Jump notCell
;
337 JITCompiler::Jump notMasqueradesAsUndefined
;
338 if (m_jit
.graph().globalObjectFor(operand
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
339 if (!isKnownCell(operand
.node()))
340 notCell
= m_jit
.branchTest64(MacroAssembler::NonZero
, argGPR
, GPRInfo::tagMaskRegister
);
342 m_jit
.graph().globalObjectFor(operand
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
343 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultGPR
);
344 notMasqueradesAsUndefined
= m_jit
.jump();
346 GPRTemporary
localGlobalObject(this);
347 GPRTemporary
remoteGlobalObject(this);
349 if (!isKnownCell(operand
.node()))
350 notCell
= m_jit
.branchTest64(MacroAssembler::NonZero
, argGPR
, GPRInfo::tagMaskRegister
);
352 m_jit
.loadPtr(JITCompiler::Address(argGPR
, JSCell::structureOffset()), resultGPR
);
353 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(JITCompiler::NonZero
, JITCompiler::Address(resultGPR
, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined
));
355 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultGPR
);
356 notMasqueradesAsUndefined
= m_jit
.jump();
358 isMasqueradesAsUndefined
.link(&m_jit
);
359 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
360 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
361 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.graph().globalObjectFor(operand
->codeOrigin
)), localGlobalObjectGPR
);
362 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
363 m_jit
.comparePtr(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, resultGPR
);
366 if (!isKnownCell(operand
.node())) {
367 JITCompiler::Jump done
= m_jit
.jump();
369 notCell
.link(&m_jit
);
371 m_jit
.move(argGPR
, resultGPR
);
372 m_jit
.and64(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
373 m_jit
.compare64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm32(ValueNull
), resultGPR
);
378 notMasqueradesAsUndefined
.link(&m_jit
);
380 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
381 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
);
384 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand
, Node
* branchNode
, bool invert
)
386 BlockIndex taken
= branchNode
->takenBlockIndex();
387 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
389 if (taken
== nextBlock()) {
391 BlockIndex tmp
= taken
;
396 JSValueOperand
arg(this, operand
);
397 GPRReg argGPR
= arg
.gpr();
399 GPRTemporary
result(this, arg
);
400 GPRReg resultGPR
= result
.gpr();
402 JITCompiler::Jump notCell
;
404 if (m_jit
.graph().globalObjectFor(operand
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
405 if (!isKnownCell(operand
.node()))
406 notCell
= m_jit
.branchTest64(MacroAssembler::NonZero
, argGPR
, GPRInfo::tagMaskRegister
);
408 m_jit
.graph().globalObjectFor(operand
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
409 jump(invert
? taken
: notTaken
, ForceJump
);
411 GPRTemporary
localGlobalObject(this);
412 GPRTemporary
remoteGlobalObject(this);
414 if (!isKnownCell(operand
.node()))
415 notCell
= m_jit
.branchTest64(MacroAssembler::NonZero
, argGPR
, GPRInfo::tagMaskRegister
);
417 m_jit
.loadPtr(JITCompiler::Address(argGPR
, JSCell::structureOffset()), resultGPR
);
418 branchTest8(JITCompiler::Zero
, JITCompiler::Address(resultGPR
, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined
), invert
? taken
: notTaken
);
420 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
421 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
422 m_jit
.move(TrustedImmPtr(m_jit
.graph().globalObjectFor(operand
->codeOrigin
)), localGlobalObjectGPR
);
423 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
424 branchPtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, invert
? notTaken
: taken
);
427 if (!isKnownCell(operand
.node())) {
428 jump(notTaken
, ForceJump
);
430 notCell
.link(&m_jit
);
432 m_jit
.move(argGPR
, resultGPR
);
433 m_jit
.and64(JITCompiler::TrustedImm32(~TagBitUndefined
), resultGPR
);
434 branch64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm64(ValueNull
), taken
);
440 bool SpeculativeJIT::nonSpeculativeCompareNull(Node
* node
, Edge operand
, bool invert
)
442 unsigned branchIndexInBlock
= detectPeepHoleBranch();
443 if (branchIndexInBlock
!= UINT_MAX
) {
444 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
446 RELEASE_ASSERT(node
->adjustedRefCount() == 1);
448 nonSpeculativePeepholeBranchNull(operand
, branchNode
, invert
);
452 m_indexInBlock
= branchIndexInBlock
;
453 m_currentNode
= branchNode
;
458 nonSpeculativeNonPeepholeCompareNull(operand
, invert
);
463 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node
* node
, Node
* branchNode
, MacroAssembler::RelationalCondition cond
, S_DFGOperation_EJJ helperFunction
)
465 BlockIndex taken
= branchNode
->takenBlockIndex();
466 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
468 JITCompiler::ResultCondition callResultCondition
= JITCompiler::NonZero
;
470 // The branch instruction will branch to the taken block.
471 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
472 if (taken
== nextBlock()) {
473 cond
= JITCompiler::invert(cond
);
474 callResultCondition
= JITCompiler::Zero
;
475 BlockIndex tmp
= taken
;
480 JSValueOperand
arg1(this, node
->child1());
481 JSValueOperand
arg2(this, node
->child2());
482 GPRReg arg1GPR
= arg1
.gpr();
483 GPRReg arg2GPR
= arg2
.gpr();
485 JITCompiler::JumpList slowPath
;
487 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
488 GPRResult
result(this);
489 GPRReg resultGPR
= result
.gpr();
495 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
497 branchTest32(callResultCondition
, resultGPR
, taken
);
499 GPRTemporary
result(this, arg2
);
500 GPRReg resultGPR
= result
.gpr();
505 if (!isKnownInteger(node
->child1().node()))
506 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
507 if (!isKnownInteger(node
->child2().node()))
508 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
510 branch32(cond
, arg1GPR
, arg2GPR
, taken
);
512 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
513 jump(notTaken
, ForceJump
);
515 slowPath
.link(&m_jit
);
517 silentSpillAllRegisters(resultGPR
);
518 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
519 silentFillAllRegisters(resultGPR
);
521 branchTest32(callResultCondition
, resultGPR
, taken
);
527 m_indexInBlock
= m_jit
.graph().m_blocks
[m_block
]->size() - 1;
528 m_currentNode
= branchNode
;
531 template<typename JumpType
>
532 class CompareAndBoxBooleanSlowPathGenerator
533 : public CallSlowPathGenerator
<JumpType
, S_DFGOperation_EJJ
, GPRReg
> {
535 CompareAndBoxBooleanSlowPathGenerator(
536 JumpType from
, SpeculativeJIT
* jit
,
537 S_DFGOperation_EJJ function
, GPRReg result
, GPRReg arg1
, GPRReg arg2
)
538 : CallSlowPathGenerator
<JumpType
, S_DFGOperation_EJJ
, GPRReg
>(
539 from
, jit
, function
, NeedToSpill
, result
)
546 virtual void generateInternal(SpeculativeJIT
* jit
)
549 this->recordCall(jit
->callOperation(this->m_function
, this->m_result
, m_arg1
, m_arg2
));
550 jit
->m_jit
.and32(JITCompiler::TrustedImm32(1), this->m_result
);
551 jit
->m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), this->m_result
);
560 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node
* node
, MacroAssembler::RelationalCondition cond
, S_DFGOperation_EJJ helperFunction
)
562 JSValueOperand
arg1(this, node
->child1());
563 JSValueOperand
arg2(this, node
->child2());
564 GPRReg arg1GPR
= arg1
.gpr();
565 GPRReg arg2GPR
= arg2
.gpr();
567 JITCompiler::JumpList slowPath
;
569 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
570 GPRResult
result(this);
571 GPRReg resultGPR
= result
.gpr();
577 callOperation(helperFunction
, resultGPR
, arg1GPR
, arg2GPR
);
579 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
580 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
582 GPRTemporary
result(this, arg2
);
583 GPRReg resultGPR
= result
.gpr();
588 if (!isKnownInteger(node
->child1().node()))
589 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
590 if (!isKnownInteger(node
->child2().node()))
591 slowPath
.append(m_jit
.branch64(MacroAssembler::Below
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
593 m_jit
.compare32(cond
, arg1GPR
, arg2GPR
, resultGPR
);
594 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
596 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
597 addSlowPathGenerator(adoptPtr(
598 new CompareAndBoxBooleanSlowPathGenerator
<JITCompiler::JumpList
>(
599 slowPath
, this, helperFunction
, resultGPR
, arg1GPR
, arg2GPR
)));
602 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
606 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node
* node
, Node
* branchNode
, bool invert
)
608 BlockIndex taken
= branchNode
->takenBlockIndex();
609 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
611 // The branch instruction will branch to the taken block.
612 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
613 if (taken
== nextBlock()) {
615 BlockIndex tmp
= taken
;
620 JSValueOperand
arg1(this, node
->child1());
621 JSValueOperand
arg2(this, node
->child2());
622 GPRReg arg1GPR
= arg1
.gpr();
623 GPRReg arg2GPR
= arg2
.gpr();
625 GPRTemporary
result(this);
626 GPRReg resultGPR
= result
.gpr();
631 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
632 // see if we get lucky: if the arguments are cells and they reference the same
633 // cell, then they must be strictly equal.
634 branch64(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
636 silentSpillAllRegisters(resultGPR
);
637 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
638 silentFillAllRegisters(resultGPR
);
640 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
642 m_jit
.or64(arg1GPR
, arg2GPR
, resultGPR
);
644 JITCompiler::Jump twoCellsCase
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
646 JITCompiler::Jump leftOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
647 JITCompiler::Jump leftDouble
= m_jit
.branchTest64(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
649 JITCompiler::Jump rightOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
650 JITCompiler::Jump rightDouble
= m_jit
.branchTest64(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
651 rightOK
.link(&m_jit
);
653 branch64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, taken
);
654 jump(notTaken
, ForceJump
);
656 twoCellsCase
.link(&m_jit
);
657 branch64(JITCompiler::Equal
, arg1GPR
, arg2GPR
, invert
? notTaken
: taken
);
659 leftDouble
.link(&m_jit
);
660 rightDouble
.link(&m_jit
);
662 silentSpillAllRegisters(resultGPR
);
663 callOperation(operationCompareStrictEq
, resultGPR
, arg1GPR
, arg2GPR
);
664 silentFillAllRegisters(resultGPR
);
666 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultGPR
, taken
);
672 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node
* node
, bool invert
)
674 JSValueOperand
arg1(this, node
->child1());
675 JSValueOperand
arg2(this, node
->child2());
676 GPRReg arg1GPR
= arg1
.gpr();
677 GPRReg arg2GPR
= arg2
.gpr();
679 GPRTemporary
result(this);
680 GPRReg resultGPR
= result
.gpr();
685 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
686 // see if we get lucky: if the arguments are cells and they reference the same
687 // cell, then they must be strictly equal.
688 // FIXME: this should flush registers instead of silent spill/fill.
689 JITCompiler::Jump notEqualCase
= m_jit
.branch64(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
);
691 m_jit
.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
693 JITCompiler::Jump done
= m_jit
.jump();
695 notEqualCase
.link(&m_jit
);
697 silentSpillAllRegisters(resultGPR
);
698 callOperation(operationCompareStrictEqCell
, resultGPR
, arg1GPR
, arg2GPR
);
699 silentFillAllRegisters(resultGPR
);
701 m_jit
.and64(JITCompiler::TrustedImm32(1), resultGPR
);
702 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
706 m_jit
.or64(arg1GPR
, arg2GPR
, resultGPR
);
708 JITCompiler::JumpList slowPathCases
;
710 JITCompiler::Jump twoCellsCase
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
, GPRInfo::tagMaskRegister
);
712 JITCompiler::Jump leftOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg1GPR
, GPRInfo::tagTypeNumberRegister
);
713 slowPathCases
.append(m_jit
.branchTest64(JITCompiler::NonZero
, arg1GPR
, GPRInfo::tagTypeNumberRegister
));
715 JITCompiler::Jump rightOK
= m_jit
.branch64(JITCompiler::AboveOrEqual
, arg2GPR
, GPRInfo::tagTypeNumberRegister
);
716 slowPathCases
.append(m_jit
.branchTest64(JITCompiler::NonZero
, arg2GPR
, GPRInfo::tagTypeNumberRegister
));
717 rightOK
.link(&m_jit
);
719 m_jit
.compare64(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, arg1GPR
, arg2GPR
, resultGPR
);
720 m_jit
.or32(JITCompiler::TrustedImm32(ValueFalse
), resultGPR
);
722 JITCompiler::Jump done
= m_jit
.jump();
724 twoCellsCase
.link(&m_jit
);
725 slowPathCases
.append(m_jit
.branch64(JITCompiler::NotEqual
, arg1GPR
, arg2GPR
));
727 m_jit
.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert
))), resultGPR
);
729 addSlowPathGenerator(
731 new CompareAndBoxBooleanSlowPathGenerator
<MacroAssembler::JumpList
>(
732 slowPathCases
, this, operationCompareStrictEq
, resultGPR
, arg1GPR
,
738 jsValueResult(resultGPR
, m_currentNode
, DataFormatJSBoolean
, UseChildrenCalledExplicitly
);
741 void SpeculativeJIT::emitCall(Node
* node
)
743 if (node
->op() != Call
)
744 RELEASE_ASSERT(node
->op() == Construct
);
746 // For constructors, the this argument is not passed but we have to make space
748 int dummyThisArgument
= node
->op() == Call
? 0 : 1;
750 CallLinkInfo::CallType callType
= node
->op() == Call
? CallLinkInfo::Call
: CallLinkInfo::Construct
;
752 Edge calleeEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild()];
753 JSValueOperand
callee(this, calleeEdge
);
754 GPRReg calleeGPR
= callee
.gpr();
757 // The call instruction's first child is the function; the subsequent children are the
759 int numPassedArgs
= node
->numChildren() - 1;
761 m_jit
.store32(MacroAssembler::TrustedImm32(numPassedArgs
+ dummyThisArgument
), callFramePayloadSlot(JSStack::ArgumentCount
));
762 m_jit
.store64(GPRInfo::callFrameRegister
, callFrameSlot(JSStack::CallerFrame
));
763 m_jit
.store64(calleeGPR
, callFrameSlot(JSStack::Callee
));
765 for (int i
= 0; i
< numPassedArgs
; i
++) {
766 Edge argEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + 1 + i
];
767 JSValueOperand
arg(this, argEdge
);
768 GPRReg argGPR
= arg
.gpr();
771 m_jit
.store64(argGPR
, argumentSlot(i
+ dummyThisArgument
));
776 GPRResult
result(this);
777 GPRReg resultGPR
= result
.gpr();
779 JITCompiler::DataLabelPtr targetToCheck
;
780 JITCompiler::JumpList slowPath
;
782 CallBeginToken token
;
783 m_jit
.beginCall(node
->codeOrigin
, token
);
785 m_jit
.addPtr(TrustedImm32(m_jit
.codeBlock()->m_numCalleeRegisters
* sizeof(Register
)), GPRInfo::callFrameRegister
);
787 slowPath
.append(m_jit
.branchPtrWithPatch(MacroAssembler::NotEqual
, calleeGPR
, targetToCheck
, MacroAssembler::TrustedImmPtr(0)));
789 m_jit
.loadPtr(MacroAssembler::Address(calleeGPR
, OBJECT_OFFSETOF(JSFunction
, m_scope
)), resultGPR
);
790 m_jit
.store64(resultGPR
, MacroAssembler::Address(GPRInfo::callFrameRegister
, static_cast<ptrdiff_t>(sizeof(Register
)) * JSStack::ScopeChain
));
792 CodeOrigin codeOrigin
= m_currentNode
->codeOrigin
;
793 JITCompiler::Call fastCall
= m_jit
.nearCall();
794 m_jit
.notifyCall(fastCall
, codeOrigin
, token
);
796 JITCompiler::Jump done
= m_jit
.jump();
798 slowPath
.link(&m_jit
);
800 m_jit
.move(calleeGPR
, GPRInfo::nonArgGPR0
);
801 m_jit
.prepareForExceptionCheck();
802 JITCompiler::Call slowCall
= m_jit
.nearCall();
803 m_jit
.notifyCall(slowCall
, codeOrigin
, token
);
807 m_jit
.move(GPRInfo::returnValueGPR
, resultGPR
);
809 jsValueResult(resultGPR
, m_currentNode
, DataFormatJS
, UseChildrenCalledExplicitly
);
811 m_jit
.addJSCall(fastCall
, slowCall
, targetToCheck
, callType
, calleeGPR
, m_currentNode
->codeOrigin
);
814 template<bool strict
>
815 GPRReg
SpeculativeJIT::fillSpeculateIntInternal(Edge edge
, DataFormat
& returnFormat
)
817 #if DFG_ENABLE(DEBUG_VERBOSE)
818 dataLogF("SpecInt@%d ", edge
->index());
820 AbstractValue
& value
= m_state
.forNode(edge
);
821 SpeculatedType type
= value
.m_type
;
822 ASSERT(edge
.useKind() != KnownInt32Use
|| !(value
.m_type
& ~SpecInt32
));
823 value
.filter(SpecInt32
);
824 VirtualRegister virtualRegister
= edge
->virtualRegister();
825 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
827 switch (info
.registerFormat()) {
828 case DataFormatNone
: {
829 if ((edge
->hasConstant() && !isInt32Constant(edge
.node())) || info
.spillFormat() == DataFormatDouble
) {
830 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
831 returnFormat
= DataFormatInteger
;
835 GPRReg gpr
= allocate();
837 if (edge
->hasConstant()) {
838 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
839 ASSERT(isInt32Constant(edge
.node()));
840 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
841 info
.fillInteger(*m_stream
, gpr
);
842 returnFormat
= DataFormatInteger
;
846 DataFormat spillFormat
= info
.spillFormat();
848 RELEASE_ASSERT((spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInteger
);
850 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
852 if (spillFormat
== DataFormatJSInteger
|| spillFormat
== DataFormatInteger
) {
853 // If we know this was spilled as an integer we can fill without checking.
855 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
856 info
.fillInteger(*m_stream
, gpr
);
857 returnFormat
= DataFormatInteger
;
860 if (spillFormat
== DataFormatInteger
) {
861 m_jit
.load32(JITCompiler::addressFor(virtualRegister
), gpr
);
862 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, gpr
);
864 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
865 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInteger
);
866 returnFormat
= DataFormatJSInteger
;
869 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), gpr
);
871 // Fill as JSValue, and fall through.
872 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInteger
);
877 // Check the value is an integer.
878 GPRReg gpr
= info
.gpr();
880 if (type
& ~SpecInt32
)
881 speculationCheck(BadType
, JSValueRegs(gpr
), edge
, m_jit
.branch64(MacroAssembler::Below
, gpr
, GPRInfo::tagTypeNumberRegister
));
882 info
.fillJSValue(*m_stream
, gpr
, DataFormatJSInteger
);
883 // If !strict we're done, return.
885 returnFormat
= DataFormatJSInteger
;
888 // else fall through & handle as DataFormatJSInteger.
892 case DataFormatJSInteger
: {
893 // In a strict fill we need to strip off the value tag.
895 GPRReg gpr
= info
.gpr();
897 // If the register has already been locked we need to take a copy.
898 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
899 if (m_gprs
.isLocked(gpr
))
903 info
.fillInteger(*m_stream
, gpr
);
906 m_jit
.zeroExtend32ToPtr(gpr
, result
);
907 returnFormat
= DataFormatInteger
;
911 GPRReg gpr
= info
.gpr();
913 returnFormat
= DataFormatJSInteger
;
917 case DataFormatInteger
: {
918 GPRReg gpr
= info
.gpr();
920 returnFormat
= DataFormatInteger
;
924 case DataFormatDouble
:
925 case DataFormatJSDouble
: {
926 if (edge
->hasConstant() && isInt32Constant(edge
.node())) {
927 GPRReg gpr
= allocate();
928 ASSERT(isInt32Constant(edge
.node()));
929 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
930 returnFormat
= DataFormatInteger
;
935 case DataFormatBoolean
:
936 case DataFormatJSCell
:
937 case DataFormatJSBoolean
: {
938 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
939 returnFormat
= DataFormatInteger
;
943 case DataFormatStorage
:
944 RELEASE_ASSERT_NOT_REACHED();
947 RELEASE_ASSERT_NOT_REACHED();
948 return InvalidGPRReg
;
// Fills the value for |edge| into a GPR, speculating that it is an int32.
// Non-strict variant: the result may be returned in either DataFormatInteger
// or DataFormatJSInteger form; |returnFormat| reports which one was produced.
GPRReg SpeculativeJIT::fillSpeculateInt(Edge edge, DataFormat& returnFormat)
{
    return fillSpeculateIntInternal<false>(edge, returnFormat);
}
// Strict variant of fillSpeculateInt: the result is guaranteed to be a raw,
// untagged 32-bit integer (DataFormatInteger). The RELEASE_ASSERT enforces
// that the strict internal fill never hands back a tagged JSValue form.
GPRReg SpeculativeJIT::fillSpeculateIntStrict(Edge edge)
{
    DataFormat mustBeDataFormatInteger;
    GPRReg result = fillSpeculateIntInternal<true>(edge, mustBeDataFormatInteger);
    RELEASE_ASSERT(mustBeDataFormatInteger == DataFormatInteger);
    return result;
}
// Fills the value for |edge| into an FPR, speculating that it is a number.
// Handles every current representation of the value (constant, spilled,
// tagged JSValue in a GPR, unboxed int32 or double) and converts/unboxes as
// needed, emitting a speculation check when the abstract value may contain
// non-numbers. On a provably-failing speculation the compilation of this
// path is terminated and a fresh (garbage) FPR is returned.
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecDouble@%d   ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecNumber));
    value.filter(SpecNumber);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    if (info.registerFormat() == DataFormatNone) {
        if (edge->hasConstant()) {
            GPRReg gpr = allocate();

            if (isInt32Constant(edge.node())) {
                // Int32 constant: materialize its double bit pattern via a GPR,
                // then move it into a freshly allocated FPR.
                FPRReg fpr = fprAllocate();
                m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(static_cast<double>(valueOfInt32Constant(edge.node())))), gpr);
                m_jit.move64ToDouble(gpr, fpr);
                unlock(gpr);

                m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
                info.fillDouble(*m_stream, fpr);
                return fpr;
            }
            if (isNumberConstant(edge.node())) {
                // Double constant: same materialization path, without the
                // int-to-double widening.
                FPRReg fpr = fprAllocate();
                m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(edge.node()))), gpr);
                m_jit.move64ToDouble(gpr, fpr);
                unlock(gpr);

                m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
                info.fillDouble(*m_stream, fpr);
                return fpr;
            }
            // Non-numeric constant: the speculation is statically wrong.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return fprAllocate();
        }

        DataFormat spillFormat = info.spillFormat();
        switch (spillFormat) {
        case DataFormatDouble: {
            // Spilled as a raw double: reload directly into an FPR.
            FPRReg fpr = fprAllocate();
            m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
            m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
            info.fillDouble(*m_stream, fpr);
            return fpr;
        }

        case DataFormatInteger: {
            // Spilled as a raw int32: reload into a GPR; the register-format
            // switch below converts it to double.
            GPRReg gpr = allocate();

            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillInteger(*m_stream, gpr);
            unlock(gpr);
            break;
        }

        default: {
            // Spilled as a tagged JSValue: reload it; the switch below unboxes.
            GPRReg gpr = allocate();

            RELEASE_ASSERT(spillFormat & DataFormatJS);
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillJSValue(*m_stream, gpr, spillFormat);
            unlock(gpr);
            break;
        }
        }
    }

    switch (info.registerFormat()) {
    case DataFormatNone: // Should have filled, above.
    case DataFormatBoolean: // This type never occurs.
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    case DataFormatCell:
        // A known cell can never be a number.
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return fprAllocate();

    case DataFormatJSCell:
    case DataFormatJS:
    case DataFormatJSBoolean: {
        // Tagged JSValue of unknown numeric-ness. Dispatch on the tag:
        // values >= tagTypeNumberRegister are boxed int32s; otherwise the
        // value must be a boxed double (checked if the type filter demands).
        GPRReg jsValueGpr = info.gpr();
        m_gprs.lock(jsValueGpr);
        FPRReg fpr = fprAllocate();
        GPRReg tempGpr = allocate();

        JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);

        if (type & ~SpecNumber)
            speculationCheck(BadType, JSValueRegs(jsValueGpr), edge, m_jit.branchTest64(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister));

        // First, if we get here we have a double encoded as a JSValue
        m_jit.move(jsValueGpr, tempGpr);
        unboxDouble(tempGpr, fpr);
        JITCompiler::Jump hasUnboxedDouble = m_jit.jump();

        // Finally, handle integers.
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(jsValueGpr, fpr);
        hasUnboxedDouble.link(&m_jit);

        // The value now lives (only) in the FPR; drop the GPR binding.
        m_gprs.release(jsValueGpr);
        m_gprs.unlock(jsValueGpr);
        m_gprs.unlock(tempGpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
        info.fillDouble(*m_stream, fpr);
        info.killSpilled();
        return fpr;
    }

    case DataFormatJSInteger:
    case DataFormatInteger: {
        // Known int32 in a GPR: just convert; the GPR binding stays intact.
        FPRReg fpr = fprAllocate();
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.convertInt32ToDouble(gpr, fpr);
        m_gprs.unlock(gpr);
        return fpr;
    }

    // Unbox the double
    case DataFormatJSDouble: {
        GPRReg gpr = info.gpr();
        FPRReg fpr = fprAllocate();
        if (m_gprs.isLocked(gpr)) {
            // Make sure we don't trample gpr if it is in use.
            GPRReg temp = allocate();
            m_jit.move(gpr, temp);
            unboxDouble(temp, fpr);
            unlock(temp);
        } else
            unboxDouble(gpr, fpr);

        m_gprs.release(gpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);

        info.fillDouble(*m_stream, fpr);
        return fpr;
    }

    case DataFormatDouble: {
        // Already an unboxed double in an FPR: lock and return it.
        FPRReg fpr = info.fpr();
        m_fprs.lock(fpr);
        return fpr;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidFPRReg;
    }
}
// Fills the value for |edge| into a GPR, speculating that it is a cell
// (object/string). Emits a tag-mask speculation check when the abstract
// value may contain non-cells; terminates this compilation path when the
// speculation is statically known to fail.
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecCell@%d   ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
    value.filter(SpecCell);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Spilled as a raw int or double: statically not a cell.
        if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return allocate();
        }

        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            if (jsValue.isCell()) {
                // Cell constant: materialize its encoded pointer directly.
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
                return gpr;
            }
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return gpr;
        }
        // Otherwise reload the spilled JSValue and check its tag bits.
        RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        if (type & ~SpecCell)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatCell:
    case DataFormatJSCell: {
        // Already known to be a cell: lock and return.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJS: {
        // JSValue of unknown type in a GPR: check the cell tag if needed.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecCell)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatJSInteger:
    case DataFormatInteger:
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJSBoolean:
    case DataFormatBoolean: {
        // Statically known to be a non-cell: speculation fails.
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}
// Fills the value for |edge| into a GPR, speculating that it is a boolean.
// The boolean check works by XORing with ValueFalse: a boxed boolean then
// becomes 0 or 1, so any bit outside the low bit fails the speculation.
// The XOR is undone immediately after the check (and the recovery entry
// lets the OSR exit machinery restore the original value too).
GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpecBool@%d   ", edge->index());
#endif
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    value.filter(SpecBoolean);
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Spilled as a raw int or double: statically not a boolean.
        if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return allocate();
        }

        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = valueOfJSConstant(edge.node());
            if (jsValue.isBoolean()) {
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
                return gpr;
            }
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            return gpr;
        }
        // Otherwise reload the spilled JSValue and check it is a boolean.
        RELEASE_ASSERT(info.spillFormat() & DataFormatJS);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatBoolean:
    case DataFormatJSBoolean: {
        // Already known to be a boolean: lock and return.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJS: {
        // JSValue of unknown type in a GPR: perform the XOR-based check.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatJSInteger:
    case DataFormatInteger:
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJSCell:
    case DataFormatCell: {
        // Statically known to be a non-boolean: speculation fails.
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}
// Converts the boxed JSValue in |value| into an unboxed double in |result|,
// handling both boxed int32s and boxed doubles. |tmp| is clobbered.
// Returns the jump taken when the value is not a number at all; the caller
// must bind (or check) that jump.
JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp)
{
    // Values >= tagTypeNumberRegister are boxed int32s.
    JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister);

    // If no number-tag bits are set at all, this is not a number.
    JITCompiler::Jump notNumber = m_jit.branchTest64(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister);

    // Boxed double: copy aside and unbox (unboxing mutates the GPR).
    m_jit.move(value, tmp);
    unboxDouble(tmp, result);

    JITCompiler::Jump done = m_jit.jump();

    isInteger.link(&m_jit);

    m_jit.convertInt32ToDouble(value, result);

    done.link(&m_jit);

    return notNumber;
}
// Compiles object === object equality. Both children are speculated to be
// objects (not strings, not MasqueradesAsUndefined objects); once those
// speculations hold, equality is a plain pointer comparison. When the
// global object's masquerades-as-undefined watchpoint is still valid we
// register the watchpoint instead of emitting per-object type-flag checks.
void SpeculativeJIT::compileObjectEquality(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    GPRTemporary result(this, op1);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();

    if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        // Fast path: trust the watchpoint; only rule out strings.
        m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        // Slow path: also check each structure's MasqueradesAsUndefined flag.
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // Pointer comparison, producing a boxed boolean.
    MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();
    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}
// Compiles equality where the left child is speculated to be an object and
// the right child to be either an object or "other" (null/undefined).
// Object vs object reduces to a pointer comparison; object vs null/undefined
// is always false (given the MasqueradesAsUndefined checks above).
void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
{
    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid = m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    if (masqueradesAsUndefinedWatchpointValid) {
        // Fast path for the left child: only rule out strings.
        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        // Also rule out MasqueradesAsUndefined objects via the type flags.
        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell =
        m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);

    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    // protocols.
    MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
    MacroAssembler::Jump trueCase = m_jit.jump();

    rightNotCell.link(&m_jit);

    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
        // Masking off the undefined tag bit maps both null and undefined to ValueNull.
        m_jit.move(op2GPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);

        typeCheck(
            JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther,
            m_jit.branch64(
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }

    // Object vs null/undefined is always false on this path.
    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();
    trueCase.link(&m_jit);
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);
    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}
// Peephole (fused compare-and-branch) variant of
// compileObjectToObjectOrOtherEquality: instead of producing a boxed
// boolean it branches directly to |branchNode|'s taken/notTaken blocks.
void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid = m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    if (masqueradesAsUndefinedWatchpointValid) {
        // Fast path for the left child: only rule out strings.
        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        // Also rule out MasqueradesAsUndefined objects via the type flags.
        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell =
        m_jit.branchTest64(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);

    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    // protocols.
    branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);

    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
        rightNotCell.link(&m_jit);
    else {
        jump(notTaken, ForceJump);

        rightNotCell.link(&m_jit);
        // Masking off the undefined tag bit maps both null and undefined to ValueNull.
        m_jit.move(op2GPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);

        typeCheck(
            JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }

    // Object vs null/undefined is always not-equal on this path.
    jump(notTaken);
}
// Compiles an int32-vs-int32 comparison with the given relational condition,
// producing a boxed JS boolean. compare32 yields 0/1; ORing in ValueFalse
// turns that into the tagged false/true encoding.
void SpeculativeJIT::compileIntegerCompare(Node* node, MacroAssembler::RelationalCondition condition)
{
    SpeculateIntegerOperand op1(this, node->child1());
    SpeculateIntegerOperand op2(this, node->child2());
    GPRTemporary result(this, op1, op2);

    m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());

    // If we add a DataFormatBool, we should use it here.
    m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
    jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
}
// Compiles a double-vs-double comparison with the given condition, producing
// a boxed JS boolean. Starts with ValueTrue and XORs the low bit away on the
// false path (ValueTrue ^ 1 == ValueFalse), so the true branch falls through.
void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
{
    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());
    GPRTemporary result(this);

    m_jit.move(TrustedImm32(ValueTrue), result.gpr());
    MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
    m_jit.xor64(TrustedImm32(true), result.gpr());
    trueCase.link(&m_jit);

    jsValueResult(result.gpr(), node, DataFormatJSBoolean);
}
// Compiles a fully generic ValueAdd by calling out to the runtime. Uses the
// cheaper not-number operation when either operand is statically known to
// not be a number (so no numeric fast path is worth attempting).
void SpeculativeJIT::compileValueAdd(Node* node)
{
    JSValueOperand op1(this, node->child1());
    JSValueOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    // Calling out clobbers everything; spill all live registers first.
    flushRegisters();

    GPRResult result(this);
    if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node()))
        callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR);
    else
        callOperation(operationValueAdd, result.gpr(), op1GPR, op2GPR);

    jsValueResult(result.gpr(), node);
}
// Compiles LogicalNot for a value speculated to be an object or
// null/undefined: objects (that don't masquerade as undefined) negate to
// false, null/undefined negate to true. Produces a boxed JS boolean.
void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
{
    JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
    GPRTemporary result(this);
    GPRReg valueGPR = value.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid = m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
    if (masqueradesAsUndefinedWatchpointValid) {
        // Fast path: trust the watchpoint; only rule out strings.
        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());

        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), structureGPR);

        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));

        // A MasqueradesAsUndefined object is falsy only relative to its own
        // global object, so only fail speculation for that combination.
        MacroAssembler::Jump isNotMasqueradesAsUndefined =
            m_jit.branchTest8(
                MacroAssembler::Zero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined));

        speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
            m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));

        isNotMasqueradesAsUndefined.link(&m_jit);
    }
    // !object == false.
    m_jit.move(TrustedImm32(ValueFalse), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();

    notCell.link(&m_jit);

    if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
        // Masking off the undefined tag bit maps both null and undefined to ValueNull.
        m_jit.move(valueGPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
        typeCheck(
            JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual,
                resultGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }
    // !null and !undefined == true.
    m_jit.move(TrustedImm32(ValueTrue), resultGPR);

    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}
// Compiles LogicalNot, dispatching on the speculated type of the child:
// object-or-other, int32, double, boolean (checked or unchecked), or fully
// untyped (with a slow-path runtime call). Always yields a boxed JS boolean.
void SpeculativeJIT::compileLogicalNot(Node* node)
{
    switch (node->child1().useKind()) {
    case ObjectOrOtherUse: {
        compileObjectOrOtherLogicalNot(node->child1());
        return;
    }

    case Int32Use: {
        // !x for int32: compare against zero, then box the 0/1 result.
        SpeculateIntegerOperand value(this, node->child1());
        GPRTemporary result(this, value);
        m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
        m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case NumberUse: {
        // !x for double: start with false, flip to true when x compares
        // equal to zero (branchDoubleNonZero skips the flip otherwise).
        SpeculateDoubleOperand value(this, node->child1());
        FPRTemporary scratch(this);
        GPRTemporary result(this);
        m_jit.move(TrustedImm32(ValueFalse), result.gpr());
        MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
        m_jit.xor32(TrustedImm32(true), result.gpr());
        nonZero.link(&m_jit);
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case BooleanUse: {
        if (!needsTypeCheck(node->child1(), SpecBoolean)) {
            // Proven boolean: flipping the low bit of the boxed encoding
            // toggles true <-> false.
            SpeculateBooleanOperand value(this, node->child1());
            GPRTemporary result(this, value);

            m_jit.move(value.gpr(), result.gpr());
            m_jit.xor64(TrustedImm32(true), result.gpr());

            jsValueResult(result.gpr(), node, DataFormatJSBoolean);
            return;
        }

        JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
        GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).

        // XOR with ValueFalse maps a boxed boolean to 0/1; any other bits
        // set means the value was not a boolean.
        m_jit.move(value.gpr(), result.gpr());
        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
        typeCheck(
            JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
                JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case UntypedUse: {
        JSValueOperand arg1(this, node->child1());
        GPRTemporary result(this);

        GPRReg arg1GPR = arg1.gpr();
        GPRReg resultGPR = result.gpr();

        arg1.use();

        // Fast path for boxed booleans; anything else takes the slow path
        // through the generic to-boolean conversion.
        m_jit.move(arg1GPR, resultGPR);
        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
        JITCompiler::Jump slowCase = m_jit.branchTest64(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));

        addSlowPathGenerator(
            slowPathCall(slowCase, this, dfgConvertJSValueToBoolean, resultGPR, arg1GPR));

        m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
        jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
// Emits a branch on a value speculated to be an object or null/undefined:
// objects (not masquerading as undefined) go to |taken|, null/undefined go
// to |notTaken|. The same structure/flag checks as the LogicalNot variant.
void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BlockIndex taken, BlockIndex notTaken)
{
    JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
    GPRTemporary scratch(this);
    GPRReg valueGPR = value.gpr();
    GPRReg scratchGPR = scratch.gpr();

    MacroAssembler::Jump notCell = m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
    if (m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        // Fast path: trust the watchpoint; only rule out strings.
        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());

        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(valueGPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    } else {
        m_jit.loadPtr(MacroAssembler::Address(valueGPR, JSCell::structureOffset()), scratchGPR);

        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchPtr(
                MacroAssembler::Equal,
                scratchGPR,
                MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));

        // MasqueradesAsUndefined is only truthiness-relevant relative to the
        // node's own global object.
        JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));

        speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
            m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin))));

        isNotMasqueradesAsUndefined.link(&m_jit);
    }
    // Objects are truthy.
    jump(taken, ForceJump);

    notCell.link(&m_jit);

    if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
        // Masking off the undefined tag bit maps both null and undefined to ValueNull.
        m_jit.move(valueGPR, scratchGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
        typeCheck(
            JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
    }
    // null/undefined are falsy.
    jump(notTaken);

    noResult(m_currentNode);
}
// NOTE(review): extraction-garbled dump — statements split across lines,
// original file line numbers fused into the text, and several lines dropped
// (the `else`/`case` labels between use-kind arms, the taken/notTaken swap
// bodies after the "tmp = taken" lines, and closing braces). Code text is
// left byte-identical; comments describe intent.
//
// Code generation for a Branch node: dispatch on the use kind of child1
// (the condition) and emit the cheapest correct test, branching to the
// node's taken/notTaken successor blocks.
1813 void SpeculativeJIT::emitBranch(Node
* node
)
1815 BlockIndex taken
= node
->takenBlockIndex();
1816 BlockIndex notTaken
= node
->notTakenBlockIndex();
1818 switch (node
->child1().useKind()) {
// Object-or-other conditions get their own helper (handles
// masquerades-as-undefined and null/undefined).
1819 case ObjectOrOtherUse
: {
1820 emitObjectOrOtherBranch(node
->child1(), taken
, notTaken
);
// Numeric arm (the enclosing case label, presumably Int32Use/NumberUse,
// was dropped from the dump — TODO confirm).
1826 if (node
->child1().useKind() == Int32Use
) {
1827 bool invert
= false;
// Fall-through optimization: if the taken block is the next block in
// emission order, invert the test and swap the targets so the common
// path falls through. (The swap body after "tmp = taken" — assigning
// taken = notTaken; notTaken = tmp; invert = true — was dropped.)
1829 if (taken
== nextBlock()) {
1831 BlockIndex tmp
= taken
;
// Int32 condition: nonzero means truthy.
1836 SpeculateIntegerOperand
value(this, node
->child1());
1837 branchTest32(invert
? MacroAssembler::Zero
: MacroAssembler::NonZero
, value
.gpr(), taken
);
// Double condition (else-branch of the Int32Use test; the `else` line was
// dropped): branch if the double is nonzero.
1839 SpeculateDoubleOperand
value(this, node
->child1());
1840 FPRTemporary
scratch(this);
1841 branchDoubleNonZero(value
.fpr(), scratch
.fpr(), taken
);
// Generic/boolean arm: load the JSValue without automatic speculation.
1852 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
1853 GPRReg valueGPR
= value
.gpr();
1855 if (node
->child1().useKind() == BooleanUse
) {
// If the value is already proven boolean, test the payload bit directly.
1856 if (!needsTypeCheck(node
->child1(), SpecBoolean
)) {
1857 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
// Same fall-through optimization as above (swap body dropped from dump).
1859 if (taken
== nextBlock()) {
1860 condition
= MacroAssembler::Zero
;
1861 BlockIndex tmp
= taken
;
// Boolean truthiness is the low payload bit of the encoded JSValue.
1866 branchTest32(condition
, valueGPR
, TrustedImm32(true), taken
);
// Otherwise compare against the full encoded true/false values, and deopt
// (typeCheck with an unconditional jump) if it is neither.
1869 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken
);
1870 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken
);
1872 typeCheck(JSValueRegs(valueGPR
), node
->child1(), SpecBoolean
, m_jit
.jump());
// Untyped arm: try cheap inline checks keyed off the prediction before
// falling back to a runtime call.
1876 GPRTemporary
result(this);
1877 GPRReg resultGPR
= result
.gpr();
// Predicted int32: zero is falsy; any int32 (>= tagTypeNumber in the
// 64-bit encoding) that isn't zero is truthy.
1879 if (node
->child1()->prediction() & SpecInt32
) {
1880 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken
);
1881 branch64(MacroAssembler::AboveOrEqual
, valueGPR
, GPRInfo::tagTypeNumberRegister
, taken
);
// Predicted boolean: compare against encoded false/true.
1884 if (node
->child1()->prediction() & SpecBoolean
) {
1885 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken
);
1886 branch64(MacroAssembler::Equal
, valueGPR
, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken
);
// Slow path: spill, call the generic ToBoolean operation, refill, and
// branch on its int result. (A trailing jump(notTaken), original ~1896/1897,
// appears to have been dropped from the dump.)
1891 silentSpillAllRegisters(resultGPR
);
1892 callOperation(dfgConvertJSValueToBoolean
, resultGPR
, valueGPR
);
1893 silentFillAllRegisters(resultGPR
);
1895 branchTest32(MacroAssembler::NonZero
, resultGPR
, taken
);
// Children were consumed explicitly by the branches above.
1899 noResult(node
, UseChildrenCalledExplicitly
);
// Unhandled use kind: unreachable by construction of the fixup phase.
1904 RELEASE_ASSERT_NOT_REACHED();
1908 void SpeculativeJIT::compile(Node
* node
)
1910 NodeType op
= node
->op();
1912 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1913 m_jit
.clearRegisterAllocationOffsets();
1918 initConstantInfo(node
);
1921 case PhantomArguments
:
1922 initConstantInfo(node
);
1925 case WeakJSConstant
:
1926 m_jit
.addWeakReference(node
->weakConstant());
1927 initConstantInfo(node
);
1931 // CSE should always eliminate this.
1932 RELEASE_ASSERT_NOT_REACHED();
1937 SpeculatedType prediction
= node
->variableAccessData()->prediction();
1938 AbstractValue
& value
= m_state
.variables().operand(node
->local());
1940 // If we have no prediction for this local, then don't attempt to compile.
1941 if (prediction
== SpecNone
) {
1942 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
1946 // If the CFA is tracking this variable and it found that the variable
1947 // cannot have been assigned, then don't attempt to proceed.
1948 if (value
.isClear()) {
1949 // FIXME: We should trap instead.
1950 // https://bugs.webkit.org/show_bug.cgi?id=110383
1951 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
1955 if (node
->variableAccessData()->shouldUseDoubleFormat()) {
1956 FPRTemporary
result(this);
1957 m_jit
.loadDouble(JITCompiler::addressFor(node
->local()), result
.fpr());
1958 VirtualRegister virtualRegister
= node
->virtualRegister();
1959 m_fprs
.retain(result
.fpr(), virtualRegister
, SpillOrderDouble
);
1960 m_generationInfo
[virtualRegister
].initDouble(node
, node
->refCount(), result
.fpr());
1964 if (isInt32Speculation(value
.m_type
)) {
1965 GPRTemporary
result(this);
1966 m_jit
.load32(JITCompiler::payloadFor(node
->local()), result
.gpr());
1968 // Like integerResult, but don't useChildren - our children are phi nodes,
1969 // and don't represent values within this dataflow with virtual registers.
1970 VirtualRegister virtualRegister
= node
->virtualRegister();
1971 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
1972 m_generationInfo
[virtualRegister
].initInteger(node
, node
->refCount(), result
.gpr());
1976 GPRTemporary
result(this);
1977 m_jit
.load64(JITCompiler::addressFor(node
->local()), result
.gpr());
1979 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1980 // and don't represent values within this dataflow with virtual registers.
1981 VirtualRegister virtualRegister
= node
->virtualRegister();
1982 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1985 if (isCellSpeculation(value
.m_type
))
1986 format
= DataFormatJSCell
;
1987 else if (isBooleanSpeculation(value
.m_type
))
1988 format
= DataFormatJSBoolean
;
1990 format
= DataFormatJS
;
1992 m_generationInfo
[virtualRegister
].initJSValue(node
, node
->refCount(), result
.gpr(), format
);
1996 case GetLocalUnlinked
: {
1997 GPRTemporary
result(this);
1999 m_jit
.load64(JITCompiler::addressFor(node
->unlinkedLocal()), result
.gpr());
2001 jsValueResult(result
.gpr(), node
);
2005 case MovHintAndCheck
: {
2006 compileMovHintAndCheck(node
);
2011 compileInlineStart(node
);
2017 RELEASE_ASSERT_NOT_REACHED();
2022 // SetLocal doubles as a hint as to where a node will be stored and
2023 // as a speculation point. So before we speculate make sure that we
2024 // know where the child of this node needs to go in the virtual
2026 compileMovHint(node
);
2028 if (node
->variableAccessData()->shouldUnboxIfPossible()) {
2029 if (node
->variableAccessData()->shouldUseDoubleFormat()) {
2030 SpeculateDoubleOperand
value(this, node
->child1());
2031 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
->local()));
2033 // Indicate that it's no longer necessary to retrieve the value of
2034 // this bytecode variable from registers or other locations in the stack,
2035 // but that it is stored as a double.
2036 recordSetLocal(node
->local(), ValueSource(DoubleInJSStack
));
2040 SpeculatedType predictedType
= node
->variableAccessData()->argumentAwarePrediction();
2041 if (isInt32Speculation(predictedType
)) {
2042 SpeculateIntegerOperand
value(this, node
->child1());
2043 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->local()));
2045 recordSetLocal(node
->local(), ValueSource(Int32InJSStack
));
2048 if (isCellSpeculation(predictedType
)) {
2049 SpeculateCellOperand
cell(this, node
->child1());
2050 GPRReg cellGPR
= cell
.gpr();
2051 m_jit
.store64(cellGPR
, JITCompiler::addressFor(node
->local()));
2053 recordSetLocal(node
->local(), ValueSource(CellInJSStack
));
2056 if (isBooleanSpeculation(predictedType
)) {
2057 SpeculateBooleanOperand
boolean(this, node
->child1());
2058 m_jit
.store64(boolean
.gpr(), JITCompiler::addressFor(node
->local()));
2060 recordSetLocal(node
->local(), ValueSource(BooleanInJSStack
));
2065 JSValueOperand
value(this, node
->child1());
2066 m_jit
.store64(value
.gpr(), JITCompiler::addressFor(node
->local()));
2069 recordSetLocal(node
->local(), ValueSource(ValueInJSStack
));
2071 // If we're storing an arguments object that has been optimized away,
2072 // our variable event stream for OSR exit now reflects the optimized
2073 // value (JSValue()). On the slow path, we want an arguments object
2074 // instead. We add an additional move hint to show OSR exit that it
2075 // needs to reconstruct the arguments object.
2076 if (node
->child1()->op() == PhantomArguments
)
2077 compileMovHint(node
);
2083 // This is a no-op; it just marks the fact that the argument is being used.
2084 // But it may be profitable to use this as a hook to run speculation checks
2085 // on arguments, thereby allowing us to trivially eliminate such checks if
2086 // the argument is not used.
2092 if (isInt32Constant(node
->child1().node())) {
2093 SpeculateIntegerOperand
op2(this, node
->child2());
2094 GPRTemporary
result(this, op2
);
2096 bitOp(op
, valueOfInt32Constant(node
->child1().node()), op2
.gpr(), result
.gpr());
2098 integerResult(result
.gpr(), node
);
2099 } else if (isInt32Constant(node
->child2().node())) {
2100 SpeculateIntegerOperand
op1(this, node
->child1());
2101 GPRTemporary
result(this, op1
);
2103 bitOp(op
, valueOfInt32Constant(node
->child2().node()), op1
.gpr(), result
.gpr());
2105 integerResult(result
.gpr(), node
);
2107 SpeculateIntegerOperand
op1(this, node
->child1());
2108 SpeculateIntegerOperand
op2(this, node
->child2());
2109 GPRTemporary
result(this, op1
, op2
);
2111 GPRReg reg1
= op1
.gpr();
2112 GPRReg reg2
= op2
.gpr();
2113 bitOp(op
, reg1
, reg2
, result
.gpr());
2115 integerResult(result
.gpr(), node
);
2122 if (isInt32Constant(node
->child2().node())) {
2123 SpeculateIntegerOperand
op1(this, node
->child1());
2124 GPRTemporary
result(this, op1
);
2126 shiftOp(op
, op1
.gpr(), valueOfInt32Constant(node
->child2().node()) & 0x1f, result
.gpr());
2128 integerResult(result
.gpr(), node
);
2130 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
2131 SpeculateIntegerOperand
op1(this, node
->child1());
2132 SpeculateIntegerOperand
op2(this, node
->child2());
2133 GPRTemporary
result(this, op1
);
2135 GPRReg reg1
= op1
.gpr();
2136 GPRReg reg2
= op2
.gpr();
2137 shiftOp(op
, reg1
, reg2
, result
.gpr());
2139 integerResult(result
.gpr(), node
);
2143 case UInt32ToNumber
: {
2144 compileUInt32ToNumber(node
);
2148 case DoubleAsInt32
: {
2149 compileDoubleAsInt32(node
);
2153 case ValueToInt32
: {
2154 compileValueToInt32(node
);
2159 case ForwardInt32ToDouble
: {
2160 compileInt32ToDouble(node
);
2170 compileMakeRope(node
);
2174 compileArithSub(node
);
2178 compileArithNegate(node
);
2182 compileArithMul(node
);
2186 compileArithIMul(node
);
2190 switch (node
->binaryUseKind()) {
2192 #if CPU(X86) || CPU(X86_64)
2193 compileIntegerArithDivForX86(node
);
2195 compileIntegerArithDivForARM64(node
);
2197 // See DFGFixupPhase - on any architecture other than X86[_64] we'll force the prediction to double.
2198 ASSERT_NOT_REACHED();
2204 SpeculateDoubleOperand
op1(this, node
->child1());
2205 SpeculateDoubleOperand
op2(this, node
->child2());
2206 FPRTemporary
result(this, op1
);
2208 FPRReg reg1
= op1
.fpr();
2209 FPRReg reg2
= op2
.fpr();
2210 m_jit
.divDouble(reg1
, reg2
, result
.fpr());
2212 doubleResult(result
.fpr(), node
);
2217 RELEASE_ASSERT_NOT_REACHED();
2224 compileArithMod(node
);
2229 switch (node
->child1().useKind()) {
2231 SpeculateIntegerOperand
op1(this, node
->child1());
2232 GPRTemporary
result(this);
2233 GPRTemporary
scratch(this);
2235 m_jit
.zeroExtend32ToPtr(op1
.gpr(), result
.gpr());
2236 m_jit
.rshift32(result
.gpr(), MacroAssembler::TrustedImm32(31), scratch
.gpr());
2237 m_jit
.add32(scratch
.gpr(), result
.gpr());
2238 m_jit
.xor32(scratch
.gpr(), result
.gpr());
2239 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, result
.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2240 integerResult(result
.gpr(), node
);
2245 SpeculateDoubleOperand
op1(this, node
->child1());
2246 FPRTemporary
result(this);
2248 m_jit
.absDouble(op1
.fpr(), result
.fpr());
2249 doubleResult(result
.fpr(), node
);
2254 RELEASE_ASSERT_NOT_REACHED();
2262 switch (node
->binaryUseKind()) {
2264 SpeculateStrictInt32Operand
op1(this, node
->child1());
2265 SpeculateStrictInt32Operand
op2(this, node
->child2());
2266 GPRTemporary
result(this, op1
);
2268 MacroAssembler::Jump op1Less
= m_jit
.branch32(op
== ArithMin
? MacroAssembler::LessThan
: MacroAssembler::GreaterThan
, op1
.gpr(), op2
.gpr());
2269 m_jit
.move(op2
.gpr(), result
.gpr());
2270 if (op1
.gpr() != result
.gpr()) {
2271 MacroAssembler::Jump done
= m_jit
.jump();
2272 op1Less
.link(&m_jit
);
2273 m_jit
.move(op1
.gpr(), result
.gpr());
2276 op1Less
.link(&m_jit
);
2278 integerResult(result
.gpr(), node
);
2283 SpeculateDoubleOperand
op1(this, node
->child1());
2284 SpeculateDoubleOperand
op2(this, node
->child2());
2285 FPRTemporary
result(this, op1
);
2287 FPRReg op1FPR
= op1
.fpr();
2288 FPRReg op2FPR
= op2
.fpr();
2289 FPRReg resultFPR
= result
.fpr();
2291 MacroAssembler::JumpList done
;
2293 MacroAssembler::Jump op1Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleLessThan
: MacroAssembler::DoubleGreaterThan
, op1FPR
, op2FPR
);
2295 // op2 is eather the lesser one or one of then is NaN
2296 MacroAssembler::Jump op2Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleGreaterThanOrEqual
: MacroAssembler::DoubleLessThanOrEqual
, op1FPR
, op2FPR
);
2298 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2299 // op1 + op2 and putting it into result.
2300 m_jit
.addDouble(op1FPR
, op2FPR
, resultFPR
);
2301 done
.append(m_jit
.jump());
2303 op2Less
.link(&m_jit
);
2304 m_jit
.moveDouble(op2FPR
, resultFPR
);
2306 if (op1FPR
!= resultFPR
) {
2307 done
.append(m_jit
.jump());
2309 op1Less
.link(&m_jit
);
2310 m_jit
.moveDouble(op1FPR
, resultFPR
);
2312 op1Less
.link(&m_jit
);
2316 doubleResult(resultFPR
, node
);
2321 RELEASE_ASSERT_NOT_REACHED();
2328 SpeculateDoubleOperand
op1(this, node
->child1());
2329 FPRTemporary
result(this, op1
);
2331 m_jit
.sqrtDouble(op1
.fpr(), result
.fpr());
2333 doubleResult(result
.fpr(), node
);
2338 compileLogicalNot(node
);
2342 if (compare(node
, JITCompiler::LessThan
, JITCompiler::DoubleLessThan
, operationCompareLess
))
2347 if (compare(node
, JITCompiler::LessThanOrEqual
, JITCompiler::DoubleLessThanOrEqual
, operationCompareLessEq
))
2351 case CompareGreater
:
2352 if (compare(node
, JITCompiler::GreaterThan
, JITCompiler::DoubleGreaterThan
, operationCompareGreater
))
2356 case CompareGreaterEq
:
2357 if (compare(node
, JITCompiler::GreaterThanOrEqual
, JITCompiler::DoubleGreaterThanOrEqual
, operationCompareGreaterEq
))
2361 case CompareEqConstant
:
2362 ASSERT(isNullConstant(node
->child2().node()));
2363 if (nonSpeculativeCompareNull(node
, node
->child1()))
2368 if (compare(node
, JITCompiler::Equal
, JITCompiler::DoubleEqual
, operationCompareEq
))
2372 case CompareStrictEqConstant
:
2373 if (compileStrictEqForConstant(node
, node
->child1(), valueOfJSConstant(node
->child2().node())))
2377 case CompareStrictEq
:
2378 if (compileStrictEq(node
))
2382 case StringCharCodeAt
: {
2383 compileGetCharCodeAt(node
);
2387 case StringCharAt
: {
2388 // Relies on StringCharAt node having same basic layout as GetByVal
2389 compileGetByValOnString(node
);
2393 case StringFromCharCode
: {
2394 compileFromCharCode(node
);
2404 case ArrayifyToStructure
: {
2410 switch (node
->arrayMode().type()) {
2411 case Array::SelectUsingPredictions
:
2412 case Array::ForceExit
:
2413 RELEASE_ASSERT_NOT_REACHED();
2414 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2416 case Array::Generic
: {
2417 JSValueOperand
base(this, node
->child1());
2418 JSValueOperand
property(this, node
->child2());
2419 GPRReg baseGPR
= base
.gpr();
2420 GPRReg propertyGPR
= property
.gpr();
2423 GPRResult
result(this);
2424 callOperation(operationGetByVal
, result
.gpr(), baseGPR
, propertyGPR
);
2426 jsValueResult(result
.gpr(), node
);
2430 case Array::Contiguous
: {
2431 if (node
->arrayMode().isInBounds()) {
2432 SpeculateStrictInt32Operand
property(this, node
->child2());
2433 StorageOperand
storage(this, node
->child3());
2435 GPRReg propertyReg
= property
.gpr();
2436 GPRReg storageReg
= storage
.gpr();
2441 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2443 GPRTemporary
result(this);
2444 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.gpr());
2445 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchTest64(MacroAssembler::Zero
, result
.gpr()));
2446 jsValueResult(result
.gpr(), node
, node
->arrayMode().type() == Array::Int32
? DataFormatJSInteger
: DataFormatJS
);
2450 SpeculateCellOperand
base(this, node
->child1());
2451 SpeculateStrictInt32Operand
property(this, node
->child2());
2452 StorageOperand
storage(this, node
->child3());
2454 GPRReg baseReg
= base
.gpr();
2455 GPRReg propertyReg
= property
.gpr();
2456 GPRReg storageReg
= storage
.gpr();
2461 GPRTemporary
result(this);
2462 GPRReg resultReg
= result
.gpr();
2464 MacroAssembler::JumpList slowCases
;
2466 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2468 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), resultReg
);
2469 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, resultReg
));
2471 addSlowPathGenerator(
2473 slowCases
, this, operationGetByValArrayInt
,
2474 result
.gpr(), baseReg
, propertyReg
));
2476 jsValueResult(resultReg
, node
);
2480 case Array::Double
: {
2481 if (node
->arrayMode().isInBounds()) {
2482 if (node
->arrayMode().isSaneChain()) {
2483 JSGlobalObject
* globalObject
= m_jit
.globalObjectFor(node
->codeOrigin
);
2484 ASSERT(globalObject
->arrayPrototypeChainIsSane());
2485 globalObject
->arrayPrototype()->structure()->addTransitionWatchpoint(speculationWatchpoint());
2486 globalObject
->objectPrototype()->structure()->addTransitionWatchpoint(speculationWatchpoint());
2489 SpeculateStrictInt32Operand
property(this, node
->child2());
2490 StorageOperand
storage(this, node
->child3());
2492 GPRReg propertyReg
= property
.gpr();
2493 GPRReg storageReg
= storage
.gpr();
2498 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2500 FPRTemporary
result(this);
2501 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.fpr());
2502 if (!node
->arrayMode().isSaneChain())
2503 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, result
.fpr(), result
.fpr()));
2504 doubleResult(result
.fpr(), node
);
2508 SpeculateCellOperand
base(this, node
->child1());
2509 SpeculateStrictInt32Operand
property(this, node
->child2());
2510 StorageOperand
storage(this, node
->child3());
2512 GPRReg baseReg
= base
.gpr();
2513 GPRReg propertyReg
= property
.gpr();
2514 GPRReg storageReg
= storage
.gpr();
2519 GPRTemporary
result(this);
2520 FPRTemporary
temp(this);
2521 GPRReg resultReg
= result
.gpr();
2522 FPRReg tempReg
= temp
.fpr();
2524 MacroAssembler::JumpList slowCases
;
2526 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2528 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), tempReg
);
2529 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempReg
, tempReg
));
2530 boxDouble(tempReg
, resultReg
);
2532 addSlowPathGenerator(
2534 slowCases
, this, operationGetByValArrayInt
,
2535 result
.gpr(), baseReg
, propertyReg
));
2537 jsValueResult(resultReg
, node
);
2541 case Array::ArrayStorage
:
2542 case Array::SlowPutArrayStorage
: {
2543 if (node
->arrayMode().isInBounds()) {
2544 SpeculateStrictInt32Operand
property(this, node
->child2());
2545 StorageOperand
storage(this, node
->child3());
2547 GPRReg propertyReg
= property
.gpr();
2548 GPRReg storageReg
= storage
.gpr();
2553 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2555 GPRTemporary
result(this);
2556 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), result
.gpr());
2557 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchTest64(MacroAssembler::Zero
, result
.gpr()));
2559 jsValueResult(result
.gpr(), node
);
2563 SpeculateCellOperand
base(this, node
->child1());
2564 SpeculateStrictInt32Operand
property(this, node
->child2());
2565 StorageOperand
storage(this, node
->child3());
2567 GPRReg baseReg
= base
.gpr();
2568 GPRReg propertyReg
= property
.gpr();
2569 GPRReg storageReg
= storage
.gpr();
2574 GPRTemporary
result(this);
2575 GPRReg resultReg
= result
.gpr();
2577 MacroAssembler::JumpList slowCases
;
2579 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2581 m_jit
.load64(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), resultReg
);
2582 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, resultReg
));
2584 addSlowPathGenerator(
2586 slowCases
, this, operationGetByValArrayInt
,
2587 result
.gpr(), baseReg
, propertyReg
));
2589 jsValueResult(resultReg
, node
);
2593 compileGetByValOnString(node
);
2595 case Array::Arguments
:
2596 compileGetByValOnArguments(node
);
2598 case Array::Int8Array
:
2599 compileGetByValOnIntTypedArray(m_jit
.vm()->int8ArrayDescriptor(), node
, sizeof(int8_t), SignedTypedArray
);
2601 case Array::Int16Array
:
2602 compileGetByValOnIntTypedArray(m_jit
.vm()->int16ArrayDescriptor(), node
, sizeof(int16_t), SignedTypedArray
);
2604 case Array::Int32Array
:
2605 compileGetByValOnIntTypedArray(m_jit
.vm()->int32ArrayDescriptor(), node
, sizeof(int32_t), SignedTypedArray
);
2607 case Array::Uint8Array
:
2608 compileGetByValOnIntTypedArray(m_jit
.vm()->uint8ArrayDescriptor(), node
, sizeof(uint8_t), UnsignedTypedArray
);
2610 case Array::Uint8ClampedArray
:
2611 compileGetByValOnIntTypedArray(m_jit
.vm()->uint8ClampedArrayDescriptor(), node
, sizeof(uint8_t), UnsignedTypedArray
);
2613 case Array::Uint16Array
:
2614 compileGetByValOnIntTypedArray(m_jit
.vm()->uint16ArrayDescriptor(), node
, sizeof(uint16_t), UnsignedTypedArray
);
2616 case Array::Uint32Array
:
2617 compileGetByValOnIntTypedArray(m_jit
.vm()->uint32ArrayDescriptor(), node
, sizeof(uint32_t), UnsignedTypedArray
);
2619 case Array::Float32Array
:
2620 compileGetByValOnFloatTypedArray(m_jit
.vm()->float32ArrayDescriptor(), node
, sizeof(float));
2622 case Array::Float64Array
:
2623 compileGetByValOnFloatTypedArray(m_jit
.vm()->float64ArrayDescriptor(), node
, sizeof(double));
2626 RELEASE_ASSERT_NOT_REACHED();
2633 case PutByValAlias
: {
2634 Edge child1
= m_jit
.graph().varArgChild(node
, 0);
2635 Edge child2
= m_jit
.graph().varArgChild(node
, 1);
2636 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
2637 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
2639 ArrayMode arrayMode
= node
->arrayMode().modeForPut();
2640 bool alreadyHandled
= false;
2642 switch (arrayMode
.type()) {
2643 case Array::SelectUsingPredictions
:
2644 case Array::ForceExit
:
2645 RELEASE_ASSERT_NOT_REACHED();
2646 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2647 alreadyHandled
= true;
2649 case Array::Generic
: {
2650 RELEASE_ASSERT(node
->op() == PutByVal
);
2652 JSValueOperand
arg1(this, child1
);
2653 JSValueOperand
arg2(this, child2
);
2654 JSValueOperand
arg3(this, child3
);
2655 GPRReg arg1GPR
= arg1
.gpr();
2656 GPRReg arg2GPR
= arg2
.gpr();
2657 GPRReg arg3GPR
= arg3
.gpr();
2660 callOperation(m_jit
.strictModeFor(node
->codeOrigin
) ? operationPutByValStrict
: operationPutByValNonStrict
, arg1GPR
, arg2GPR
, arg3GPR
);
2663 alreadyHandled
= true;
2673 // FIXME: the base may not be necessary for some array access modes. But we have to
2674 // keep it alive to this point, so it's likely to be in a register anyway. Likely
2675 // no harm in locking it here.
2676 SpeculateCellOperand
base(this, child1
);
2677 SpeculateStrictInt32Operand
property(this, child2
);
2679 GPRReg baseReg
= base
.gpr();
2680 GPRReg propertyReg
= property
.gpr();
2682 switch (arrayMode
.type()) {
2684 case Array::Contiguous
: {
2685 JSValueOperand
value(this, child3
, ManualOperandSpeculation
);
2687 GPRReg valueReg
= value
.gpr();
2692 if (arrayMode
.type() == Array::Int32
) {
2694 JSValueRegs(valueReg
), child3
, SpecInt32
,
2696 MacroAssembler::Below
, valueReg
, GPRInfo::tagTypeNumberRegister
));
2699 if (arrayMode
.type() == Array::Contiguous
&& Heap::isWriteBarrierEnabled()) {
2700 GPRTemporary
scratch(this);
2701 writeBarrier(baseReg
, value
.gpr(), child3
, WriteBarrierForPropertyAccess
, scratch
.gpr());
2704 StorageOperand
storage(this, child4
);
2705 GPRReg storageReg
= storage
.gpr();
2707 if (node
->op() == PutByValAlias
) {
2708 // Store the value to the array.
2709 GPRReg propertyReg
= property
.gpr();
2710 GPRReg valueReg
= value
.gpr();
2711 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2717 GPRTemporary temporary
;
2718 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
2720 MacroAssembler::Jump slowCase
;
2722 if (arrayMode
.isInBounds()) {
2724 StoreToHoleOrOutOfBounds
, JSValueRegs(), 0,
2725 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2727 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2729 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
2731 if (!arrayMode
.isOutOfBounds())
2732 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
2734 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
2735 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2737 inBounds
.link(&m_jit
);
2740 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2747 if (arrayMode
.isOutOfBounds()) {
2748 addSlowPathGenerator(
2751 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2752 NoResult
, baseReg
, propertyReg
, valueReg
));
2755 noResult(node
, UseChildrenCalledExplicitly
);
2759 case Array::Double
: {
2760 compileDoublePutByVal(node
, base
, property
);
2764 case Array::ArrayStorage
:
2765 case Array::SlowPutArrayStorage
: {
2766 JSValueOperand
value(this, child3
);
2768 GPRReg valueReg
= value
.gpr();
2773 if (Heap::isWriteBarrierEnabled()) {
2774 GPRTemporary
scratch(this);
2775 writeBarrier(baseReg
, value
.gpr(), child3
, WriteBarrierForPropertyAccess
, scratch
.gpr());
2778 StorageOperand
storage(this, child4
);
2779 GPRReg storageReg
= storage
.gpr();
2781 if (node
->op() == PutByValAlias
) {
2782 // Store the value to the array.
2783 GPRReg propertyReg
= property
.gpr();
2784 GPRReg valueReg
= value
.gpr();
2785 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2791 GPRTemporary temporary
;
2792 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
2794 MacroAssembler::JumpList slowCases
;
2796 MacroAssembler::Jump beyondArrayBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2797 if (!arrayMode
.isOutOfBounds())
2798 speculationCheck(OutOfBounds
, JSValueRegs(), 0, beyondArrayBounds
);
2800 slowCases
.append(beyondArrayBounds
);
2802 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2803 if (arrayMode
.isInBounds()) {
2805 StoreToHole
, JSValueRegs(), 0,
2806 m_jit
.branchTest64(MacroAssembler::Zero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]))));
2808 MacroAssembler::Jump notHoleValue
= m_jit
.branchTest64(MacroAssembler::NonZero
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2809 if (arrayMode
.isSlowPut()) {
2810 // This is sort of strange. If we wanted to optimize this code path, we would invert
2811 // the above branch. But it's simply not worth it since this only happens if we're
2812 // already having a bad time.
2813 slowCases
.append(m_jit
.jump());
2815 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, ArrayStorage::numValuesInVectorOffset()));
2817 // If we're writing to a hole we might be growing the array;
2818 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2819 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
2820 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2822 lengthDoesNotNeedUpdate
.link(&m_jit
);
2824 notHoleValue
.link(&m_jit
);
2827 // Store the value to the array.
2828 m_jit
.store64(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
2835 if (!slowCases
.empty()) {
2836 addSlowPathGenerator(
2839 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2840 NoResult
, baseReg
, propertyReg
, valueReg
));
2843 noResult(node
, UseChildrenCalledExplicitly
);
2847 case Array::Arguments
: {
2848 JSValueOperand
value(this, child3
);
2849 GPRTemporary
scratch(this);
2850 GPRTemporary
scratch2(this);
2852 GPRReg valueReg
= value
.gpr();
2853 GPRReg scratchReg
= scratch
.gpr();
2854 GPRReg scratch2Reg
= scratch2
.gpr();
2859 // Two really lame checks.
2861 Uncountable
, JSValueSource(), 0,
2863 MacroAssembler::AboveOrEqual
, propertyReg
,
2864 MacroAssembler::Address(baseReg
, OBJECT_OFFSETOF(Arguments
, m_numArguments
))));
2866 Uncountable
, JSValueSource(), 0,
2867 m_jit
.branchTestPtr(
2868 MacroAssembler::NonZero
,
2869 MacroAssembler::Address(
2870 baseReg
, OBJECT_OFFSETOF(Arguments
, m_slowArguments
))));
2872 m_jit
.move(propertyReg
, scratch2Reg
);
2873 m_jit
.neg32(scratch2Reg
);
2874 m_jit
.signExtend32ToPtr(scratch2Reg
, scratch2Reg
);
2876 MacroAssembler::Address(baseReg
, OBJECT_OFFSETOF(Arguments
, m_registers
)),
2881 MacroAssembler::BaseIndex(
2882 scratchReg
, scratch2Reg
, MacroAssembler::TimesEight
,
2883 CallFrame::thisArgumentOffset() * sizeof(Register
) - sizeof(Register
)));
2889 case Array::Int8Array
:
2890 compilePutByValForIntTypedArray(m_jit
.vm()->int8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int8_t), SignedTypedArray
);
2893 case Array::Int16Array
:
2894 compilePutByValForIntTypedArray(m_jit
.vm()->int16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int16_t), SignedTypedArray
);
2897 case Array::Int32Array
:
2898 compilePutByValForIntTypedArray(m_jit
.vm()->int32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int32_t), SignedTypedArray
);
2901 case Array::Uint8Array
:
2902 compilePutByValForIntTypedArray(m_jit
.vm()->uint8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), UnsignedTypedArray
);
2905 case Array::Uint8ClampedArray
:
2906 compilePutByValForIntTypedArray(m_jit
.vm()->uint8ClampedArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), UnsignedTypedArray
, ClampRounding
);
2909 case Array::Uint16Array
:
2910 compilePutByValForIntTypedArray(m_jit
.vm()->uint16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint16_t), UnsignedTypedArray
);
2913 case Array::Uint32Array
:
2914 compilePutByValForIntTypedArray(m_jit
.vm()->uint32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint32_t), UnsignedTypedArray
);
2917 case Array::Float32Array
:
2918 compilePutByValForFloatTypedArray(m_jit
.vm()->float32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(float));
2921 case Array::Float64Array
:
2922 compilePutByValForFloatTypedArray(m_jit
.vm()->float64ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(double));
2926 RELEASE_ASSERT_NOT_REACHED();
2934 if (compileRegExpExec(node
))
2936 if (!node
->adjustedRefCount()) {
2937 SpeculateCellOperand
base(this, node
->child1());
2938 SpeculateCellOperand
argument(this, node
->child2());
2939 GPRReg baseGPR
= base
.gpr();
2940 GPRReg argumentGPR
= argument
.gpr();
2943 GPRResult
result(this);
2944 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2946 // Must use jsValueResult because otherwise we screw up register
2947 // allocation, which thinks that this node has a result.
2948 jsValueResult(result
.gpr(), node
);
2952 SpeculateCellOperand
base(this, node
->child1());
2953 SpeculateCellOperand
argument(this, node
->child2());
2954 GPRReg baseGPR
= base
.gpr();
2955 GPRReg argumentGPR
= argument
.gpr();
2958 GPRResult
result(this);
2959 callOperation(operationRegExpExec
, result
.gpr(), baseGPR
, argumentGPR
);
2961 jsValueResult(result
.gpr(), node
);
2966 SpeculateCellOperand
base(this, node
->child1());
2967 SpeculateCellOperand
argument(this, node
->child2());
2968 GPRReg baseGPR
= base
.gpr();
2969 GPRReg argumentGPR
= argument
.gpr();
2972 GPRResult
result(this);
2973 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2975 // If we add a DataFormatBool, we should use it here.
2976 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
2977 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
2982 ASSERT(node
->arrayMode().isJSArray());
2984 SpeculateCellOperand
base(this, node
->child1());
2985 GPRTemporary
storageLength(this);
2987 GPRReg baseGPR
= base
.gpr();
2988 GPRReg storageLengthGPR
= storageLength
.gpr();
2990 StorageOperand
storage(this, node
->child3());
2991 GPRReg storageGPR
= storage
.gpr();
2993 switch (node
->arrayMode().type()) {
2995 case Array::Contiguous
: {
2996 JSValueOperand
value(this, node
->child2(), ManualOperandSpeculation
);
2997 GPRReg valueGPR
= value
.gpr();
2999 if (node
->arrayMode().type() == Array::Int32
) {
3001 JSValueRegs(valueGPR
), node
->child2(), SpecInt32
,
3003 MacroAssembler::Below
, valueGPR
, GPRInfo::tagTypeNumberRegister
));
3006 if (node
->arrayMode().type() != Array::Int32
&& Heap::isWriteBarrierEnabled()) {
3007 GPRTemporary
scratch(this);
3008 writeBarrier(baseGPR
, valueGPR
, node
->child2(), WriteBarrierForPropertyAccess
, scratch
.gpr(), storageLengthGPR
);
3011 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
3012 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3013 m_jit
.store64(valueGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3014 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3015 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3016 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
3018 addSlowPathGenerator(
3020 slowPath
, this, operationArrayPush
, NoResult
, storageLengthGPR
,
3021 valueGPR
, baseGPR
));
3023 jsValueResult(storageLengthGPR
, node
);
3027 case Array::Double
: {
3028 SpeculateDoubleOperand
value(this, node
->child2());
3029 FPRReg valueFPR
= value
.fpr();
3032 JSValueRegs(), node
->child2(), SpecRealNumber
,
3033 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, valueFPR
, valueFPR
));
3035 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
3036 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3037 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3038 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3039 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3040 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
3042 addSlowPathGenerator(
3044 slowPath
, this, operationArrayPushDouble
, NoResult
, storageLengthGPR
,
3045 valueFPR
, baseGPR
));
3047 jsValueResult(storageLengthGPR
, node
);
3051 case Array::ArrayStorage
: {
3052 JSValueOperand
value(this, node
->child2());
3053 GPRReg valueGPR
= value
.gpr();
3055 if (Heap::isWriteBarrierEnabled()) {
3056 GPRTemporary
scratch(this);
3057 writeBarrier(baseGPR
, valueGPR
, node
->child2(), WriteBarrierForPropertyAccess
, scratch
.gpr(), storageLengthGPR
);
3060 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3062 // Refuse to handle bizarre lengths.
3063 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Above
, storageLengthGPR
, TrustedImm32(0x7ffffffe)));
3065 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
3067 m_jit
.store64(valueGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
3069 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3070 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3071 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3072 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, storageLengthGPR
);
3074 addSlowPathGenerator(
3076 slowPath
, this, operationArrayPush
, NoResult
, storageLengthGPR
,
3077 valueGPR
, baseGPR
));
3079 jsValueResult(storageLengthGPR
, node
);
3091 ASSERT(node
->arrayMode().isJSArray());
3093 SpeculateCellOperand
base(this, node
->child1());
3094 StorageOperand
storage(this, node
->child2());
3095 GPRTemporary
value(this);
3096 GPRTemporary
storageLength(this);
3097 FPRTemporary
temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().
3099 GPRReg baseGPR
= base
.gpr();
3100 GPRReg storageGPR
= storage
.gpr();
3101 GPRReg valueGPR
= value
.gpr();
3102 GPRReg storageLengthGPR
= storageLength
.gpr();
3103 FPRReg tempFPR
= temp
.fpr();
3105 switch (node
->arrayMode().type()) {
3108 case Array::Contiguous
: {
3110 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
3111 MacroAssembler::Jump undefinedCase
=
3112 m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
);
3113 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
3115 storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3116 MacroAssembler::Jump slowCase
;
3117 if (node
->arrayMode().type() == Array::Double
) {
3119 MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
),
3121 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3122 // length and the new length.
3124 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3125 slowCase
= m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempFPR
, tempFPR
);
3126 boxDouble(tempFPR
, valueGPR
);
3129 MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
),
3131 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3132 // length and the new length.
3134 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3135 slowCase
= m_jit
.branchTest64(MacroAssembler::Zero
, valueGPR
);
3138 addSlowPathGenerator(
3140 undefinedCase
, this,
3141 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR
));
3142 addSlowPathGenerator(
3144 slowCase
, this, operationArrayPopAndRecoverLength
, valueGPR
, baseGPR
));
3146 // We can't know for sure that the result is an int because of the slow paths. :-/
3147 jsValueResult(valueGPR
, node
);
3151 case Array::ArrayStorage
: {
3152 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3154 JITCompiler::Jump undefinedCase
=
3155 m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
);
3157 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
3159 JITCompiler::JumpList slowCases
;
3160 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset())));
3162 m_jit
.load64(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), valueGPR
);
3163 slowCases
.append(m_jit
.branchTest64(MacroAssembler::Zero
, valueGPR
));
3165 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3167 m_jit
.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
3168 m_jit
.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3170 addSlowPathGenerator(
3172 undefinedCase
, this,
3173 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR
));
3175 addSlowPathGenerator(
3177 slowCases
, this, operationArrayPop
, valueGPR
, baseGPR
));
3179 jsValueResult(valueGPR
, node
);
3191 BlockIndex taken
= node
->takenBlockIndex();
3202 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT1
);
3203 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
3204 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
3206 #if DFG_ENABLE(SUCCESS_STATS)
3207 static SamplingCounter
counter("SpeculativeJIT");
3208 m_jit
.emitCount(counter
);
3211 // Return the result in returnValueGPR.
3212 JSValueOperand
op1(this, node
->child1());
3213 m_jit
.move(op1
.gpr(), GPRInfo::returnValueGPR
);
3215 // Grab the return address.
3216 m_jit
.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC
, GPRInfo::regT1
);
3217 // Restore our caller's "r".
3218 m_jit
.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame
, GPRInfo::callFrameRegister
);
3220 m_jit
.restoreReturnAddressBeforeReturn(GPRInfo::regT1
);
3228 case ThrowReferenceError
: {
3229 // We expect that throw statements are rare and are intended to exit the code block
3230 // anyway, so we just OSR back to the old JIT for now.
3231 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
3236 RELEASE_ASSERT(node
->child1().useKind() == UntypedUse
);
3237 JSValueOperand
op1(this, node
->child1());
3238 GPRTemporary
result(this, op1
);
3240 GPRReg op1GPR
= op1
.gpr();
3241 GPRReg resultGPR
= result
.gpr();
3245 if (!(m_state
.forNode(node
->child1()).m_type
& ~(SpecNumber
| SpecBoolean
)))
3246 m_jit
.move(op1GPR
, resultGPR
);
3248 MacroAssembler::Jump alreadyPrimitive
= m_jit
.branchTest64(MacroAssembler::NonZero
, op1GPR
, GPRInfo::tagMaskRegister
);
3249 MacroAssembler::Jump notPrimitive
= m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(op1GPR
, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
3251 alreadyPrimitive
.link(&m_jit
);
3252 m_jit
.move(op1GPR
, resultGPR
);
3254 addSlowPathGenerator(
3255 slowPathCall(notPrimitive
, this, operationToPrimitive
, resultGPR
, op1GPR
));
3258 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3263 if (node
->child1().useKind() == UntypedUse
) {
3264 JSValueOperand
op1(this, node
->child1());
3265 GPRReg op1GPR
= op1
.gpr();
3267 GPRResult
result(this);
3268 GPRReg resultGPR
= result
.gpr();
3272 JITCompiler::Jump done
;
3273 if (node
->child1()->prediction() & SpecString
) {
3274 JITCompiler::Jump slowPath1
= m_jit
.branchTest64(
3275 JITCompiler::NonZero
, op1GPR
, GPRInfo::tagMaskRegister
);
3276 JITCompiler::Jump slowPath2
= m_jit
.branchPtr(
3277 JITCompiler::NotEqual
,
3278 JITCompiler::Address(op1GPR
, JSCell::structureOffset()),
3279 TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
3280 m_jit
.move(op1GPR
, resultGPR
);
3281 done
= m_jit
.jump();
3282 slowPath1
.link(&m_jit
);
3283 slowPath2
.link(&m_jit
);
3285 callOperation(operationToString
, resultGPR
, op1GPR
);
3288 cellResult(resultGPR
, node
);
3292 compileToStringOnCell(node
);
3296 case NewStringObject
: {
3297 compileNewStringObject(node
);
3302 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->codeOrigin
);
3303 if (!globalObject
->isHavingABadTime() && !hasArrayStorage(node
->indexingType())) {
3304 globalObject
->havingABadTimeWatchpoint()->add(speculationWatchpoint());
3306 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3307 RELEASE_ASSERT(structure
->indexingType() == node
->indexingType());
3309 hasUndecided(structure
->indexingType())
3310 || hasInt32(structure
->indexingType())
3311 || hasDouble(structure
->indexingType())
3312 || hasContiguous(structure
->indexingType()));
3314 unsigned numElements
= node
->numChildren();
3316 GPRTemporary
result(this);
3317 GPRTemporary
storage(this);
3319 GPRReg resultGPR
= result
.gpr();
3320 GPRReg storageGPR
= storage
.gpr();
3322 emitAllocateJSArray(resultGPR
, structure
, storageGPR
, numElements
);
3324 // At this point, one way or another, resultGPR and storageGPR have pointers to
3325 // the JSArray and the Butterfly, respectively.
3327 ASSERT(!hasUndecided(structure
->indexingType()) || !node
->numChildren());
3329 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3330 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3331 switch (node
->indexingType()) {
3332 case ALL_BLANK_INDEXING_TYPES
:
3333 case ALL_UNDECIDED_INDEXING_TYPES
:
3336 case ALL_DOUBLE_INDEXING_TYPES
: {
3337 SpeculateDoubleOperand
operand(this, use
);
3338 FPRReg opFPR
= operand
.fpr();
3340 JSValueRegs(), use
, SpecRealNumber
,
3342 MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3343 m_jit
.storeDouble(opFPR
, MacroAssembler::Address(storageGPR
, sizeof(double) * operandIdx
));
3346 case ALL_INT32_INDEXING_TYPES
:
3347 case ALL_CONTIGUOUS_INDEXING_TYPES
: {
3348 JSValueOperand
operand(this, use
, ManualOperandSpeculation
);
3349 GPRReg opGPR
= operand
.gpr();
3350 if (hasInt32(node
->indexingType())) {
3352 JSValueRegs(opGPR
), use
, SpecInt32
,
3354 MacroAssembler::Below
, opGPR
, GPRInfo::tagTypeNumberRegister
));
3356 m_jit
.store64(opGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
));
3365 // Yuck, we should *really* have a way of also returning the storageGPR. But
3366 // that's the least of what's wrong with this code. We really shouldn't be
3367 // allocating the array after having computed - and probably spilled to the
3368 // stack - all of the things that will go into the array. The solution to that
3369 // bigger problem will also likely fix the redundancy in reloading the storage
3370 // pointer that we currently have.
3372 cellResult(resultGPR
, node
);
3376 if (!node
->numChildren()) {
3378 GPRResult
result(this);
3379 callOperation(operationNewEmptyArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()));
3380 cellResult(result
.gpr(), node
);
3384 size_t scratchSize
= sizeof(EncodedJSValue
) * node
->numChildren();
3385 ScratchBuffer
* scratchBuffer
= m_jit
.vm()->scratchBufferForSize(scratchSize
);
3386 EncodedJSValue
* buffer
= scratchBuffer
? static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer()) : 0;
3388 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3389 // Need to perform the speculations that this node promises to perform. If we're
3390 // emitting code here and the indexing type is not array storage then there is
3391 // probably something hilarious going on and we're already failing at all the
3392 // things, but at least we're going to be sound.
3393 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3394 switch (node
->indexingType()) {
3395 case ALL_BLANK_INDEXING_TYPES
:
3396 case ALL_UNDECIDED_INDEXING_TYPES
:
3399 case ALL_DOUBLE_INDEXING_TYPES
: {
3400 SpeculateDoubleOperand
operand(this, use
);
3401 GPRTemporary
scratch(this);
3402 FPRReg opFPR
= operand
.fpr();
3403 GPRReg scratchGPR
= scratch
.gpr();
3405 JSValueRegs(), use
, SpecRealNumber
,
3407 MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3408 m_jit
.boxDouble(opFPR
, scratchGPR
);
3409 m_jit
.store64(scratchGPR
, buffer
+ operandIdx
);
3412 case ALL_INT32_INDEXING_TYPES
: {
3413 JSValueOperand
operand(this, use
, ManualOperandSpeculation
);
3414 GPRReg opGPR
= operand
.gpr();
3415 if (hasInt32(node
->indexingType())) {
3417 JSValueRegs(opGPR
), use
, SpecInt32
,
3419 MacroAssembler::Below
, opGPR
, GPRInfo::tagTypeNumberRegister
));
3421 m_jit
.store64(opGPR
, buffer
+ operandIdx
);
3424 case ALL_CONTIGUOUS_INDEXING_TYPES
:
3425 case ALL_ARRAY_STORAGE_INDEXING_TYPES
: {
3426 JSValueOperand
operand(this, use
);
3427 GPRReg opGPR
= operand
.gpr();
3428 m_jit
.store64(opGPR
, buffer
+ operandIdx
);
3438 switch (node
->indexingType()) {
3439 case ALL_DOUBLE_INDEXING_TYPES
:
3440 case ALL_INT32_INDEXING_TYPES
:
3450 GPRTemporary
scratch(this);
3452 // Tell GC mark phase how much of the scratch buffer is active during call.
3453 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3454 m_jit
.storePtr(TrustedImmPtr(scratchSize
), scratch
.gpr());
3457 GPRResult
result(this);
3460 operationNewArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3461 static_cast<void*>(buffer
), node
->numChildren());
3464 GPRTemporary
scratch(this);
3466 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3467 m_jit
.storePtr(TrustedImmPtr(0), scratch
.gpr());
3470 cellResult(result
.gpr(), node
, UseChildrenCalledExplicitly
);
3474 case NewArrayWithSize
: {
3475 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->codeOrigin
);
3476 if (!globalObject
->isHavingABadTime() && !hasArrayStorage(node
->indexingType())) {
3477 globalObject
->havingABadTimeWatchpoint()->add(speculationWatchpoint());
3479 SpeculateStrictInt32Operand
size(this, node
->child1());
3480 GPRTemporary
result(this);
3481 GPRTemporary
storage(this);
3482 GPRTemporary
scratch(this);
3483 GPRTemporary
scratch2(this);
3485 GPRReg sizeGPR
= size
.gpr();
3486 GPRReg resultGPR
= result
.gpr();
3487 GPRReg storageGPR
= storage
.gpr();
3488 GPRReg scratchGPR
= scratch
.gpr();
3489 GPRReg scratch2GPR
= scratch2
.gpr();
3491 MacroAssembler::JumpList slowCases
;
3492 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
)));
3494 ASSERT((1 << 3) == sizeof(JSValue
));
3495 m_jit
.move(sizeGPR
, scratchGPR
);
3496 m_jit
.lshift32(TrustedImm32(3), scratchGPR
);
3497 m_jit
.add32(TrustedImm32(sizeof(IndexingHeader
)), scratchGPR
, resultGPR
);
3499 emitAllocateBasicStorage(resultGPR
, storageGPR
));
3500 m_jit
.subPtr(scratchGPR
, storageGPR
);
3501 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3502 emitAllocateJSObject
<JSArray
>(resultGPR
, ImmPtr(structure
), storageGPR
, scratchGPR
, scratch2GPR
, slowCases
);
3504 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3505 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3507 if (hasDouble(node
->indexingType())) {
3508 m_jit
.move(TrustedImm64(bitwise_cast
<int64_t>(QNaN
)), scratchGPR
);
3509 m_jit
.move(sizeGPR
, scratch2GPR
);
3510 MacroAssembler::Jump done
= m_jit
.branchTest32(MacroAssembler::Zero
, scratch2GPR
);
3511 MacroAssembler::Label loop
= m_jit
.label();
3512 m_jit
.sub32(TrustedImm32(1), scratch2GPR
);
3513 m_jit
.store64(scratchGPR
, MacroAssembler::BaseIndex(storageGPR
, scratch2GPR
, MacroAssembler::TimesEight
));
3514 m_jit
.branchTest32(MacroAssembler::NonZero
, scratch2GPR
).linkTo(loop
, &m_jit
);
3518 addSlowPathGenerator(adoptPtr(
3519 new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
3520 slowCases
, this, operationNewArrayWithSize
, resultGPR
,
3521 globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3522 globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
),
3525 cellResult(resultGPR
, node
);
3529 SpeculateStrictInt32Operand
size(this, node
->child1());
3530 GPRReg sizeGPR
= size
.gpr();
3532 GPRResult
result(this);
3533 GPRReg resultGPR
= result
.gpr();
3534 GPRReg structureGPR
= selectScratchGPR(sizeGPR
);
3535 MacroAssembler::Jump bigLength
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
));
3536 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType())), structureGPR
);
3537 MacroAssembler::Jump done
= m_jit
.jump();
3538 bigLength
.link(&m_jit
);
3539 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
)), structureGPR
);
3541 callOperation(operationNewArrayWithSize
, resultGPR
, structureGPR
, sizeGPR
);
3542 cellResult(resultGPR
, node
);
3546 case NewArrayBuffer
: {
3547 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->codeOrigin
);
3548 IndexingType indexingType
= node
->indexingType();
3549 if (!globalObject
->isHavingABadTime() && !hasArrayStorage(indexingType
)) {
3550 globalObject
->havingABadTimeWatchpoint()->add(speculationWatchpoint());
3552 unsigned numElements
= node
->numConstants();
3554 GPRTemporary
result(this);
3555 GPRTemporary
storage(this);
3557 GPRReg resultGPR
= result
.gpr();
3558 GPRReg storageGPR
= storage
.gpr();
3560 emitAllocateJSArray(resultGPR
, globalObject
->arrayStructureForIndexingTypeDuringAllocation(indexingType
), storageGPR
, numElements
);
3562 RELEASE_ASSERT(indexingType
& IsArray
);
3563 JSValue
* data
= m_jit
.codeBlock()->constantBuffer(node
->startConstant());
3564 if (indexingType
== ArrayWithDouble
) {
3565 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3566 double value
= data
[index
].asNumber();
3568 Imm64(bitwise_cast
<int64_t>(value
)),
3569 MacroAssembler::Address(storageGPR
, sizeof(double) * index
));
3572 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3574 Imm64(JSValue::encode(data
[index
])),
3575 MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * index
));
3579 cellResult(resultGPR
, node
);
3584 GPRResult
result(this);
3586 callOperation(operationNewArrayBuffer
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()), node
->startConstant(), node
->numConstants());
3588 cellResult(result
.gpr(), node
);
3594 GPRResult
result(this);
3596 callOperation(operationNewRegexp
, result
.gpr(), m_jit
.codeBlock()->regexp(node
->regexpIndex()));
3598 cellResult(result
.gpr(), node
);
3603 ASSERT(node
->child1().useKind() == UntypedUse
);
3604 JSValueOperand
thisValue(this, node
->child1());
3605 GPRReg thisValueGPR
= thisValue
.gpr();
3609 GPRResult
result(this);
3610 callOperation(operationConvertThis
, result
.gpr(), thisValueGPR
);
3612 cellResult(result
.gpr(), node
);
3617 // Note that there is not so much profit to speculate here. The only things we
3618 // speculate on are (1) that it's a cell, since that eliminates cell checks
3619 // later if the proto is reused, and (2) if we have a FinalObject prediction
3620 // then we speculate because we want to get recompiled if it isn't (since
3621 // otherwise we'd start taking slow path a lot).
3623 SpeculateCellOperand
callee(this, node
->child1());
3624 GPRTemporary
result(this);
3625 GPRTemporary
allocator(this);
3626 GPRTemporary
structure(this);
3627 GPRTemporary
scratch(this);
3629 GPRReg calleeGPR
= callee
.gpr();
3630 GPRReg resultGPR
= result
.gpr();
3631 GPRReg allocatorGPR
= allocator
.gpr();
3632 GPRReg structureGPR
= structure
.gpr();
3633 GPRReg scratchGPR
= scratch
.gpr();
3635 MacroAssembler::JumpList slowPath
;
3637 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR
);
3638 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR
);
3639 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, allocatorGPR
));
3640 emitAllocateJSObject(resultGPR
, allocatorGPR
, structureGPR
, TrustedImmPtr(0), scratchGPR
, slowPath
);
3642 addSlowPathGenerator(slowPathCall(slowPath
, this, operationCreateThis
, resultGPR
, calleeGPR
, node
->inlineCapacity()));
3644 cellResult(resultGPR
, node
);
3648 case AllocationProfileWatchpoint
: {
3649 jsCast
<JSFunction
*>(node
->function())->addAllocationProfileWatchpoint(speculationWatchpoint());
3655 GPRTemporary
result(this);
3656 GPRTemporary
allocator(this);
3657 GPRTemporary
scratch(this);
3659 GPRReg resultGPR
= result
.gpr();
3660 GPRReg allocatorGPR
= allocator
.gpr();
3661 GPRReg scratchGPR
= scratch
.gpr();
3663 MacroAssembler::JumpList slowPath
;
3665 Structure
* structure
= node
->structure();
3666 size_t allocationSize
= JSObject::allocationSize(structure
->inlineCapacity());
3667 MarkedAllocator
* allocatorPtr
= &m_jit
.vm()->heap
.allocatorForObjectWithoutDestructor(allocationSize
);
3669 m_jit
.move(TrustedImmPtr(allocatorPtr
), allocatorGPR
);
3670 emitAllocateJSObject(resultGPR
, allocatorGPR
, TrustedImmPtr(structure
), TrustedImmPtr(0), scratchGPR
, slowPath
);
3672 addSlowPathGenerator(slowPathCall(slowPath
, this, operationNewObject
, resultGPR
, structure
));
3674 cellResult(resultGPR
, node
);
3679 GPRTemporary
result(this);
3680 m_jit
.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::Callee
))), result
.gpr());
3681 cellResult(result
.gpr(), node
);
3686 SpeculateCellOperand
callee(this, node
->child1());
3687 m_jit
.storePtr(callee
.gpr(), JITCompiler::addressFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::Callee
))));
3693 SpeculateCellOperand
function(this, node
->child1());
3694 GPRTemporary
result(this, function
);
3695 m_jit
.loadPtr(JITCompiler::Address(function
.gpr(), JSFunction::offsetOfScopeChain()), result
.gpr());
3696 cellResult(result
.gpr(), node
);
3701 GPRTemporary
result(this);
3702 GPRReg resultGPR
= result
.gpr();
3704 m_jit
.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::ScopeChain
))), resultGPR
);
3705 cellResult(resultGPR
, node
);
3710 SpeculateCellOperand
callee(this, node
->child1());
3711 m_jit
.storePtr(callee
.gpr(), JITCompiler::addressFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::ScopeChain
))));
3716 case SkipTopScope
: {
3717 SpeculateCellOperand
scope(this, node
->child1());
3718 GPRTemporary
result(this, scope
);
3719 GPRReg resultGPR
= result
.gpr();
3720 m_jit
.move(scope
.gpr(), resultGPR
);
3721 JITCompiler::Jump activationNotCreated
=
3724 JITCompiler::addressFor(
3725 static_cast<VirtualRegister
>(m_jit
.codeBlock()->activationRegister())));
3726 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, JSScope::offsetOfNext()), resultGPR
);
3727 activationNotCreated
.link(&m_jit
);
3728 cellResult(resultGPR
, node
);
3733 SpeculateCellOperand
scope(this, node
->child1());
3734 GPRTemporary
result(this, scope
);
3735 m_jit
.loadPtr(JITCompiler::Address(scope
.gpr(), JSScope::offsetOfNext()), result
.gpr());
3736 cellResult(result
.gpr(), node
);
3740 case GetScopeRegisters
: {
3741 SpeculateCellOperand
scope(this, node
->child1());
3742 GPRTemporary
result(this);
3743 GPRReg scopeGPR
= scope
.gpr();
3744 GPRReg resultGPR
= result
.gpr();
3746 m_jit
.loadPtr(JITCompiler::Address(scopeGPR
, JSVariableObject::offsetOfRegisters()), resultGPR
);
3747 storageResult(resultGPR
, node
);
3750 case GetScopedVar
: {
3751 StorageOperand
registers(this, node
->child1());
3752 GPRTemporary
result(this);
3753 GPRReg registersGPR
= registers
.gpr();
3754 GPRReg resultGPR
= result
.gpr();
3756 m_jit
.load64(JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
)), resultGPR
);
3757 jsValueResult(resultGPR
, node
);
3760 case PutScopedVar
: {
3761 SpeculateCellOperand
scope(this, node
->child1());
3762 StorageOperand
registers(this, node
->child2());
3763 JSValueOperand
value(this, node
->child3());
3764 GPRTemporary
scratchRegister(this);
3766 GPRReg scopeGPR
= scope
.gpr();
3767 GPRReg registersGPR
= registers
.gpr();
3768 GPRReg valueGPR
= value
.gpr();
3769 GPRReg scratchGPR
= scratchRegister
.gpr();
3771 m_jit
.store64(valueGPR
, JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
)));
3772 writeBarrier(scopeGPR
, valueGPR
, node
->child3(), WriteBarrierForVariableAccess
, scratchGPR
);
3777 if (!node
->prediction()) {
3778 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3782 switch (node
->child1().useKind()) {
3784 SpeculateCellOperand
base(this, node
->child1());
3785 GPRTemporary
result(this, base
);
3787 GPRReg baseGPR
= base
.gpr();
3788 GPRReg resultGPR
= result
.gpr();
3792 cachedGetById(node
->codeOrigin
, baseGPR
, resultGPR
, node
->identifierNumber());
3794 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3799 JSValueOperand
base(this, node
->child1());
3800 GPRTemporary
result(this, base
);
3802 GPRReg baseGPR
= base
.gpr();
3803 GPRReg resultGPR
= result
.gpr();
3807 JITCompiler::Jump notCell
= m_jit
.branchTest64(JITCompiler::NonZero
, baseGPR
, GPRInfo::tagMaskRegister
);
3809 cachedGetById(node
->codeOrigin
, baseGPR
, resultGPR
, node
->identifierNumber(), notCell
);
3811 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3816 RELEASE_ASSERT_NOT_REACHED();
3822 case GetByIdFlush
: {
3823 if (!node
->prediction()) {
3824 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3828 switch (node
->child1().useKind()) {
3830 SpeculateCellOperand
base(this, node
->child1());
3831 GPRReg baseGPR
= base
.gpr();
3833 GPRResult
result(this);
3835 GPRReg resultGPR
= result
.gpr();
3841 cachedGetById(node
->codeOrigin
, baseGPR
, resultGPR
, node
->identifierNumber(), JITCompiler::Jump(), DontSpill
);
3843 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3848 JSValueOperand
base(this, node
->child1());
3849 GPRReg baseGPR
= base
.gpr();
3851 GPRResult
result(this);
3852 GPRReg resultGPR
= result
.gpr();
3857 JITCompiler::Jump notCell
= m_jit
.branchTest64(JITCompiler::NonZero
, baseGPR
, GPRInfo::tagMaskRegister
);
3859 cachedGetById(node
->codeOrigin
, baseGPR
, resultGPR
, node
->identifierNumber(), notCell
, DontSpill
);
3861 jsValueResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
3866 RELEASE_ASSERT_NOT_REACHED();
3872 case GetArrayLength
:
3873 compileGetArrayLength(node
);
3876 case CheckFunction
: {
3877 SpeculateCellOperand
function(this, node
->child1());
3878 speculationCheck(BadFunction
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, function
.gpr(), node
->function()));
3883 case CheckExecutable
: {
3884 SpeculateCellOperand
function(this, node
->child1());
3885 speculationCheck(BadExecutable
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, JITCompiler::Address(function
.gpr(), JSFunction::offsetOfExecutable()), node
->executable()));
3890 case CheckStructure
:
3891 case ForwardCheckStructure
: {
3892 SpeculateCellOperand
base(this, node
->child1());
3894 ASSERT(node
->structureSet().size());
3897 if (node
->child1()->op() == WeakJSConstant
)
3898 exitKind
= BadWeakConstantCache
;
3900 exitKind
= BadCache
;
3902 if (node
->structureSet().size() == 1) {
3904 exitKind
, JSValueSource::unboxedCell(base
.gpr()), 0,
3905 m_jit
.branchWeakPtr(
3906 JITCompiler::NotEqual
,
3907 JITCompiler::Address(base
.gpr(), JSCell::structureOffset()),
3908 node
->structureSet()[0]));
3910 GPRTemporary
structure(this);
3912 m_jit
.loadPtr(JITCompiler::Address(base
.gpr(), JSCell::structureOffset()), structure
.gpr());
3914 JITCompiler::JumpList done
;
3916 for (size_t i
= 0; i
< node
->structureSet().size() - 1; ++i
)
3917 done
.append(m_jit
.branchWeakPtr(JITCompiler::Equal
, structure
.gpr(), node
->structureSet()[i
]));
3920 exitKind
, JSValueSource::unboxedCell(base
.gpr()), 0,
3921 m_jit
.branchWeakPtr(
3922 JITCompiler::NotEqual
, structure
.gpr(), node
->structureSet().last()));
3931 case StructureTransitionWatchpoint
:
3932 case ForwardStructureTransitionWatchpoint
: {
3933 // There is a fascinating question here of what to do about array profiling.
3934 // We *could* try to tell the OSR exit about where the base of the access is.
3935 // The DFG will have kept it alive, though it may not be in a register, and
3936 // we shouldn't really load it since that could be a waste. For now though,
3937 // we'll just rely on the fact that when a watchpoint fires then that's
3938 // quite a hint already.
3940 m_jit
.addWeakReference(node
->structure());
3941 node
->structure()->addTransitionWatchpoint(
3942 speculationWatchpoint(
3943 node
->child1()->op() == WeakJSConstant
? BadWeakConstantCache
: BadCache
));
3945 #if !ASSERT_DISABLED
3946 SpeculateCellOperand
op1(this, node
->child1());
3947 JITCompiler::Jump isOK
= m_jit
.branchPtr(JITCompiler::Equal
, JITCompiler::Address(op1
.gpr(), JSCell::structureOffset()), TrustedImmPtr(node
->structure()));
3951 speculateCell(node
->child1());
3958 case PhantomPutStructure
: {
3959 ASSERT(isKnownCell(node
->child1().node()));
3961 ASSERT(node
->structureTransitionData().previousStructure
->transitionWatchpointSetHasBeenInvalidated());
3962 m_jit
.addWeakReferenceTransition(
3963 node
->codeOrigin
.codeOriginOwner(),
3964 node
->structureTransitionData().previousStructure
,
3965 node
->structureTransitionData().newStructure
);
3970 case PutStructure
: {
3971 ASSERT(node
->structureTransitionData().previousStructure
->transitionWatchpointSetHasBeenInvalidated());
3973 SpeculateCellOperand
base(this, node
->child1());
3974 GPRReg baseGPR
= base
.gpr();
3976 m_jit
.addWeakReferenceTransition(
3977 node
->codeOrigin
.codeOriginOwner(),
3978 node
->structureTransitionData().previousStructure
,
3979 node
->structureTransitionData().newStructure
);
3981 #if ENABLE(WRITE_BARRIER_PROFILING)
3982 // Must always emit this write barrier as the structure transition itself requires it
3983 writeBarrier(baseGPR
, node
->structureTransitionData().newStructure
, WriteBarrierForGenericAccess
);
3986 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(node
->structureTransitionData().newStructure
), MacroAssembler::Address(baseGPR
, JSCell::structureOffset()));
3992 case AllocatePropertyStorage
:
3993 compileAllocatePropertyStorage(node
);
3996 case ReallocatePropertyStorage
:
3997 compileReallocatePropertyStorage(node
);
4000 case GetButterfly
: {
4001 SpeculateCellOperand
base(this, node
->child1());
4002 GPRTemporary
result(this, base
);
4004 GPRReg baseGPR
= base
.gpr();
4005 GPRReg resultGPR
= result
.gpr();
4007 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), resultGPR
);
4009 storageResult(resultGPR
, node
);
4013 case GetIndexedPropertyStorage
: {
4014 compileGetIndexedPropertyStorage(node
);
4019 StorageOperand
storage(this, node
->child1());
4020 GPRTemporary
result(this, storage
);
4022 GPRReg storageGPR
= storage
.gpr();
4023 GPRReg resultGPR
= result
.gpr();
4025 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
4027 m_jit
.load64(JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
)), resultGPR
);
4029 jsValueResult(resultGPR
, node
);
4034 #if ENABLE(WRITE_BARRIER_PROFILING)
4035 SpeculateCellOperand
base(this, node
->child2());
4037 StorageOperand
storage(this, node
->child1());
4038 JSValueOperand
value(this, node
->child3());
4040 GPRReg storageGPR
= storage
.gpr();
4041 GPRReg valueGPR
= value
.gpr();
4043 #if ENABLE(WRITE_BARRIER_PROFILING)
4044 writeBarrier(base
.gpr(), value
.gpr(), node
->child3(), WriteBarrierForPropertyAccess
);
4047 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
4049 m_jit
.store64(valueGPR
, JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
)));
4056 SpeculateCellOperand
base(this, node
->child1());
4057 JSValueOperand
value(this, node
->child2());
4058 GPRTemporary
scratch(this);
4060 GPRReg baseGPR
= base
.gpr();
4061 GPRReg valueGPR
= value
.gpr();
4062 GPRReg scratchGPR
= scratch
.gpr();
4067 cachedPutById(node
->codeOrigin
, baseGPR
, valueGPR
, node
->child2(), scratchGPR
, node
->identifierNumber(), NotDirect
);
4069 noResult(node
, UseChildrenCalledExplicitly
);
4073 case PutByIdDirect
: {
4074 SpeculateCellOperand
base(this, node
->child1());
4075 JSValueOperand
value(this, node
->child2());
4076 GPRTemporary
scratch(this);
4078 GPRReg baseGPR
= base
.gpr();
4079 GPRReg valueGPR
= value
.gpr();
4080 GPRReg scratchGPR
= scratch
.gpr();
4085 cachedPutById(node
->codeOrigin
, baseGPR
, valueGPR
, node
->child2(), scratchGPR
, node
->identifierNumber(), Direct
);
4087 noResult(node
, UseChildrenCalledExplicitly
);
4091 case GetGlobalVar
: {
4092 GPRTemporary
result(this);
4094 m_jit
.load64(node
->registerPointer(), result
.gpr());
4096 jsValueResult(result
.gpr(), node
);
4100 case PutGlobalVar
: {
4101 JSValueOperand
value(this, node
->child1());
4103 if (Heap::isWriteBarrierEnabled()) {
4104 GPRTemporary
scratch(this);
4105 GPRReg scratchReg
= scratch
.gpr();
4107 writeBarrier(m_jit
.globalObjectFor(node
->codeOrigin
), value
.gpr(), node
->child1(), WriteBarrierForVariableAccess
, scratchReg
);
4110 m_jit
.store64(value
.gpr(), node
->registerPointer());
4116 case PutGlobalVarCheck
: {
4117 JSValueOperand
value(this, node
->child1());
4119 WatchpointSet
* watchpointSet
=
4120 m_jit
.globalObjectFor(node
->codeOrigin
)->symbolTable()->get(
4121 identifier(node
->identifierNumberForCheck())->impl()).watchpointSet();
4122 addSlowPathGenerator(
4125 JITCompiler::NonZero
,
4126 JITCompiler::AbsoluteAddress(watchpointSet
->addressOfIsWatched())),
4127 this, operationNotifyGlobalVarWrite
, NoResult
, watchpointSet
));
4129 if (Heap::isWriteBarrierEnabled()) {
4130 GPRTemporary
scratch(this);
4131 GPRReg scratchReg
= scratch
.gpr();
4133 writeBarrier(m_jit
.globalObjectFor(node
->codeOrigin
), value
.gpr(), node
->child1(), WriteBarrierForVariableAccess
, scratchReg
);
4136 m_jit
.store64(value
.gpr(), node
->registerPointer());
4142 case GlobalVarWatchpoint
: {
4143 m_jit
.globalObjectFor(node
->codeOrigin
)->symbolTable()->get(
4144 identifier(node
->identifierNumberForCheck())->impl()).addWatchpoint(
4145 speculationWatchpoint());
4147 #if DFG_ENABLE(JIT_ASSERT)
4148 GPRTemporary
scratch(this);
4149 GPRReg scratchGPR
= scratch
.gpr();
4150 m_jit
.load64(node
->registerPointer(), scratchGPR
);
4151 JITCompiler::Jump ok
= m_jit
.branch64(
4152 JITCompiler::Equal
, scratchGPR
,
4153 TrustedImm64(JSValue::encode(node
->registerPointer()->get())));
4162 case CheckHasInstance
: {
4163 SpeculateCellOperand
base(this, node
->child1());
4164 GPRTemporary
structure(this);
4166 // Speculate that base 'ImplementsDefaultHasInstance'.
4167 m_jit
.loadPtr(MacroAssembler::Address(base
.gpr(), JSCell::structureOffset()), structure
.gpr());
4168 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branchTest8(MacroAssembler::Zero
, MacroAssembler::Address(structure
.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance
)));
4175 compileInstanceOf(node
);
4180 JSValueOperand
value(this, node
->child1());
4181 GPRTemporary
result(this);
4182 GPRTemporary
localGlobalObject(this);
4183 GPRTemporary
remoteGlobalObject(this);
4185 JITCompiler::Jump isCell
= m_jit
.branchTest64(JITCompiler::Zero
, value
.gpr(), GPRInfo::tagMaskRegister
);
4187 m_jit
.compare64(JITCompiler::Equal
, value
.gpr(), TrustedImm32(ValueUndefined
), result
.gpr());
4188 JITCompiler::Jump done
= m_jit
.jump();
4190 isCell
.link(&m_jit
);
4191 JITCompiler::Jump notMasqueradesAsUndefined
;
4192 if (m_jit
.graph().globalObjectFor(node
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
4193 m_jit
.graph().globalObjectFor(node
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
4194 m_jit
.move(TrustedImm32(0), result
.gpr());
4195 notMasqueradesAsUndefined
= m_jit
.jump();
4197 m_jit
.loadPtr(JITCompiler::Address(value
.gpr(), JSCell::structureOffset()), result
.gpr());
4198 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(JITCompiler::NonZero
, JITCompiler::Address(result
.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined
));
4199 m_jit
.move(TrustedImm32(0), result
.gpr());
4200 notMasqueradesAsUndefined
= m_jit
.jump();
4202 isMasqueradesAsUndefined
.link(&m_jit
);
4203 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
4204 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
4205 m_jit
.move(TrustedImmPtr(m_jit
.globalObjectFor(node
->codeOrigin
)), localGlobalObjectGPR
);
4206 m_jit
.loadPtr(JITCompiler::Address(result
.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
4207 m_jit
.comparePtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, result
.gpr());
4210 notMasqueradesAsUndefined
.link(&m_jit
);
4212 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4213 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4218 JSValueOperand
value(this, node
->child1());
4219 GPRTemporary
result(this, value
);
4221 m_jit
.move(value
.gpr(), result
.gpr());
4222 m_jit
.xor64(JITCompiler::TrustedImm32(ValueFalse
), result
.gpr());
4223 m_jit
.test64(JITCompiler::Zero
, result
.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result
.gpr());
4224 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4225 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4230 JSValueOperand
value(this, node
->child1());
4231 GPRTemporary
result(this, value
);
4233 m_jit
.test64(JITCompiler::NonZero
, value
.gpr(), GPRInfo::tagTypeNumberRegister
, result
.gpr());
4234 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4235 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4240 JSValueOperand
value(this, node
->child1());
4241 GPRTemporary
result(this, value
);
4243 JITCompiler::Jump isNotCell
= m_jit
.branchTest64(JITCompiler::NonZero
, value
.gpr(), GPRInfo::tagMaskRegister
);
4245 m_jit
.loadPtr(JITCompiler::Address(value
.gpr(), JSCell::structureOffset()), result
.gpr());
4246 m_jit
.compare8(JITCompiler::Equal
, JITCompiler::Address(result
.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType
), result
.gpr());
4247 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
4248 JITCompiler::Jump done
= m_jit
.jump();
4250 isNotCell
.link(&m_jit
);
4251 m_jit
.move(TrustedImm32(ValueFalse
), result
.gpr());
4254 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4259 JSValueOperand
value(this, node
->child1());
4260 GPRReg valueGPR
= value
.gpr();
4261 GPRResult
result(this);
4262 GPRReg resultGPR
= result
.gpr();
4264 callOperation(operationIsObject
, resultGPR
, valueGPR
);
4265 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
4266 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4271 JSValueOperand
value(this, node
->child1());
4272 GPRReg valueGPR
= value
.gpr();
4273 GPRResult
result(this);
4274 GPRReg resultGPR
= result
.gpr();
4276 callOperation(operationIsFunction
, resultGPR
, valueGPR
);
4277 m_jit
.or32(TrustedImm32(ValueFalse
), resultGPR
);
4278 jsValueResult(result
.gpr(), node
, DataFormatJSBoolean
);
4283 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
4284 GPRReg valueGPR
= value
.gpr();
4285 GPRTemporary
temp(this);
4286 GPRReg tempGPR
= temp
.gpr();
4287 GPRResult
result(this);
4288 GPRReg resultGPR
= result
.gpr();
4289 JITCompiler::JumpList doneJumps
;
4293 ASSERT(node
->child1().useKind() == UntypedUse
|| node
->child1().useKind() == CellUse
|| node
->child1().useKind() == StringUse
);
4295 JITCompiler::Jump isNotCell
= m_jit
.branchTest64(JITCompiler::NonZero
, valueGPR
, GPRInfo::tagMaskRegister
);
4296 if (node
->child1().useKind() != UntypedUse
)
4297 DFG_TYPE_CHECK(JSValueSource(valueGPR
), node
->child1(), SpecCell
, isNotCell
);
4299 if (!node
->child1()->shouldSpeculateObject() || node
->child1().useKind() == StringUse
) {
4300 m_jit
.loadPtr(JITCompiler::Address(valueGPR
, JSCell::structureOffset()), tempGPR
);
4301 JITCompiler::Jump notString
= m_jit
.branch8(JITCompiler::NotEqual
, JITCompiler::Address(tempGPR
, Structure::typeInfoTypeOffset()), TrustedImm32(StringType
));
4302 if (node
->child1().useKind() == StringUse
)
4303 DFG_TYPE_CHECK(JSValueSource(valueGPR
), node
->child1(), SpecString
, notString
);
4304 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.stringString()), resultGPR
);
4305 doneJumps
.append(m_jit
.jump());
4306 if (node
->child1().useKind() != StringUse
) {
4307 notString
.link(&m_jit
);
4308 callOperation(operationTypeOf
, resultGPR
, valueGPR
);
4309 doneJumps
.append(m_jit
.jump());
4312 callOperation(operationTypeOf
, resultGPR
, valueGPR
);
4313 doneJumps
.append(m_jit
.jump());
4316 if (node
->child1().useKind() == UntypedUse
) {
4317 isNotCell
.link(&m_jit
);
4318 JITCompiler::Jump notNumber
= m_jit
.branchTest64(JITCompiler::Zero
, valueGPR
, GPRInfo::tagTypeNumberRegister
);
4319 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.numberString()), resultGPR
);
4320 doneJumps
.append(m_jit
.jump());
4321 notNumber
.link(&m_jit
);
4323 JITCompiler::Jump notUndefined
= m_jit
.branch64(JITCompiler::NotEqual
, valueGPR
, JITCompiler::TrustedImm64(ValueUndefined
));
4324 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.undefinedString()), resultGPR
);
4325 doneJumps
.append(m_jit
.jump());
4326 notUndefined
.link(&m_jit
);
4328 JITCompiler::Jump notNull
= m_jit
.branch64(JITCompiler::NotEqual
, valueGPR
, JITCompiler::TrustedImm64(ValueNull
));
4329 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.objectString()), resultGPR
);
4330 doneJumps
.append(m_jit
.jump());
4331 notNull
.link(&m_jit
);
4333 // Only boolean left
4334 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.booleanString()), resultGPR
);
4336 doneJumps
.link(&m_jit
);
4337 cellResult(resultGPR
, node
);
4346 #if ENABLE(DEBUG_WITH_BREAKPOINT)
4349 RELEASE_ASSERT_NOT_REACHED();
4360 GPRResult
result(this);
4361 ResolveOperationData
& data
= m_jit
.graph().m_resolveOperationsData
[node
->resolveOperationsDataIndex()];
4362 callOperation(operationResolve
, result
.gpr(), identifier(data
.identifierNumber
), data
.resolveOperations
);
4363 jsValueResult(result
.gpr(), node
);
4369 GPRResult
result(this);
4370 ResolveOperationData
& data
= m_jit
.graph().m_resolveOperationsData
[node
->resolveOperationsDataIndex()];
4371 callOperation(operationResolveBase
, result
.gpr(), identifier(data
.identifierNumber
), data
.resolveOperations
, data
.putToBaseOperation
);
4372 jsValueResult(result
.gpr(), node
);
4376 case ResolveBaseStrictPut
: {
4378 GPRResult
result(this);
4379 ResolveOperationData
& data
= m_jit
.graph().m_resolveOperationsData
[node
->resolveOperationsDataIndex()];
4380 callOperation(operationResolveBaseStrictPut
, result
.gpr(), identifier(data
.identifierNumber
), data
.resolveOperations
, data
.putToBaseOperation
);
4381 jsValueResult(result
.gpr(), node
);
4385 case ResolveGlobal
: {
4386 GPRTemporary
globalObject(this);
4387 GPRTemporary
resolveInfo(this);
4388 GPRTemporary
result(this);
4390 GPRReg globalObjectGPR
= globalObject
.gpr();
4391 GPRReg resolveInfoGPR
= resolveInfo
.gpr();
4392 GPRReg resultGPR
= result
.gpr();
4394 ResolveGlobalData
& data
= m_jit
.graph().m_resolveGlobalData
[node
->resolveGlobalDataIndex()];
4395 ResolveOperation
* resolveOperationAddress
= &(data
.resolveOperations
->data()[data
.resolvePropertyIndex
]);
4397 // Check Structure of global object
4398 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.globalObjectFor(node
->codeOrigin
)), globalObjectGPR
);
4399 m_jit
.move(JITCompiler::TrustedImmPtr(resolveOperationAddress
), resolveInfoGPR
);
4400 m_jit
.loadPtr(JITCompiler::Address(resolveInfoGPR
, OBJECT_OFFSETOF(ResolveOperation
, m_structure
)), resultGPR
);
4401 JITCompiler::Jump structuresDontMatch
= m_jit
.branchPtr(JITCompiler::NotEqual
, resultGPR
, JITCompiler::Address(globalObjectGPR
, JSCell::structureOffset()));
4404 m_jit
.load32(JITCompiler::Address(resolveInfoGPR
, OBJECT_OFFSETOF(ResolveOperation
, m_offset
)), resolveInfoGPR
);
4405 #if DFG_ENABLE(JIT_ASSERT)
4406 JITCompiler::Jump isOutOfLine
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, resolveInfoGPR
, TrustedImm32(firstOutOfLineOffset
));
4408 isOutOfLine
.link(&m_jit
);
4410 m_jit
.neg32(resolveInfoGPR
);
4411 m_jit
.signExtend32ToPtr(resolveInfoGPR
, resolveInfoGPR
);
4412 m_jit
.loadPtr(JITCompiler::Address(globalObjectGPR
, JSObject::butterflyOffset()), resultGPR
);
4413 m_jit
.load64(JITCompiler::BaseIndex(resultGPR
, resolveInfoGPR
, JITCompiler::TimesEight
, (firstOutOfLineOffset
- 2) * static_cast<ptrdiff_t>(sizeof(JSValue
))), resultGPR
);
4415 addSlowPathGenerator(
4417 structuresDontMatch
, this, operationResolveGlobal
,
4418 resultGPR
, resolveInfoGPR
, globalObjectGPR
,
4419 &m_jit
.codeBlock()->identifier(data
.identifierNumber
)));
4421 jsValueResult(resultGPR
, node
);
4425 case CreateActivation
: {
4426 RELEASE_ASSERT(!node
->codeOrigin
.inlineCallFrame
);
4428 JSValueOperand
value(this, node
->child1());
4429 GPRTemporary
result(this, value
);
4431 GPRReg valueGPR
= value
.gpr();
4432 GPRReg resultGPR
= result
.gpr();
4434 m_jit
.move(valueGPR
, resultGPR
);
4436 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
);
4438 addSlowPathGenerator(
4439 slowPathCall(notCreated
, this, operationCreateActivation
, resultGPR
));
4441 cellResult(resultGPR
, node
);
4445 case CreateArguments
: {
4446 JSValueOperand
value(this, node
->child1());
4447 GPRTemporary
result(this, value
);
4449 GPRReg valueGPR
= value
.gpr();
4450 GPRReg resultGPR
= result
.gpr();
4452 m_jit
.move(valueGPR
, resultGPR
);
4454 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
);
4456 if (node
->codeOrigin
.inlineCallFrame
) {
4457 addSlowPathGenerator(
4459 notCreated
, this, operationCreateInlinedArguments
, resultGPR
,
4460 node
->codeOrigin
.inlineCallFrame
));
4462 addSlowPathGenerator(
4463 slowPathCall(notCreated
, this, operationCreateArguments
, resultGPR
));
4466 cellResult(resultGPR
, node
);
4470 case TearOffActivation
: {
4471 RELEASE_ASSERT(!node
->codeOrigin
.inlineCallFrame
);
4473 JSValueOperand
activationValue(this, node
->child1());
4474 GPRTemporary
scratch(this);
4475 GPRReg activationValueGPR
= activationValue
.gpr();
4476 GPRReg scratchGPR
= scratch
.gpr();
4478 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, activationValueGPR
);
4480 SharedSymbolTable
* symbolTable
= m_jit
.symbolTableFor(node
->codeOrigin
);
4481 int registersOffset
= JSActivation::registersOffset(symbolTable
);
4483 int captureEnd
= symbolTable
->captureEnd();
4484 for (int i
= symbolTable
->captureStart(); i
< captureEnd
; ++i
) {
4486 JITCompiler::Address(
4487 GPRInfo::callFrameRegister
, i
* sizeof(Register
)), scratchGPR
);
4489 scratchGPR
, JITCompiler::Address(
4490 activationValueGPR
, registersOffset
+ i
* sizeof(Register
)));
4492 m_jit
.addPtr(TrustedImm32(registersOffset
), activationValueGPR
, scratchGPR
);
4493 m_jit
.storePtr(scratchGPR
, JITCompiler::Address(activationValueGPR
, JSActivation::offsetOfRegisters()));
4495 notCreated
.link(&m_jit
);
4500 case TearOffArguments
: {
4501 JSValueOperand
unmodifiedArgumentsValue(this, node
->child1());
4502 JSValueOperand
activationValue(this, node
->child2());
4503 GPRReg unmodifiedArgumentsValueGPR
= unmodifiedArgumentsValue
.gpr();
4504 GPRReg activationValueGPR
= activationValue
.gpr();
4506 JITCompiler::Jump created
= m_jit
.branchTest64(JITCompiler::NonZero
, unmodifiedArgumentsValueGPR
);
4508 if (node
->codeOrigin
.inlineCallFrame
) {
4509 addSlowPathGenerator(
4511 created
, this, operationTearOffInlinedArguments
, NoResult
,
4512 unmodifiedArgumentsValueGPR
, activationValueGPR
, node
->codeOrigin
.inlineCallFrame
));
4514 addSlowPathGenerator(
4516 created
, this, operationTearOffArguments
, NoResult
, unmodifiedArgumentsValueGPR
, activationValueGPR
));
4523 case GetMyArgumentsLength
: {
4524 GPRTemporary
result(this);
4525 GPRReg resultGPR
= result
.gpr();
4527 if (!isEmptySpeculation(
4528 m_state
.variables().operand(
4529 m_jit
.graph().argumentsRegisterFor(node
->codeOrigin
)).m_type
)) {
4531 ArgumentsEscaped
, JSValueRegs(), 0,
4533 JITCompiler::NonZero
,
4534 JITCompiler::addressFor(
4535 m_jit
.argumentsRegisterFor(node
->codeOrigin
))));
4538 RELEASE_ASSERT(!node
->codeOrigin
.inlineCallFrame
);
4539 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultGPR
);
4540 m_jit
.sub32(TrustedImm32(1), resultGPR
);
4541 integerResult(resultGPR
, node
);
4545 case GetMyArgumentsLengthSafe
: {
4546 GPRTemporary
result(this);
4547 GPRReg resultGPR
= result
.gpr();
4549 JITCompiler::Jump created
= m_jit
.branchTest64(
4550 JITCompiler::NonZero
,
4551 JITCompiler::addressFor(
4552 m_jit
.argumentsRegisterFor(node
->codeOrigin
)));
4554 if (node
->codeOrigin
.inlineCallFrame
) {
4556 Imm64(JSValue::encode(jsNumber(node
->codeOrigin
.inlineCallFrame
->arguments
.size() - 1))),
4559 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultGPR
);
4560 m_jit
.sub32(TrustedImm32(1), resultGPR
);
4561 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, resultGPR
);
4564 // FIXME: the slow path generator should perform a forward speculation that the
4565 // result is an integer. For now we postpone the speculation by having this return
4568 addSlowPathGenerator(
4570 created
, this, operationGetArgumentsLength
, resultGPR
,
4571 m_jit
.argumentsRegisterFor(node
->codeOrigin
)));
4573 jsValueResult(resultGPR
, node
);
4577 case GetMyArgumentByVal
: {
4578 SpeculateStrictInt32Operand
index(this, node
->child1());
4579 GPRTemporary
result(this);
4580 GPRReg indexGPR
= index
.gpr();
4581 GPRReg resultGPR
= result
.gpr();
4583 if (!isEmptySpeculation(
4584 m_state
.variables().operand(
4585 m_jit
.graph().argumentsRegisterFor(node
->codeOrigin
)).m_type
)) {
4587 ArgumentsEscaped
, JSValueRegs(), 0,
4589 JITCompiler::NonZero
,
4590 JITCompiler::addressFor(
4591 m_jit
.argumentsRegisterFor(node
->codeOrigin
))));
4594 m_jit
.add32(TrustedImm32(1), indexGPR
, resultGPR
);
4595 if (node
->codeOrigin
.inlineCallFrame
) {
4597 Uncountable
, JSValueRegs(), 0,
4599 JITCompiler::AboveOrEqual
,
4601 Imm32(node
->codeOrigin
.inlineCallFrame
->arguments
.size())));
4604 Uncountable
, JSValueRegs(), 0,
4606 JITCompiler::AboveOrEqual
,
4608 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4611 JITCompiler::JumpList slowArgument
;
4612 JITCompiler::JumpList slowArgumentOutOfBounds
;
4613 if (const SlowArgument
* slowArguments
= m_jit
.symbolTableFor(node
->codeOrigin
)->slowArguments()) {
4614 slowArgumentOutOfBounds
.append(
4616 JITCompiler::AboveOrEqual
, indexGPR
,
4617 Imm32(m_jit
.symbolTableFor(node
->codeOrigin
)->parameterCount())));
4619 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4620 m_jit
.move(ImmPtr(slowArguments
), resultGPR
);
4622 JITCompiler::BaseIndex(
4623 resultGPR
, indexGPR
, JITCompiler::TimesEight
,
4624 OBJECT_OFFSETOF(SlowArgument
, index
)),
4626 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4628 JITCompiler::BaseIndex(
4629 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
, m_jit
.offsetOfLocals(node
->codeOrigin
)),
4631 slowArgument
.append(m_jit
.jump());
4633 slowArgumentOutOfBounds
.link(&m_jit
);
4635 m_jit
.neg32(resultGPR
);
4636 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4639 JITCompiler::BaseIndex(
4640 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
, m_jit
.offsetOfArgumentsIncludingThis(node
->codeOrigin
)),
4643 slowArgument
.link(&m_jit
);
4644 jsValueResult(resultGPR
, node
);
4648 case GetMyArgumentByValSafe
: {
4649 SpeculateStrictInt32Operand
index(this, node
->child1());
4650 GPRTemporary
result(this);
4651 GPRReg indexGPR
= index
.gpr();
4652 GPRReg resultGPR
= result
.gpr();
4654 JITCompiler::JumpList slowPath
;
4657 JITCompiler::NonZero
,
4658 JITCompiler::addressFor(
4659 m_jit
.argumentsRegisterFor(node
->codeOrigin
))));
4661 m_jit
.add32(TrustedImm32(1), indexGPR
, resultGPR
);
4662 if (node
->codeOrigin
.inlineCallFrame
) {
4665 JITCompiler::AboveOrEqual
,
4667 Imm32(node
->codeOrigin
.inlineCallFrame
->arguments
.size())));
4671 JITCompiler::AboveOrEqual
,
4673 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4676 JITCompiler::JumpList slowArgument
;
4677 JITCompiler::JumpList slowArgumentOutOfBounds
;
4678 if (const SlowArgument
* slowArguments
= m_jit
.symbolTableFor(node
->codeOrigin
)->slowArguments()) {
4679 slowArgumentOutOfBounds
.append(
4681 JITCompiler::AboveOrEqual
, indexGPR
,
4682 Imm32(m_jit
.symbolTableFor(node
->codeOrigin
)->parameterCount())));
4684 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4685 m_jit
.move(ImmPtr(slowArguments
), resultGPR
);
4687 JITCompiler::BaseIndex(
4688 resultGPR
, indexGPR
, JITCompiler::TimesEight
,
4689 OBJECT_OFFSETOF(SlowArgument
, index
)),
4691 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4693 JITCompiler::BaseIndex(
4694 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
, m_jit
.offsetOfLocals(node
->codeOrigin
)),
4696 slowArgument
.append(m_jit
.jump());
4698 slowArgumentOutOfBounds
.link(&m_jit
);
4700 m_jit
.neg32(resultGPR
);
4701 m_jit
.signExtend32ToPtr(resultGPR
, resultGPR
);
4704 JITCompiler::BaseIndex(
4705 GPRInfo::callFrameRegister
, resultGPR
, JITCompiler::TimesEight
, m_jit
.offsetOfArgumentsIncludingThis(node
->codeOrigin
)),
4708 if (node
->codeOrigin
.inlineCallFrame
) {
4709 addSlowPathGenerator(
4711 slowPath
, this, operationGetInlinedArgumentByVal
, resultGPR
,
4712 m_jit
.argumentsRegisterFor(node
->codeOrigin
),
4713 node
->codeOrigin
.inlineCallFrame
,
4716 addSlowPathGenerator(
4718 slowPath
, this, operationGetArgumentByVal
, resultGPR
,
4719 m_jit
.argumentsRegisterFor(node
->codeOrigin
),
4723 slowArgument
.link(&m_jit
);
4724 jsValueResult(resultGPR
, node
);
4728 case CheckArgumentsNotCreated
: {
4729 ASSERT(!isEmptySpeculation(
4730 m_state
.variables().operand(
4731 m_jit
.graph().argumentsRegisterFor(node
->codeOrigin
)).m_type
));
4733 ArgumentsEscaped
, JSValueRegs(), 0,
4735 JITCompiler::NonZero
,
4736 JITCompiler::addressFor(
4737 m_jit
.argumentsRegisterFor(node
->codeOrigin
))));
4742 case NewFunctionNoCheck
:
4743 compileNewFunctionNoCheck(node
);
4747 JSValueOperand
value(this, node
->child1());
4748 GPRTemporary
result(this, value
);
4750 GPRReg valueGPR
= value
.gpr();
4751 GPRReg resultGPR
= result
.gpr();
4753 m_jit
.move(valueGPR
, resultGPR
);
4755 JITCompiler::Jump notCreated
= m_jit
.branchTest64(JITCompiler::Zero
, resultGPR
);
4757 addSlowPathGenerator(
4759 notCreated
, this, operationNewFunction
,
4760 resultGPR
, m_jit
.codeBlock()->functionDecl(node
->functionDeclIndex())));
4762 jsValueResult(resultGPR
, node
);
4766 case NewFunctionExpression
:
4767 compileNewFunctionExpression(node
);
4770 case CountExecution
:
4771 m_jit
.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node
->executionCounter()->address()));
4775 // We should never get to the point of code emission for a GarbageValue
4779 case ForceOSRExit
: {
4780 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
4784 case CheckWatchdogTimer
:
4786 WatchdogTimerFired
, JSValueRegs(), 0,
4788 JITCompiler::NonZero
,
4789 JITCompiler::AbsoluteAddress(m_jit
.vm()->watchdog
.timerDidFireAddress())));
4793 DFG_NODE_DO_TO_CHILDREN(m_jit
.graph(), node
, speculate
);
4803 RELEASE_ASSERT_NOT_REACHED();
4807 RELEASE_ASSERT_NOT_REACHED();
4811 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
4812 m_jit
.clearRegisterAllocationOffsets();
4818 if (node
->hasResult() && node
->mustGenerate())
4824 } } // namespace JSC::DFG