2 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2011 Intel Corporation. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "DFGSpeculativeJIT.h"
32 #include "ArrayPrototype.h"
33 #include "DFGAbstractInterpreterInlines.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGOperations.h"
36 #include "DFGSlowPathGenerator.h"
38 #include "JSActivation.h"
39 #include "ObjectPrototype.h"
40 #include "JSCInlines.h"
42 namespace JSC
{ namespace DFG
{
46 bool SpeculativeJIT::fillJSValue(Edge edge
, GPRReg
& tagGPR
, GPRReg
& payloadGPR
, FPRReg
& fpr
)
48 // FIXME: For double we could fill with a FPR.
51 VirtualRegister virtualRegister
= edge
->virtualRegister();
52 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
54 switch (info
.registerFormat()) {
55 case DataFormatNone
: {
57 if (edge
->hasConstant()) {
59 payloadGPR
= allocate();
60 m_jit
.move(Imm32(valueOfJSConstant(edge
.node()).tag()), tagGPR
);
61 m_jit
.move(Imm32(valueOfJSConstant(edge
.node()).payload()), payloadGPR
);
62 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderConstant
);
63 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderConstant
);
64 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, isInt32Constant(edge
.node()) ? DataFormatJSInt32
: DataFormatJS
);
66 DataFormat spillFormat
= info
.spillFormat();
67 ASSERT(spillFormat
!= DataFormatNone
&& spillFormat
!= DataFormatStorage
);
69 payloadGPR
= allocate();
70 switch (spillFormat
) {
72 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), tagGPR
);
73 spillFormat
= DataFormatJSInt32
; // This will be used as the new register format.
76 m_jit
.move(TrustedImm32(JSValue::CellTag
), tagGPR
);
77 spillFormat
= DataFormatJSCell
; // This will be used as the new register format.
79 case DataFormatBoolean
:
80 m_jit
.move(TrustedImm32(JSValue::BooleanTag
), tagGPR
);
81 spillFormat
= DataFormatJSBoolean
; // This will be used as the new register format.
84 m_jit
.load32(JITCompiler::tagFor(virtualRegister
), tagGPR
);
87 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), payloadGPR
);
88 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderSpilled
);
89 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderSpilled
);
90 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, spillFormat
== DataFormatJSDouble
? DataFormatJS
: spillFormat
);
98 case DataFormatBoolean
: {
99 GPRReg gpr
= info
.gpr();
100 // If the register has already been locked we need to take a copy.
101 if (m_gprs
.isLocked(gpr
)) {
102 payloadGPR
= allocate();
103 m_jit
.move(gpr
, payloadGPR
);
109 uint32_t tag
= JSValue::EmptyValueTag
;
110 DataFormat fillFormat
= DataFormatJS
;
111 switch (info
.registerFormat()) {
112 case DataFormatInt32
:
113 tag
= JSValue::Int32Tag
;
114 fillFormat
= DataFormatJSInt32
;
117 tag
= JSValue::CellTag
;
118 fillFormat
= DataFormatJSCell
;
120 case DataFormatBoolean
:
121 tag
= JSValue::BooleanTag
;
122 fillFormat
= DataFormatJSBoolean
;
125 RELEASE_ASSERT_NOT_REACHED();
128 m_jit
.move(TrustedImm32(tag
), tagGPR
);
130 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderJS
);
131 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderJS
);
132 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, fillFormat
);
136 case DataFormatJSDouble
:
138 case DataFormatJSInt32
:
139 case DataFormatJSCell
:
140 case DataFormatJSBoolean
: {
141 tagGPR
= info
.tagGPR();
142 payloadGPR
= info
.payloadGPR();
144 m_gprs
.lock(payloadGPR
);
148 case DataFormatStorage
:
149 case DataFormatDouble
:
150 // this type currently never occurs
151 RELEASE_ASSERT_NOT_REACHED();
154 RELEASE_ASSERT_NOT_REACHED();
159 void SpeculativeJIT::cachedGetById(
160 CodeOrigin codeOrigin
, GPRReg baseTagGPROrNone
, GPRReg basePayloadGPR
, GPRReg resultTagGPR
, GPRReg resultPayloadGPR
,
161 unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
163 // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
164 // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to
165 // trip over one move instruction.
166 if (basePayloadGPR
== resultTagGPR
) {
167 RELEASE_ASSERT(basePayloadGPR
!= resultPayloadGPR
);
169 if (baseTagGPROrNone
== resultPayloadGPR
) {
170 m_jit
.swap(basePayloadGPR
, baseTagGPROrNone
);
171 baseTagGPROrNone
= resultTagGPR
;
173 m_jit
.move(basePayloadGPR
, resultPayloadGPR
);
174 basePayloadGPR
= resultPayloadGPR
;
177 JITGetByIdGenerator
gen(
178 m_jit
.codeBlock(), codeOrigin
, usedRegisters(),
179 JSValueRegs(baseTagGPROrNone
, basePayloadGPR
),
180 JSValueRegs(resultTagGPR
, resultPayloadGPR
), spillMode
);
182 gen
.generateFastPath(m_jit
);
184 JITCompiler::JumpList slowCases
;
185 if (slowPathTarget
.isSet())
186 slowCases
.append(slowPathTarget
);
187 slowCases
.append(gen
.slowPathJump());
189 OwnPtr
<SlowPathGenerator
> slowPath
;
190 if (baseTagGPROrNone
== InvalidGPRReg
) {
191 slowPath
= slowPathCall(
192 slowCases
, this, operationGetByIdOptimize
,
193 JSValueRegs(resultTagGPR
, resultPayloadGPR
), gen
.stubInfo(),
194 static_cast<int32_t>(JSValue::CellTag
), basePayloadGPR
,
195 identifierUID(identifierNumber
));
197 slowPath
= slowPathCall(
198 slowCases
, this, operationGetByIdOptimize
,
199 JSValueRegs(resultTagGPR
, resultPayloadGPR
), gen
.stubInfo(), baseTagGPROrNone
,
200 basePayloadGPR
, identifierUID(identifierNumber
));
203 m_jit
.addGetById(gen
, slowPath
.get());
204 addSlowPathGenerator(slowPath
.release());
207 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin
, GPRReg basePayloadGPR
, GPRReg valueTagGPR
, GPRReg valuePayloadGPR
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind putKind
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
209 JITPutByIdGenerator
gen(
210 m_jit
.codeBlock(), codeOrigin
, usedRegisters(),
211 JSValueRegs::payloadOnly(basePayloadGPR
), JSValueRegs(valueTagGPR
, valuePayloadGPR
),
212 scratchGPR
, spillMode
, m_jit
.ecmaModeFor(codeOrigin
), putKind
);
214 gen
.generateFastPath(m_jit
);
216 JITCompiler::JumpList slowCases
;
217 if (slowPathTarget
.isSet())
218 slowCases
.append(slowPathTarget
);
219 slowCases
.append(gen
.slowPathJump());
221 OwnPtr
<SlowPathGenerator
> slowPath
= slowPathCall(
222 slowCases
, this, gen
.slowPathFunction(), NoResult
, gen
.stubInfo(), valueTagGPR
,
223 valuePayloadGPR
, basePayloadGPR
, identifierUID(identifierNumber
));
225 m_jit
.addPutById(gen
, slowPath
.get());
226 addSlowPathGenerator(slowPath
.release());
229 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
)
231 JSValueOperand
arg(this, operand
);
232 GPRReg argTagGPR
= arg
.tagGPR();
233 GPRReg argPayloadGPR
= arg
.payloadGPR();
235 GPRTemporary
resultPayload(this, Reuse
, arg
, PayloadWord
);
236 GPRReg resultPayloadGPR
= resultPayload
.gpr();
238 JITCompiler::Jump notCell
;
239 JITCompiler::Jump notMasqueradesAsUndefined
;
240 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
241 if (!isKnownCell(operand
.node()))
242 notCell
= branchNotCell(arg
.jsValueRegs());
244 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR
);
245 notMasqueradesAsUndefined
= m_jit
.jump();
247 GPRTemporary
localGlobalObject(this);
248 GPRTemporary
remoteGlobalObject(this);
250 if (!isKnownCell(operand
.node()))
251 notCell
= branchNotCell(arg
.jsValueRegs());
253 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
254 JITCompiler::NonZero
,
255 JITCompiler::Address(argPayloadGPR
, JSCell::typeInfoFlagsOffset()),
256 JITCompiler::TrustedImm32(MasqueradesAsUndefined
));
258 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR
);
259 notMasqueradesAsUndefined
= m_jit
.jump();
261 isMasqueradesAsUndefined
.link(&m_jit
);
262 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
263 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
264 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
265 m_jit
.loadPtr(JITCompiler::Address(argPayloadGPR
, JSCell::structureIDOffset()), resultPayloadGPR
);
266 m_jit
.loadPtr(JITCompiler::Address(resultPayloadGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
267 m_jit
.compare32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, resultPayloadGPR
);
270 if (!isKnownCell(operand
.node())) {
271 JITCompiler::Jump done
= m_jit
.jump();
273 notCell
.link(&m_jit
);
274 // null or undefined?
275 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
276 m_jit
.move(argTagGPR
, resultPayloadGPR
);
277 m_jit
.or32(TrustedImm32(1), resultPayloadGPR
);
278 m_jit
.compare32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultPayloadGPR
, TrustedImm32(JSValue::NullTag
), resultPayloadGPR
);
283 notMasqueradesAsUndefined
.link(&m_jit
);
285 booleanResult(resultPayloadGPR
, m_currentNode
);
288 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand
, Node
* branchNode
, bool invert
)
290 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
291 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
293 if (taken
== nextBlock()) {
295 BasicBlock
* tmp
= taken
;
300 JSValueOperand
arg(this, operand
);
301 GPRReg argTagGPR
= arg
.tagGPR();
302 GPRReg argPayloadGPR
= arg
.payloadGPR();
304 GPRTemporary
result(this, Reuse
, arg
, TagWord
);
305 GPRReg resultGPR
= result
.gpr();
307 JITCompiler::Jump notCell
;
309 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
310 if (!isKnownCell(operand
.node()))
311 notCell
= branchNotCell(arg
.jsValueRegs());
313 jump(invert
? taken
: notTaken
, ForceJump
);
315 GPRTemporary
localGlobalObject(this);
316 GPRTemporary
remoteGlobalObject(this);
318 if (!isKnownCell(operand
.node()))
319 notCell
= branchNotCell(arg
.jsValueRegs());
321 branchTest8(JITCompiler::Zero
,
322 JITCompiler::Address(argPayloadGPR
, JSCell::typeInfoFlagsOffset()),
323 JITCompiler::TrustedImm32(MasqueradesAsUndefined
),
324 invert
? taken
: notTaken
);
326 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
327 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
328 m_jit
.move(TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
329 m_jit
.loadPtr(JITCompiler::Address(argPayloadGPR
, JSCell::structureIDOffset()), resultGPR
);
330 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
331 branchPtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, invert
? notTaken
: taken
);
334 if (!isKnownCell(operand
.node())) {
335 jump(notTaken
, ForceJump
);
337 notCell
.link(&m_jit
);
338 // null or undefined?
339 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
340 m_jit
.move(argTagGPR
, resultGPR
);
341 m_jit
.or32(TrustedImm32(1), resultGPR
);
342 branch32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm32(JSValue::NullTag
), taken
);
348 bool SpeculativeJIT::nonSpeculativeCompareNull(Node
* node
, Edge operand
, bool invert
)
350 unsigned branchIndexInBlock
= detectPeepHoleBranch();
351 if (branchIndexInBlock
!= UINT_MAX
) {
352 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
354 ASSERT(node
->adjustedRefCount() == 1);
356 nonSpeculativePeepholeBranchNull(operand
, branchNode
, invert
);
360 m_indexInBlock
= branchIndexInBlock
;
361 m_currentNode
= branchNode
;
366 nonSpeculativeNonPeepholeCompareNull(operand
, invert
);
371 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node
* node
, Node
* branchNode
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
373 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
374 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
376 JITCompiler::ResultCondition callResultCondition
= JITCompiler::NonZero
;
378 // The branch instruction will branch to the taken block.
379 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
380 if (taken
== nextBlock()) {
381 cond
= JITCompiler::invert(cond
);
382 callResultCondition
= JITCompiler::Zero
;
383 BasicBlock
* tmp
= taken
;
388 JSValueOperand
arg1(this, node
->child1());
389 JSValueOperand
arg2(this, node
->child2());
390 GPRReg arg1TagGPR
= arg1
.tagGPR();
391 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
392 GPRReg arg2TagGPR
= arg2
.tagGPR();
393 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
395 JITCompiler::JumpList slowPath
;
397 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
398 GPRResult
result(this);
399 GPRReg resultGPR
= result
.gpr();
405 callOperation(helperFunction
, resultGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
407 branchTest32(callResultCondition
, resultGPR
, taken
);
409 GPRTemporary
result(this);
410 GPRReg resultGPR
= result
.gpr();
415 if (!isKnownInteger(node
->child1().node()))
416 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg1TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
417 if (!isKnownInteger(node
->child2().node()))
418 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg2TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
420 branch32(cond
, arg1PayloadGPR
, arg2PayloadGPR
, taken
);
422 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
423 jump(notTaken
, ForceJump
);
425 slowPath
.link(&m_jit
);
427 silentSpillAllRegisters(resultGPR
);
428 callOperation(helperFunction
, resultGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
429 silentFillAllRegisters(resultGPR
);
431 branchTest32(callResultCondition
, resultGPR
, taken
);
437 m_indexInBlock
= m_block
->size() - 1;
438 m_currentNode
= branchNode
;
441 template<typename JumpType
>
442 class CompareAndBoxBooleanSlowPathGenerator
443 : public CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
> {
445 CompareAndBoxBooleanSlowPathGenerator(
446 JumpType from
, SpeculativeJIT
* jit
,
447 S_JITOperation_EJJ function
, GPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
,
448 GPRReg arg2Tag
, GPRReg arg2Payload
)
449 : CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
>(
450 from
, jit
, function
, NeedToSpill
, result
)
452 , m_arg1Payload(arg1Payload
)
454 , m_arg2Payload(arg2Payload
)
459 virtual void generateInternal(SpeculativeJIT
* jit
)
464 this->m_function
, this->m_result
, m_arg1Tag
, m_arg1Payload
, m_arg2Tag
,
466 jit
->m_jit
.and32(JITCompiler::TrustedImm32(1), this->m_result
);
472 GPRReg m_arg1Payload
;
474 GPRReg m_arg2Payload
;
477 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node
* node
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
479 JSValueOperand
arg1(this, node
->child1());
480 JSValueOperand
arg2(this, node
->child2());
481 GPRReg arg1TagGPR
= arg1
.tagGPR();
482 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
483 GPRReg arg2TagGPR
= arg2
.tagGPR();
484 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
486 JITCompiler::JumpList slowPath
;
488 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
489 GPRResult
result(this);
490 GPRReg resultPayloadGPR
= result
.gpr();
496 callOperation(helperFunction
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
498 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
500 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
501 GPRReg resultPayloadGPR
= resultPayload
.gpr();
506 if (!isKnownInteger(node
->child1().node()))
507 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg1TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
508 if (!isKnownInteger(node
->child2().node()))
509 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg2TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
511 m_jit
.compare32(cond
, arg1PayloadGPR
, arg2PayloadGPR
, resultPayloadGPR
);
513 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
514 addSlowPathGenerator(adoptPtr(
515 new CompareAndBoxBooleanSlowPathGenerator
<JITCompiler::JumpList
>(
516 slowPath
, this, helperFunction
, resultPayloadGPR
, arg1TagGPR
,
517 arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
)));
520 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
524 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node
* node
, Node
* branchNode
, bool invert
)
526 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
527 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
529 // The branch instruction will branch to the taken block.
530 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
531 if (taken
== nextBlock()) {
533 BasicBlock
* tmp
= taken
;
538 JSValueOperand
arg1(this, node
->child1());
539 JSValueOperand
arg2(this, node
->child2());
540 GPRReg arg1TagGPR
= arg1
.tagGPR();
541 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
542 GPRReg arg2TagGPR
= arg2
.tagGPR();
543 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
545 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
546 GPRReg resultPayloadGPR
= resultPayload
.gpr();
551 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
552 // see if we get lucky: if the arguments are cells and they reference the same
553 // cell, then they must be strictly equal.
554 branchPtr(JITCompiler::Equal
, arg1PayloadGPR
, arg2PayloadGPR
, invert
? notTaken
: taken
);
556 silentSpillAllRegisters(resultPayloadGPR
);
557 callOperation(operationCompareStrictEqCell
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
558 silentFillAllRegisters(resultPayloadGPR
);
560 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultPayloadGPR
, taken
);
562 // FIXME: Add fast paths for twoCells, number etc.
564 silentSpillAllRegisters(resultPayloadGPR
);
565 callOperation(operationCompareStrictEq
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
566 silentFillAllRegisters(resultPayloadGPR
);
568 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultPayloadGPR
, taken
);
574 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node
* node
, bool invert
)
576 JSValueOperand
arg1(this, node
->child1());
577 JSValueOperand
arg2(this, node
->child2());
578 GPRReg arg1TagGPR
= arg1
.tagGPR();
579 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
580 GPRReg arg2TagGPR
= arg2
.tagGPR();
581 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
583 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
584 GPRReg resultPayloadGPR
= resultPayload
.gpr();
589 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
590 // see if we get lucky: if the arguments are cells and they reference the same
591 // cell, then they must be strictly equal.
592 // FIXME: this should flush registers instead of silent spill/fill.
593 JITCompiler::Jump notEqualCase
= m_jit
.branchPtr(JITCompiler::NotEqual
, arg1PayloadGPR
, arg2PayloadGPR
);
595 m_jit
.move(JITCompiler::TrustedImm32(!invert
), resultPayloadGPR
);
596 JITCompiler::Jump done
= m_jit
.jump();
598 notEqualCase
.link(&m_jit
);
600 silentSpillAllRegisters(resultPayloadGPR
);
601 callOperation(operationCompareStrictEqCell
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
602 silentFillAllRegisters(resultPayloadGPR
);
604 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR
);
608 // FIXME: Add fast paths.
610 silentSpillAllRegisters(resultPayloadGPR
);
611 callOperation(operationCompareStrictEq
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
612 silentFillAllRegisters(resultPayloadGPR
);
614 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR
);
617 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
620 void SpeculativeJIT::compileMiscStrictEq(Node
* node
)
622 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
623 JSValueOperand
op2(this, node
->child2(), ManualOperandSpeculation
);
624 GPRTemporary
result(this);
626 if (node
->child1().useKind() == MiscUse
)
627 speculateMisc(node
->child1(), op1
.jsValueRegs());
628 if (node
->child2().useKind() == MiscUse
)
629 speculateMisc(node
->child2(), op2
.jsValueRegs());
631 m_jit
.move(TrustedImm32(0), result
.gpr());
632 JITCompiler::Jump notEqual
= m_jit
.branch32(JITCompiler::NotEqual
, op1
.tagGPR(), op2
.tagGPR());
633 m_jit
.compare32(JITCompiler::Equal
, op1
.payloadGPR(), op2
.payloadGPR(), result
.gpr());
634 notEqual
.link(&m_jit
);
635 booleanResult(result
.gpr(), node
);
638 void SpeculativeJIT::emitCall(Node
* node
)
640 if (node
->op() != Call
)
641 ASSERT(node
->op() == Construct
);
643 // For constructors, the this argument is not passed but we have to make space
645 int dummyThisArgument
= node
->op() == Call
? 0 : 1;
647 CallLinkInfo::CallType callType
= node
->op() == Call
? CallLinkInfo::Call
: CallLinkInfo::Construct
;
649 Edge calleeEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild()];
650 JSValueOperand
callee(this, calleeEdge
);
651 GPRReg calleeTagGPR
= callee
.tagGPR();
652 GPRReg calleePayloadGPR
= callee
.payloadGPR();
655 // The call instruction's first child is either the function (normal call) or the
656 // receiver (method call). subsequent children are the arguments.
657 int numPassedArgs
= node
->numChildren() - 1;
659 int numArgs
= numPassedArgs
+ dummyThisArgument
;
661 m_jit
.store32(MacroAssembler::TrustedImm32(numArgs
), calleeFramePayloadSlot(JSStack::ArgumentCount
));
662 m_jit
.store32(calleePayloadGPR
, calleeFramePayloadSlot(JSStack::Callee
));
663 m_jit
.store32(calleeTagGPR
, calleeFrameTagSlot(JSStack::Callee
));
665 for (int i
= 0; i
< numPassedArgs
; i
++) {
666 Edge argEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + 1 + i
];
667 JSValueOperand
arg(this, argEdge
);
668 GPRReg argTagGPR
= arg
.tagGPR();
669 GPRReg argPayloadGPR
= arg
.payloadGPR();
672 m_jit
.store32(argTagGPR
, calleeArgumentTagSlot(i
+ dummyThisArgument
));
673 m_jit
.store32(argPayloadGPR
, calleeArgumentPayloadSlot(i
+ dummyThisArgument
));
678 GPRResult
resultPayload(this);
679 GPRResult2
resultTag(this);
680 GPRReg resultPayloadGPR
= resultPayload
.gpr();
681 GPRReg resultTagGPR
= resultTag
.gpr();
683 JITCompiler::DataLabelPtr targetToCheck
;
684 JITCompiler::JumpList slowPath
;
686 m_jit
.emitStoreCodeOrigin(node
->origin
.semantic
);
688 slowPath
.append(branchNotCell(callee
.jsValueRegs()));
689 slowPath
.append(m_jit
.branchPtrWithPatch(MacroAssembler::NotEqual
, calleePayloadGPR
, targetToCheck
));
690 m_jit
.loadPtr(MacroAssembler::Address(calleePayloadGPR
, OBJECT_OFFSETOF(JSFunction
, m_scope
)), resultPayloadGPR
);
691 m_jit
.storePtr(resultPayloadGPR
, calleeFramePayloadSlot(JSStack::ScopeChain
));
692 m_jit
.storePtr(MacroAssembler::TrustedImm32(JSValue::CellTag
), calleeFrameTagSlot(JSStack::ScopeChain
));
694 JITCompiler::Call fastCall
= m_jit
.nearCall();
696 JITCompiler::Jump done
= m_jit
.jump();
698 slowPath
.link(&m_jit
);
700 // Callee payload needs to be in regT0, tag in regT1
701 if (calleeTagGPR
== GPRInfo::regT0
) {
702 if (calleePayloadGPR
== GPRInfo::regT1
)
703 m_jit
.swap(GPRInfo::regT1
, GPRInfo::regT0
);
705 m_jit
.move(calleeTagGPR
, GPRInfo::regT1
);
706 m_jit
.move(calleePayloadGPR
, GPRInfo::regT0
);
709 m_jit
.move(calleePayloadGPR
, GPRInfo::regT0
);
710 m_jit
.move(calleeTagGPR
, GPRInfo::regT1
);
712 CallLinkInfo
* info
= m_jit
.codeBlock()->addCallLinkInfo();
713 m_jit
.move(MacroAssembler::TrustedImmPtr(info
), GPRInfo::regT2
);
714 JITCompiler::Call slowCall
= m_jit
.nearCall();
718 m_jit
.setupResults(resultPayloadGPR
, resultTagGPR
);
720 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, DataFormatJS
, UseChildrenCalledExplicitly
);
722 info
->callType
= callType
;
723 info
->codeOrigin
= node
->origin
.semantic
;
724 info
->calleeGPR
= calleePayloadGPR
;
725 m_jit
.addJSCall(fastCall
, slowCall
, targetToCheck
, info
);
728 template<bool strict
>
729 GPRReg
SpeculativeJIT::fillSpeculateInt32Internal(Edge edge
, DataFormat
& returnFormat
)
731 AbstractValue
& value
= m_state
.forNode(edge
);
732 SpeculatedType type
= value
.m_type
;
733 ASSERT(edge
.useKind() != KnownInt32Use
|| !(value
.m_type
& ~SpecInt32
));
734 m_interpreter
.filter(value
, SpecInt32
);
735 VirtualRegister virtualRegister
= edge
->virtualRegister();
736 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
738 if (edge
->hasConstant() && !isInt32Constant(edge
.node())) {
739 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
740 returnFormat
= DataFormatInt32
;
744 switch (info
.registerFormat()) {
745 case DataFormatNone
: {
746 if (edge
->hasConstant()) {
747 ASSERT(isInt32Constant(edge
.node()));
748 GPRReg gpr
= allocate();
749 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
750 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
751 info
.fillInt32(*m_stream
, gpr
);
752 returnFormat
= DataFormatInt32
;
756 DataFormat spillFormat
= info
.spillFormat();
757 ASSERT_UNUSED(spillFormat
, (spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInt32
);
759 // If we know this was spilled as an integer we can fill without checking.
760 if (type
& ~SpecInt32
)
761 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::Int32Tag
)));
763 GPRReg gpr
= allocate();
764 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
765 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
766 info
.fillInt32(*m_stream
, gpr
);
767 returnFormat
= DataFormatInt32
;
771 case DataFormatJSInt32
:
773 // Check the value is an integer.
774 GPRReg tagGPR
= info
.tagGPR();
775 GPRReg payloadGPR
= info
.payloadGPR();
777 m_gprs
.lock(payloadGPR
);
778 if (type
& ~SpecInt32
)
779 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::Int32Tag
)));
780 m_gprs
.unlock(tagGPR
);
781 m_gprs
.release(tagGPR
);
782 m_gprs
.release(payloadGPR
);
783 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderInteger
);
784 info
.fillInt32(*m_stream
, payloadGPR
);
785 // If !strict we're done, return.
786 returnFormat
= DataFormatInt32
;
790 case DataFormatInt32
: {
791 GPRReg gpr
= info
.gpr();
793 returnFormat
= DataFormatInt32
;
798 case DataFormatBoolean
:
799 case DataFormatJSDouble
:
800 case DataFormatJSCell
:
801 case DataFormatJSBoolean
:
802 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
803 returnFormat
= DataFormatInt32
;
806 case DataFormatDouble
:
807 case DataFormatStorage
:
809 RELEASE_ASSERT_NOT_REACHED();
810 return InvalidGPRReg
;
814 GPRReg
SpeculativeJIT::fillSpeculateInt32(Edge edge
, DataFormat
& returnFormat
)
816 return fillSpeculateInt32Internal
<false>(edge
, returnFormat
);
819 GPRReg
SpeculativeJIT::fillSpeculateInt32Strict(Edge edge
)
821 DataFormat mustBeDataFormatInt32
;
822 GPRReg result
= fillSpeculateInt32Internal
<true>(edge
, mustBeDataFormatInt32
);
823 ASSERT(mustBeDataFormatInt32
== DataFormatInt32
);
827 FPRReg
SpeculativeJIT::fillSpeculateDouble(Edge edge
)
829 ASSERT(isDouble(edge
.useKind()));
830 ASSERT(edge
->hasDoubleResult());
831 VirtualRegister virtualRegister
= edge
->virtualRegister();
832 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
834 if (info
.registerFormat() == DataFormatNone
) {
836 if (edge
->hasConstant()) {
837 RELEASE_ASSERT(isNumberConstant(edge
.node()));
838 FPRReg fpr
= fprAllocate();
839 m_jit
.loadDouble(TrustedImmPtr(addressOfDoubleConstant(edge
.node())), fpr
);
840 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderConstant
);
841 info
.fillDouble(*m_stream
, fpr
);
845 RELEASE_ASSERT(info
.spillFormat() == DataFormatDouble
);
846 FPRReg fpr
= fprAllocate();
847 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
848 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderSpilled
);
849 info
.fillDouble(*m_stream
, fpr
);
853 RELEASE_ASSERT(info
.registerFormat() == DataFormatDouble
);
854 FPRReg fpr
= info
.fpr();
859 GPRReg
SpeculativeJIT::fillSpeculateCell(Edge edge
)
861 AbstractValue
& value
= m_state
.forNode(edge
);
862 SpeculatedType type
= value
.m_type
;
863 ASSERT((edge
.useKind() != KnownCellUse
&& edge
.useKind() != KnownStringUse
) || !(value
.m_type
& ~SpecCell
));
864 m_interpreter
.filter(value
, SpecCell
);
865 VirtualRegister virtualRegister
= edge
->virtualRegister();
866 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
868 switch (info
.registerFormat()) {
869 case DataFormatNone
: {
870 if (info
.spillFormat() == DataFormatInt32
) {
871 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
875 if (edge
->hasConstant()) {
876 JSValue jsValue
= valueOfJSConstant(edge
.node());
877 GPRReg gpr
= allocate();
878 if (jsValue
.isCell()) {
879 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
880 m_jit
.move(MacroAssembler::TrustedImmPtr(jsValue
.asCell()), gpr
);
881 info
.fillCell(*m_stream
, gpr
);
884 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
888 ASSERT((info
.spillFormat() & DataFormatJS
) || info
.spillFormat() == DataFormatCell
);
889 if (type
& ~SpecCell
) {
892 JSValueSource(JITCompiler::addressFor(virtualRegister
)),
895 MacroAssembler::NotEqual
,
896 JITCompiler::tagFor(virtualRegister
),
897 TrustedImm32(JSValue::CellTag
)));
899 GPRReg gpr
= allocate();
900 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
901 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
902 info
.fillCell(*m_stream
, gpr
);
906 case DataFormatCell
: {
907 GPRReg gpr
= info
.gpr();
912 case DataFormatJSCell
:
914 GPRReg tagGPR
= info
.tagGPR();
915 GPRReg payloadGPR
= info
.payloadGPR();
917 m_gprs
.lock(payloadGPR
);
918 if (type
& ~SpecCell
) {
920 BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
,
921 branchNotCell(info
.jsValueRegs()));
923 m_gprs
.unlock(tagGPR
);
924 m_gprs
.release(tagGPR
);
925 m_gprs
.release(payloadGPR
);
926 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderCell
);
927 info
.fillCell(*m_stream
, payloadGPR
);
931 case DataFormatJSInt32
:
932 case DataFormatInt32
:
933 case DataFormatJSDouble
:
934 case DataFormatJSBoolean
:
935 case DataFormatBoolean
:
936 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
939 case DataFormatDouble
:
940 case DataFormatStorage
:
941 RELEASE_ASSERT_NOT_REACHED();
944 RELEASE_ASSERT_NOT_REACHED();
945 return InvalidGPRReg
;
949 GPRReg
SpeculativeJIT::fillSpeculateBoolean(Edge edge
)
951 AbstractValue
& value
= m_state
.forNode(edge
);
952 SpeculatedType type
= value
.m_type
;
953 m_interpreter
.filter(value
, SpecBoolean
);
954 VirtualRegister virtualRegister
= edge
->virtualRegister();
955 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
957 switch (info
.registerFormat()) {
958 case DataFormatNone
: {
959 if (info
.spillFormat() == DataFormatInt32
) {
960 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
964 if (edge
->hasConstant()) {
965 JSValue jsValue
= valueOfJSConstant(edge
.node());
966 GPRReg gpr
= allocate();
967 if (jsValue
.isBoolean()) {
968 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
969 m_jit
.move(MacroAssembler::TrustedImm32(jsValue
.asBoolean()), gpr
);
970 info
.fillBoolean(*m_stream
, gpr
);
973 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
977 ASSERT((info
.spillFormat() & DataFormatJS
) || info
.spillFormat() == DataFormatBoolean
);
979 if (type
& ~SpecBoolean
)
980 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::BooleanTag
)));
982 GPRReg gpr
= allocate();
983 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
984 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
985 info
.fillBoolean(*m_stream
, gpr
);
989 case DataFormatBoolean
: {
990 GPRReg gpr
= info
.gpr();
995 case DataFormatJSBoolean
:
997 GPRReg tagGPR
= info
.tagGPR();
998 GPRReg payloadGPR
= info
.payloadGPR();
1000 m_gprs
.lock(payloadGPR
);
1001 if (type
& ~SpecBoolean
)
1002 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::BooleanTag
)));
1004 m_gprs
.unlock(tagGPR
);
1005 m_gprs
.release(tagGPR
);
1006 m_gprs
.release(payloadGPR
);
1007 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderBoolean
);
1008 info
.fillBoolean(*m_stream
, payloadGPR
);
1012 case DataFormatJSInt32
:
1013 case DataFormatInt32
:
1014 case DataFormatJSDouble
:
1015 case DataFormatJSCell
:
1016 case DataFormatCell
:
1017 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1020 case DataFormatDouble
:
1021 case DataFormatStorage
:
1022 RELEASE_ASSERT_NOT_REACHED();
1025 RELEASE_ASSERT_NOT_REACHED();
1026 return InvalidGPRReg
;
// Emits a GC write barrier for storing `valueEdge` into `baseEdge`'s object.
// Only meaningful when the value may be a cell (asserted below).
// NOTE(review): the writeBarrier path and the UNUSED_PARAM path both appear
// here with no visible conditional between them — the original preprocessor
// guards (likely #if ENABLE(GGC) / #else / #endif) seem to have been elided
// by extraction; confirm against upstream source.
1030 void SpeculativeJIT::compileBaseValueStoreBarrier(Edge
& baseEdge
, Edge
& valueEdge
)
1033 ASSERT(!isKnownNotCell(valueEdge
.node()));
// Pin base (as a cell) and value (as a boxed JS value) into registers, plus
// two scratch GPRs for the barrier itself.
1035 SpeculateCellOperand
base(this, baseEdge
);
1036 JSValueOperand
value(this, valueEdge
);
1037 GPRTemporary
scratch1(this);
1038 GPRTemporary
scratch2(this);
1040 writeBarrier(base
.gpr(), value
.tagGPR(), valueEdge
, scratch1
.gpr(), scratch2
.gpr());
// No-op variant: silence unused-parameter warnings when barriers are
// compiled out.
1042 UNUSED_PARAM(baseEdge
);
1043 UNUSED_PARAM(valueEdge
);
// Compiles Object==Object equality: speculates both children are objects
// (not strings, and — when the masquerades-as-undefined watchpoint has
// fired — not MasqueradesAsUndefined), then compares the two cell pointers
// and produces a boolean result.
// NOTE(review): the DFG_TYPE_CHECK(...) call heads for the structure checks
// appear to have been elided by extraction (only their argument lists
// remain); confirm against upstream source.
1047 void SpeculativeJIT::compileObjectEquality(Node
* node
)
1049 SpeculateCellOperand
op1(this, node
->child1());
1050 SpeculateCellOperand
op2(this, node
->child2());
1051 GPRReg op1GPR
= op1
.gpr();
1052 GPRReg op2GPR
= op2
.gpr();
// Fast case: the watchpoint is intact, so no object masquerades as
// undefined — only the "is not a string structure" checks are needed.
1054 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1056 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchPtr(
1057 MacroAssembler::Equal
,
1058 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1059 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1061 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchPtr(
1062 MacroAssembler::Equal
,
1063 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1064 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
// Slow case: additionally speculate that neither operand has the
// MasqueradesAsUndefined type-info flag set.
1067 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchPtr(
1068 MacroAssembler::Equal
,
1069 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1070 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1071 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1073 MacroAssembler::NonZero
,
1074 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1075 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1078 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchPtr(
1079 MacroAssembler::Equal
,
1080 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1081 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1082 speculationCheck(BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1084 MacroAssembler::NonZero
,
1085 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1086 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
// Both operands are plain objects now: equality is pointer identity.
1089 GPRTemporary
resultPayload(this, Reuse
, op2
);
1090 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1092 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1093 m_jit
.move(TrustedImm32(1), resultPayloadGPR
);
1094 MacroAssembler::Jump done
= m_jit
.jump();
1095 falseCase
.link(&m_jit
);
1096 m_jit
.move(TrustedImm32(0), resultPayloadGPR
);
1099 booleanResult(resultPayloadGPR
, node
);
// Compiles equality where the left child is speculated to be an object and
// the right child is speculated to be either an object or "other"
// (null/undefined). Produces a boolean result in m_currentNode.
// NOTE(review): several DFG_TYPE_CHECK(...)/branchTest8(...) call heads
// appear to have been elided by extraction; code kept byte-identical.
1102 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
)
1104 SpeculateCellOperand
op1(this, leftChild
);
1105 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1106 GPRTemporary
result(this);
1108 GPRReg op1GPR
= op1
.gpr();
1109 GPRReg op2TagGPR
= op2
.tagGPR();
1110 GPRReg op2PayloadGPR
= op2
.payloadGPR();
1111 GPRReg resultGPR
= result
.gpr();
1113 bool masqueradesAsUndefinedWatchpointValid
=
1114 masqueradesAsUndefinedWatchpointIsStillValid();
// Speculate the left operand is an object (not a string; and not
// MasqueradesAsUndefined when the watchpoint has been invalidated).
1116 if (masqueradesAsUndefinedWatchpointValid
) {
1118 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1119 MacroAssembler::Equal
,
1120 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1121 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1124 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1125 MacroAssembler::Equal
,
1126 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1127 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1128 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1130 MacroAssembler::NonZero
,
1131 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1132 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1136 // It seems that most of the time when programs do a == b where b may be either null/undefined
1137 // or an object, b is usually an object. Balance the branches to make that case fast.
1138 MacroAssembler::Jump rightNotCell
= branchNotCell(op2
.jsValueRegs());
1140 // We know that within this branch, rightChild must be a cell.
1141 if (masqueradesAsUndefinedWatchpointValid
) {
1143 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1145 MacroAssembler::Equal
,
1146 MacroAssembler::Address(op2PayloadGPR
, JSCell::structureIDOffset()),
1147 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1150 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1152 MacroAssembler::Equal
,
1153 MacroAssembler::Address(op2PayloadGPR
, JSCell::structureIDOffset()),
1154 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1155 speculationCheck(BadType
, JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
,
1157 MacroAssembler::NonZero
,
1158 MacroAssembler::Address(op2PayloadGPR
, JSCell::typeInfoFlagsOffset()),
1159 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1162 // At this point we know that we can perform a straight-forward equality comparison on pointer
1163 // values because both left and right are pointers to objects that have no special equality
// Pointer-identity comparison between left cell and right payload (the
// right operand's cell pointer lives in its payload word on 32-bit).
1165 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2PayloadGPR
);
1166 MacroAssembler::Jump trueCase
= m_jit
.jump();
1168 rightNotCell
.link(&m_jit
);
1170 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1171 // prove that it is either null or undefined.
// Exploits (UndefinedTag | 1) == NullTag: OR the tag with 1 and compare
// against NullTag to accept both null and undefined in one branch.
1172 if (needsTypeCheck(rightChild
, SpecCell
| SpecOther
)) {
1173 m_jit
.move(op2TagGPR
, resultGPR
);
1174 m_jit
.or32(TrustedImm32(1), resultGPR
);
1177 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, SpecCell
| SpecOther
,
1179 MacroAssembler::NotEqual
, resultGPR
,
1180 MacroAssembler::TrustedImm32(JSValue::NullTag
)));
// Object vs null/undefined (or pointer mismatch) is false; identity is true.
1183 falseCase
.link(&m_jit
);
1184 m_jit
.move(TrustedImm32(0), resultGPR
);
1185 MacroAssembler::Jump done
= m_jit
.jump();
1186 trueCase
.link(&m_jit
);
1187 m_jit
.move(TrustedImm32(1), resultGPR
);
1190 booleanResult(resultGPR
, m_currentNode
);
// Peephole-fused variant of compileObjectToObjectOrOtherEquality: instead of
// materializing a boolean, branches directly to the taken/notTaken blocks of
// the adjacent Branch node. Speculation checks mirror the non-peephole
// version above.
// NOTE(review): as elsewhere in this extraction, some call heads
// (DFG_TYPE_CHECK/branchTest8) appear to have been elided; code kept
// byte-identical.
1193 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
, Node
* branchNode
)
1195 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1196 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1198 SpeculateCellOperand
op1(this, leftChild
);
1199 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1200 GPRTemporary
result(this);
1202 GPRReg op1GPR
= op1
.gpr();
1203 GPRReg op2TagGPR
= op2
.tagGPR();
1204 GPRReg op2PayloadGPR
= op2
.payloadGPR();
1205 GPRReg resultGPR
= result
.gpr();
1207 bool masqueradesAsUndefinedWatchpointValid
=
1208 masqueradesAsUndefinedWatchpointIsStillValid();
// Speculate left operand is an object (see compileObjectEquality for the
// two watchpoint regimes).
1210 if (masqueradesAsUndefinedWatchpointValid
) {
1212 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1213 MacroAssembler::Equal
,
1214 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1215 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1218 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1219 MacroAssembler::Equal
,
1220 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1221 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1222 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1224 MacroAssembler::NonZero
,
1225 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1226 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1229 // It seems that most of the time when programs do a == b where b may be either null/undefined
1230 // or an object, b is usually an object. Balance the branches to make that case fast.
1231 MacroAssembler::Jump rightNotCell
= branchNotCell(op2
.jsValueRegs());
1233 // We know that within this branch, rightChild must be a cell.
1234 if (masqueradesAsUndefinedWatchpointValid
) {
1236 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1238 MacroAssembler::Equal
,
1239 MacroAssembler::Address(op2PayloadGPR
, JSCell::structureIDOffset()),
1240 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1243 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1245 MacroAssembler::Equal
,
1246 MacroAssembler::Address(op2PayloadGPR
, JSCell::structureIDOffset()),
1247 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1248 speculationCheck(BadType
, JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
,
1250 MacroAssembler::NonZero
,
1251 MacroAssembler::Address(op2PayloadGPR
, JSCell::typeInfoFlagsOffset()),
1252 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1255 // At this point we know that we can perform a straight-forward equality comparison on pointer
1256 // values because both left and right are pointers to objects that have no special equality
// Pointer identity decides the branch directly.
1258 branch32(MacroAssembler::Equal
, op1GPR
, op2PayloadGPR
, taken
);
1260 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1261 // prove that it is either null or undefined.
// If no check is needed, the not-cell path falls straight through to
// notTaken; otherwise the tag is verified below via the
// (UndefinedTag | 1) == NullTag trick.
1262 if (!needsTypeCheck(rightChild
, SpecCell
| SpecOther
))
1263 rightNotCell
.link(&m_jit
);
1265 jump(notTaken
, ForceJump
);
1267 rightNotCell
.link(&m_jit
);
1268 m_jit
.move(op2TagGPR
, resultGPR
);
1269 m_jit
.or32(TrustedImm32(1), resultGPR
);
1272 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, SpecCell
| SpecOther
,
1274 MacroAssembler::NotEqual
, resultGPR
,
1275 MacroAssembler::TrustedImm32(JSValue::NullTag
)));
// Compiles an Int32 relational comparison: both children are speculated to
// be Int32, the comparison result (0/1) is materialized with compare32 and
// reported as a boolean.
1281 void SpeculativeJIT::compileInt32Compare(Node
* node
, MacroAssembler::RelationalCondition condition
)
1283 SpeculateInt32Operand
op1(this, node
->child1());
1284 SpeculateInt32Operand
op2(this, node
->child2());
1285 GPRTemporary
resultPayload(this);
1287 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), resultPayload
.gpr());
1289 // If we add a DataFormatBool, we should use it here.
1290 booleanResult(resultPayload
.gpr(), node
);
// Compiles a double comparison: result register is preloaded with 1, a
// branchDouble on `condition` skips the overwrite-with-0, so the register
// holds 1 iff the condition held. (branchDouble is used rather than a
// compare instruction because double conditions can involve NaN handling.)
1293 void SpeculativeJIT::compileDoubleCompare(Node
* node
, MacroAssembler::DoubleCondition condition
)
1295 SpeculateDoubleOperand
op1(this, node
->child1());
1296 SpeculateDoubleOperand
op2(this, node
->child2());
1297 GPRTemporary
resultPayload(this);
// Assume true, branch over the false-store when the condition holds.
1299 m_jit
.move(TrustedImm32(1), resultPayload
.gpr());
1300 MacroAssembler::Jump trueCase
= m_jit
.branchDouble(condition
, op1
.fpr(), op2
.fpr());
1301 m_jit
.move(TrustedImm32(0), resultPayload
.gpr());
1302 trueCase
.link(&m_jit
);
1304 booleanResult(resultPayload
.gpr(), node
);
// Compiles LogicalNot for a value speculated to be object-or-other
// (null/undefined): objects are truthy (result 0), null/undefined are falsy
// (result 1). Handles the MasqueradesAsUndefined slow path by comparing the
// structure's global object against the node's origin global object.
// NOTE(review): some DFG_TYPE_CHECK/branchTest8 call heads appear elided by
// extraction; code kept byte-identical.
1307 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse
)
1309 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1310 GPRTemporary
resultPayload(this);
1311 GPRReg valueTagGPR
= value
.tagGPR();
1312 GPRReg valuePayloadGPR
= value
.payloadGPR();
1313 GPRReg resultPayloadGPR
= resultPayload
.gpr();
// `structure` starts unallocated; only the watchpoint-invalid path needs it.
1314 GPRTemporary structure
;
1315 GPRReg structureGPR
= InvalidGPRReg
;
1317 bool masqueradesAsUndefinedWatchpointValid
=
1318 masqueradesAsUndefinedWatchpointIsStillValid();
1320 if (!masqueradesAsUndefinedWatchpointValid
) {
1321 // The masquerades as undefined case will use the structure register, so allocate it here.
1322 // Do this at the top of the function to avoid branching around a register allocation.
1323 GPRTemporary
realStructure(this);
1324 structure
.adopt(realStructure
);
1325 structureGPR
= structure
.gpr();
// Cell path: speculate object-ness; with the watchpoint invalid, also load
// the structure and verify the cell does not masquerade as undefined for
// this global object.
1328 MacroAssembler::Jump notCell
= branchNotCell(value
.jsValueRegs());
1329 if (masqueradesAsUndefinedWatchpointValid
) {
1331 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1333 MacroAssembler::Equal
,
1334 MacroAssembler::Address(valuePayloadGPR
, JSCell::structureIDOffset()),
1335 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1337 m_jit
.loadPtr(MacroAssembler::Address(valuePayloadGPR
, JSCell::structureIDOffset()), structureGPR
);
1340 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1342 MacroAssembler::Equal
,
1344 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1346 MacroAssembler::Jump isNotMasqueradesAsUndefined
=
1348 MacroAssembler::Zero
,
1349 MacroAssembler::Address(valuePayloadGPR
, JSCell::typeInfoFlagsOffset()),
1350 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
));
// A masquerading object only acts undefined for its own global object —
// exit if the structure's global object matches this code's origin.
1352 speculationCheck(BadType
, JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
,
1354 MacroAssembler::Equal
,
1355 MacroAssembler::Address(structureGPR
, Structure::globalObjectOffset()),
1356 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1358 isNotMasqueradesAsUndefined
.link(&m_jit
);
// Object: !object is false.
1360 m_jit
.move(TrustedImm32(0), resultPayloadGPR
);
1361 MacroAssembler::Jump done
= m_jit
.jump();
1363 notCell
.link(&m_jit
);
// Not-cell path: accept only null/undefined, using the tag trick below.
1365 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
1366 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1367 m_jit
.move(valueTagGPR
, resultPayloadGPR
);
1368 m_jit
.or32(TrustedImm32(1), resultPayloadGPR
);
1370 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, SpecCell
| SpecOther
,
1372 MacroAssembler::NotEqual
,
1374 TrustedImm32(JSValue::NullTag
)));
// null/undefined: !value is true.
1376 m_jit
.move(TrustedImm32(1), resultPayloadGPR
);
1380 booleanResult(resultPayloadGPR
, m_currentNode
);
// Compiles the LogicalNot node, dispatching on the child's use kind:
// boolean (xor with 1), object-or-other (delegated), int32 (compare with 0),
// double (branchDoubleNonZero), generic JSValue (fast boolean path with a
// slow-path call to operationConvertJSValueToBoolean), and string
// (delegated to compileStringZeroLength).
// NOTE(review): some `case`/`break`/closing-brace lines appear elided by
// extraction (original line numbers jump); code kept byte-identical.
1383 void SpeculativeJIT::compileLogicalNot(Node
* node
)
1385 switch (node
->child1().useKind()) {
// Boolean: 0/1 payload, so NOT is xor with 1.
1387 SpeculateBooleanOperand
value(this, node
->child1());
1388 GPRTemporary
result(this, Reuse
, value
);
1389 m_jit
.xor32(TrustedImm32(1), value
.gpr(), result
.gpr());
1390 booleanResult(result
.gpr(), node
);
1394 case ObjectOrOtherUse
: {
1395 compileObjectOrOtherLogicalNot(node
->child1());
// Int32: !x is (x == 0).
1400 SpeculateInt32Operand
value(this, node
->child1());
1401 GPRTemporary
resultPayload(this, Reuse
, value
);
1402 m_jit
.compare32(MacroAssembler::Equal
, value
.gpr(), MacroAssembler::TrustedImm32(0), resultPayload
.gpr());
1403 booleanResult(resultPayload
.gpr(), node
);
// Double: !x is true iff x is zero (or NaN, per branchDoubleNonZero's
// condition semantics — confirm against MacroAssembler docs).
1407 case DoubleRepUse
: {
1408 SpeculateDoubleOperand
value(this, node
->child1());
1409 FPRTemporary
scratch(this);
1410 GPRTemporary
resultPayload(this);
1411 m_jit
.move(TrustedImm32(0), resultPayload
.gpr());
1412 MacroAssembler::Jump nonZero
= m_jit
.branchDoubleNonZero(value
.fpr(), scratch
.fpr());
1413 m_jit
.move(TrustedImm32(1), resultPayload
.gpr());
1414 nonZero
.link(&m_jit
);
1415 booleanResult(resultPayload
.gpr(), node
);
// Generic JSValue: fast path when the tag is BooleanTag, otherwise fall to
// a slow-path call that converts the value to a boolean.
1420 JSValueOperand
arg1(this, node
->child1());
1421 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
1422 GPRReg arg1TagGPR
= arg1
.tagGPR();
1423 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
1424 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1428 JITCompiler::Jump slowCase
= m_jit
.branch32(JITCompiler::NotEqual
, arg1TagGPR
, TrustedImm32(JSValue::BooleanTag
));
1430 m_jit
.move(arg1PayloadGPR
, resultPayloadGPR
);
1432 addSlowPathGenerator(
1434 slowCase
, this, operationConvertJSValueToBoolean
, resultPayloadGPR
, arg1TagGPR
,
// Final xor flips the boolean to implement NOT on both paths.
1437 m_jit
.xor32(TrustedImm32(1), resultPayloadGPR
);
1438 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
1442 return compileStringZeroLength(node
);
1445 RELEASE_ASSERT_NOT_REACHED();
// Emits a branch on a value speculated to be object-or-other: objects jump
// to `taken`, null/undefined fall through to `notTaken`. Mirrors
// compileObjectOrOtherLogicalNot's speculation structure, including the
// MasqueradesAsUndefined global-object check.
// NOTE(review): some DFG_TYPE_CHECK call heads and the final fall-through
// jump to notTaken appear elided by extraction; code kept byte-identical.
1450 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse
, BasicBlock
* taken
, BasicBlock
* notTaken
)
1452 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1453 GPRTemporary
scratch(this);
1454 GPRReg valueTagGPR
= value
.tagGPR();
1455 GPRReg valuePayloadGPR
= value
.payloadGPR();
1456 GPRReg scratchGPR
= scratch
.gpr();
// Cell path: speculate object-ness, with the masquerades check when the
// watchpoint has been invalidated.
1458 MacroAssembler::Jump notCell
= branchNotCell(value
.jsValueRegs());
1459 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1461 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1463 MacroAssembler::Equal
,
1464 MacroAssembler::Address(valuePayloadGPR
, JSCell::structureIDOffset()),
1465 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1467 m_jit
.loadPtr(MacroAssembler::Address(valuePayloadGPR
, JSCell::structureIDOffset()), scratchGPR
);
1470 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1472 MacroAssembler::Equal
,
1474 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1476 JITCompiler::Jump isNotMasqueradesAsUndefined
= m_jit
.branchTest8(
1478 MacroAssembler::Address(valuePayloadGPR
, JSCell::typeInfoFlagsOffset()),
1479 TrustedImm32(MasqueradesAsUndefined
));
// Exit if a masquerading object belongs to this code's global object.
1481 speculationCheck(BadType
, JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
,
1483 MacroAssembler::Equal
,
1484 MacroAssembler::Address(scratchGPR
, Structure::globalObjectOffset()),
1485 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1487 isNotMasqueradesAsUndefined
.link(&m_jit
);
// Objects are truthy: branch to taken.
1489 jump(taken
, ForceJump
);
1491 notCell
.link(&m_jit
);
// Not-cell path: verify null/undefined via (tag | 1) == NullTag.
1493 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
1494 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1495 m_jit
.move(valueTagGPR
, scratchGPR
);
1496 m_jit
.or32(TrustedImm32(1), scratchGPR
);
1498 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, SpecCell
| SpecOther
,
1499 m_jit
.branch32(MacroAssembler::NotEqual
, scratchGPR
, TrustedImm32(JSValue::NullTag
)));
1504 noResult(m_currentNode
);
// Emits code for a Branch node, dispatching on the condition's use kind:
// boolean test, object-or-other (delegated), int32/double numeric tests,
// and the generic JSValue path with an Int32 fast case, a boolean fast
// case, and a slow call to operationConvertJSValueToBoolean.
// NOTE(review): several `case` labels, `break`s, the taken/notTaken swap
// bodies, and some fall-through jumps appear elided by extraction (original
// line numbers jump); code kept byte-identical.
1507 void SpeculativeJIT::emitBranch(Node
* node
)
1509 BasicBlock
* taken
= node
->branchData()->taken
.block
;
1510 BasicBlock
* notTaken
= node
->branchData()->notTaken
.block
;
1512 switch (node
->child1().useKind()) {
// Boolean condition: test bit 0; invert the condition when `taken` is the
// fall-through block so only one branch is emitted.
1514 SpeculateBooleanOperand
value(this, node
->child1());
1515 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
1517 if (taken
== nextBlock()) {
1518 condition
= MacroAssembler::Zero
;
1519 BasicBlock
* tmp
= taken
;
1524 branchTest32(condition
, value
.gpr(), TrustedImm32(1), taken
);
1531 case ObjectOrOtherUse
: {
1532 emitObjectOrOtherBranch(node
->child1(), taken
, notTaken
);
// Numeric condition: int32 tests against zero (with the same
// invert-on-fallthrough trick); double uses branchDoubleNonZero.
1538 if (node
->child1().useKind() == Int32Use
) {
1539 bool invert
= false;
1541 if (taken
== nextBlock()) {
1543 BasicBlock
* tmp
= taken
;
1548 SpeculateInt32Operand
value(this, node
->child1());
1549 branchTest32(invert
? MacroAssembler::Zero
: MacroAssembler::NonZero
, value
.gpr(), taken
);
1551 SpeculateDoubleOperand
value(this, node
->child1());
1552 FPRTemporary
scratch(this);
1553 branchDoubleNonZero(value
.fpr(), scratch
.fpr(), taken
);
// Generic JSValue condition.
1563 JSValueOperand
value(this, node
->child1());
1565 GPRReg valueTagGPR
= value
.tagGPR();
1566 GPRReg valuePayloadGPR
= value
.payloadGPR();
1568 GPRTemporary
result(this);
1569 GPRReg resultGPR
= result
.gpr();
// Children are consumed explicitly because the slow path clobbers state.
1571 use(node
->child1());
// Int32 tag: truthiness is payload != 0. Boolean tag: same test on the
// 0/1 payload. Any other tag goes to the slow conversion call.
1573 JITCompiler::Jump fastPath
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
));
1574 JITCompiler::Jump slowPath
= m_jit
.branch32(JITCompiler::NotEqual
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::BooleanTag
));
1576 fastPath
.link(&m_jit
);
1577 branchTest32(JITCompiler::Zero
, valuePayloadGPR
, notTaken
);
1578 jump(taken
, ForceJump
);
// Slow path: spill everything, call the runtime conversion, refill, then
// branch on the returned boolean.
1580 slowPath
.link(&m_jit
);
1581 silentSpillAllRegisters(resultGPR
);
1582 callOperation(operationConvertJSValueToBoolean
, resultGPR
, valueTagGPR
, valuePayloadGPR
);
1583 silentFillAllRegisters(resultGPR
);
1585 branchTest32(JITCompiler::NonZero
, resultGPR
, taken
);
1588 noResult(node
, UseChildrenCalledExplicitly
);
1593 RELEASE_ASSERT_NOT_REACHED();
// Compiles PutByVal into contiguous (tag/payload pair) array storage on
// 32-bit. Handles three regimes: PutByValAlias (no bounds check — a prior
// check is assumed valid), in-bounds (speculation check against public
// length), and may-grow/out-of-bounds (branch on vector length, bump public
// length when appending, and slow-path calls for true out-of-bounds stores,
// strict/non-strict and direct/non-direct variants).
// NOTE(review): some early-return/`break` lines and the speculationCheck
// call head at original line 1624-1625 appear elided by extraction; code
// kept byte-identical.
1598 template<typename BaseOperandType
, typename PropertyOperandType
, typename ValueOperandType
, typename TagType
>
1599 void SpeculativeJIT::compileContiguousPutByVal(Node
* node
, BaseOperandType
& base
, PropertyOperandType
& property
, ValueOperandType
& value
, GPRReg valuePayloadReg
, TagType valueTag
)
// Fourth vararg child is the storage (butterfly) edge.
1601 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
1603 ArrayMode arrayMode
= node
->arrayMode();
1605 GPRReg baseReg
= base
.gpr();
1606 GPRReg propertyReg
= property
.gpr();
1608 StorageOperand
storage(this, child4
);
1609 GPRReg storageReg
= storage
.gpr();
// Alias case: index already validated elsewhere; store tag then payload at
// index*8 within the storage.
1611 if (node
->op() == PutByValAlias
) {
1612 // Store the value to the array.
1613 GPRReg propertyReg
= property
.gpr();
1614 m_jit
.store32(valueTag
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
1615 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
1621 MacroAssembler::Jump slowCase
;
// In-bounds mode: OSR-exit if index >= publicLength.
1623 if (arrayMode
.isInBounds()) {
1625 OutOfBounds
, JSValueRegs(), 0,
1626 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
// Growable mode: in-bounds stores skip the length bump; indices within
// vectorLength append (publicLength = index + 1); beyond vectorLength goes
// to the slow path (or OSR-exits when out-of-bounds isn't allowed).
1628 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1630 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
1632 if (!arrayMode
.isOutOfBounds())
1633 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
// Append: publicLength = index + 1, computed in-place on propertyReg and
// then restored with the matching sub32.
1635 m_jit
.add32(TrustedImm32(1), propertyReg
);
1636 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1637 m_jit
.sub32(TrustedImm32(1), propertyReg
);
1639 inBounds
.link(&m_jit
);
// Store tag word then payload word of the JSValue.
1642 m_jit
.store32(valueTag
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
1643 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
// Out-of-bounds slow paths: choose the runtime operation by direct-ness
// and the code block's strictness.
1650 if (arrayMode
.isOutOfBounds()) {
1651 if (node
->op() == PutByValDirect
) {
1652 addSlowPathGenerator(slowPathCall(
1654 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
1655 NoResult
, baseReg
, propertyReg
, valueTag
, valuePayloadReg
));
1657 addSlowPathGenerator(slowPathCall(
1659 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
1660 NoResult
, baseReg
, propertyReg
, valueTag
, valuePayloadReg
));
1664 noResult(node
, UseChildrenCalledExplicitly
);
1667 void SpeculativeJIT::compile(Node
* node
)
1669 NodeType op
= node
->op();
1671 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1672 m_jit
.clearRegisterAllocationOffsets();
1677 case DoubleConstant
:
1678 initConstantInfo(node
);
1681 case PhantomArguments
:
1682 initConstantInfo(node
);
1685 case WeakJSConstant
:
1686 m_jit
.addWeakReference(node
->weakConstant());
1687 initConstantInfo(node
);
1691 RELEASE_ASSERT_NOT_REACHED();
1696 AbstractValue
& value
= m_state
.variables().operand(node
->local());
1698 // If the CFA is tracking this variable and it found that the variable
1699 // cannot have been assigned, then don't attempt to proceed.
1700 if (value
.isClear()) {
1701 // FIXME: We should trap instead.
1702 // https://bugs.webkit.org/show_bug.cgi?id=110383
1703 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
1707 switch (node
->variableAccessData()->flushFormat()) {
1708 case FlushedDouble
: {
1709 FPRTemporary
result(this);
1710 m_jit
.loadDouble(JITCompiler::addressFor(node
->machineLocal()), result
.fpr());
1711 VirtualRegister virtualRegister
= node
->virtualRegister();
1712 m_fprs
.retain(result
.fpr(), virtualRegister
, SpillOrderDouble
);
1713 generationInfoFromVirtualRegister(virtualRegister
).initDouble(node
, node
->refCount(), result
.fpr());
1717 case FlushedInt32
: {
1718 GPRTemporary
result(this);
1719 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1721 // Like int32Result, but don't useChildren - our children are phi nodes,
1722 // and don't represent values within this dataflow with virtual registers.
1723 VirtualRegister virtualRegister
= node
->virtualRegister();
1724 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
1725 generationInfoFromVirtualRegister(virtualRegister
).initInt32(node
, node
->refCount(), result
.gpr());
1730 GPRTemporary
result(this);
1731 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1733 // Like cellResult, but don't useChildren - our children are phi nodes,
1734 // and don't represent values within this dataflow with virtual registers.
1735 VirtualRegister virtualRegister
= node
->virtualRegister();
1736 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderCell
);
1737 generationInfoFromVirtualRegister(virtualRegister
).initCell(node
, node
->refCount(), result
.gpr());
1741 case FlushedBoolean
: {
1742 GPRTemporary
result(this);
1743 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1745 // Like booleanResult, but don't useChildren - our children are phi nodes,
1746 // and don't represent values within this dataflow with virtual registers.
1747 VirtualRegister virtualRegister
= node
->virtualRegister();
1748 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderBoolean
);
1749 generationInfoFromVirtualRegister(virtualRegister
).initBoolean(node
, node
->refCount(), result
.gpr());
1753 case FlushedJSValue
:
1754 case FlushedArguments
: {
1755 GPRTemporary
result(this);
1756 GPRTemporary
tag(this);
1757 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1758 m_jit
.load32(JITCompiler::tagFor(node
->machineLocal()), tag
.gpr());
1760 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1761 // and don't represent values within this dataflow with virtual registers.
1762 VirtualRegister virtualRegister
= node
->virtualRegister();
1763 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1764 m_gprs
.retain(tag
.gpr(), virtualRegister
, SpillOrderJS
);
1766 generationInfoFromVirtualRegister(virtualRegister
).initJSValue(node
, node
->refCount(), tag
.gpr(), result
.gpr(), DataFormatJS
);
1771 RELEASE_ASSERT_NOT_REACHED();
1776 case GetLocalUnlinked
: {
1777 GPRTemporary
payload(this);
1778 GPRTemporary
tag(this);
1779 m_jit
.load32(JITCompiler::payloadFor(node
->unlinkedMachineLocal()), payload
.gpr());
1780 m_jit
.load32(JITCompiler::tagFor(node
->unlinkedMachineLocal()), tag
.gpr());
1781 jsValueResult(tag
.gpr(), payload
.gpr(), node
);
1788 RELEASE_ASSERT_NOT_REACHED();
1793 switch (node
->variableAccessData()->flushFormat()) {
1794 case FlushedDouble
: {
1795 SpeculateDoubleOperand
value(this, node
->child1());
1796 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
->machineLocal()));
1798 // Indicate that it's no longer necessary to retrieve the value of
1799 // this bytecode variable from registers or other locations in the stack,
1800 // but that it is stored as a double.
1801 recordSetLocal(DataFormatDouble
);
1805 case FlushedInt32
: {
1806 SpeculateInt32Operand
value(this, node
->child1());
1807 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->machineLocal()));
1809 recordSetLocal(DataFormatInt32
);
1814 SpeculateCellOperand
cell(this, node
->child1());
1815 GPRReg cellGPR
= cell
.gpr();
1816 m_jit
.storePtr(cellGPR
, JITCompiler::payloadFor(node
->machineLocal()));
1818 recordSetLocal(DataFormatCell
);
1822 case FlushedBoolean
: {
1823 SpeculateBooleanOperand
value(this, node
->child1());
1824 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->machineLocal()));
1826 recordSetLocal(DataFormatBoolean
);
1830 case FlushedJSValue
:
1831 case FlushedArguments
: {
1832 JSValueOperand
value(this, node
->child1());
1833 m_jit
.store32(value
.payloadGPR(), JITCompiler::payloadFor(node
->machineLocal()));
1834 m_jit
.store32(value
.tagGPR(), JITCompiler::tagFor(node
->machineLocal()));
1836 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
1841 RELEASE_ASSERT_NOT_REACHED();
1848 // This is a no-op; it just marks the fact that the argument is being used.
1849 // But it may be profitable to use this as a hook to run speculation checks
1850 // on arguments, thereby allowing us to trivially eliminate such checks if
1851 // the argument is not used.
1852 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
1858 if (isInt32Constant(node
->child1().node())) {
1859 SpeculateInt32Operand
op2(this, node
->child2());
1860 GPRTemporary
result(this, Reuse
, op2
);
1862 bitOp(op
, valueOfInt32Constant(node
->child1().node()), op2
.gpr(), result
.gpr());
1864 int32Result(result
.gpr(), node
);
1865 } else if (isInt32Constant(node
->child2().node())) {
1866 SpeculateInt32Operand
op1(this, node
->child1());
1867 GPRTemporary
result(this, Reuse
, op1
);
1869 bitOp(op
, valueOfInt32Constant(node
->child2().node()), op1
.gpr(), result
.gpr());
1871 int32Result(result
.gpr(), node
);
1873 SpeculateInt32Operand
op1(this, node
->child1());
1874 SpeculateInt32Operand
op2(this, node
->child2());
1875 GPRTemporary
result(this, Reuse
, op1
, op2
);
1877 GPRReg reg1
= op1
.gpr();
1878 GPRReg reg2
= op2
.gpr();
1879 bitOp(op
, reg1
, reg2
, result
.gpr());
1881 int32Result(result
.gpr(), node
);
1888 if (isInt32Constant(node
->child2().node())) {
1889 SpeculateInt32Operand
op1(this, node
->child1());
1890 GPRTemporary
result(this, Reuse
, op1
);
1892 shiftOp(op
, op1
.gpr(), valueOfInt32Constant(node
->child2().node()) & 0x1f, result
.gpr());
1894 int32Result(result
.gpr(), node
);
1896 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
1897 SpeculateInt32Operand
op1(this, node
->child1());
1898 SpeculateInt32Operand
op2(this, node
->child2());
1899 GPRTemporary
result(this, Reuse
, op1
);
1901 GPRReg reg1
= op1
.gpr();
1902 GPRReg reg2
= op2
.gpr();
1903 shiftOp(op
, reg1
, reg2
, result
.gpr());
1905 int32Result(result
.gpr(), node
);
1909 case UInt32ToNumber
: {
1910 compileUInt32ToNumber(node
);
1914 case DoubleAsInt32
: {
1915 compileDoubleAsInt32(node
);
1919 case ValueToInt32
: {
1920 compileValueToInt32(node
);
1925 compileDoubleRep(node
);
1930 compileValueRep(node
);
1935 JSValueOperand
op1(this, node
->child1());
1936 JSValueOperand
op2(this, node
->child2());
1938 GPRReg op1TagGPR
= op1
.tagGPR();
1939 GPRReg op1PayloadGPR
= op1
.payloadGPR();
1940 GPRReg op2TagGPR
= op2
.tagGPR();
1941 GPRReg op2PayloadGPR
= op2
.payloadGPR();
1945 GPRResult2
resultTag(this);
1946 GPRResult
resultPayload(this);
1947 if (isKnownNotNumber(node
->child1().node()) || isKnownNotNumber(node
->child2().node()))
1948 callOperation(operationValueAddNotNumber
, resultTag
.gpr(), resultPayload
.gpr(), op1TagGPR
, op1PayloadGPR
, op2TagGPR
, op2PayloadGPR
);
1950 callOperation(operationValueAdd
, resultTag
.gpr(), resultPayload
.gpr(), op1TagGPR
, op1PayloadGPR
, op2TagGPR
, op2PayloadGPR
);
1952 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
1961 compileMakeRope(node
);
1965 compileArithSub(node
);
1969 compileArithNegate(node
);
1973 compileArithMul(node
);
1977 compileArithDiv(node
);
1982 compileArithMod(node
);
1987 switch (node
->child1().useKind()) {
1989 SpeculateStrictInt32Operand
op1(this, node
->child1());
1990 GPRTemporary
result(this, Reuse
, op1
);
1991 GPRTemporary
scratch(this);
1993 m_jit
.move(op1
.gpr(), result
.gpr());
1994 m_jit
.rshift32(result
.gpr(), MacroAssembler::TrustedImm32(31), scratch
.gpr());
1995 m_jit
.add32(scratch
.gpr(), result
.gpr());
1996 m_jit
.xor32(scratch
.gpr(), result
.gpr());
1997 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, result
.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
1998 int32Result(result
.gpr(), node
);
2003 case DoubleRepUse
: {
2004 SpeculateDoubleOperand
op1(this, node
->child1());
2005 FPRTemporary
result(this);
2007 m_jit
.absDouble(op1
.fpr(), result
.fpr());
2008 doubleResult(result
.fpr(), node
);
2013 RELEASE_ASSERT_NOT_REACHED();
2021 switch (node
->binaryUseKind()) {
2023 SpeculateStrictInt32Operand
op1(this, node
->child1());
2024 SpeculateStrictInt32Operand
op2(this, node
->child2());
2025 GPRTemporary
result(this, Reuse
, op1
);
2027 GPRReg op1GPR
= op1
.gpr();
2028 GPRReg op2GPR
= op2
.gpr();
2029 GPRReg resultGPR
= result
.gpr();
2031 MacroAssembler::Jump op1Less
= m_jit
.branch32(op
== ArithMin
? MacroAssembler::LessThan
: MacroAssembler::GreaterThan
, op1GPR
, op2GPR
);
2032 m_jit
.move(op2GPR
, resultGPR
);
2033 if (op1GPR
!= resultGPR
) {
2034 MacroAssembler::Jump done
= m_jit
.jump();
2035 op1Less
.link(&m_jit
);
2036 m_jit
.move(op1GPR
, resultGPR
);
2039 op1Less
.link(&m_jit
);
2041 int32Result(resultGPR
, node
);
2045 case DoubleRepUse
: {
2046 SpeculateDoubleOperand
op1(this, node
->child1());
2047 SpeculateDoubleOperand
op2(this, node
->child2());
2048 FPRTemporary
result(this, op1
);
2050 FPRReg op1FPR
= op1
.fpr();
2051 FPRReg op2FPR
= op2
.fpr();
2052 FPRReg resultFPR
= result
.fpr();
2054 MacroAssembler::JumpList done
;
2056 MacroAssembler::Jump op1Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleLessThan
: MacroAssembler::DoubleGreaterThan
, op1FPR
, op2FPR
);
2058 // op2 is eather the lesser one or one of then is NaN
2059 MacroAssembler::Jump op2Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleGreaterThanOrEqual
: MacroAssembler::DoubleLessThanOrEqual
, op1FPR
, op2FPR
);
2061 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2062 // op1 + op2 and putting it into result.
2063 m_jit
.addDouble(op1FPR
, op2FPR
, resultFPR
);
2064 done
.append(m_jit
.jump());
2066 op2Less
.link(&m_jit
);
2067 m_jit
.moveDouble(op2FPR
, resultFPR
);
2069 if (op1FPR
!= resultFPR
) {
2070 done
.append(m_jit
.jump());
2072 op1Less
.link(&m_jit
);
2073 m_jit
.moveDouble(op1FPR
, resultFPR
);
2075 op1Less
.link(&m_jit
);
2079 doubleResult(resultFPR
, node
);
2084 RELEASE_ASSERT_NOT_REACHED();
2091 SpeculateDoubleOperand
op1(this, node
->child1());
2092 FPRTemporary
result(this, op1
);
2094 m_jit
.sqrtDouble(op1
.fpr(), result
.fpr());
2096 doubleResult(result
.fpr(), node
);
2101 SpeculateDoubleOperand
op1(this, node
->child1());
2102 FPRTemporary
result(this, op1
);
2104 m_jit
.convertDoubleToFloat(op1
.fpr(), result
.fpr());
2105 m_jit
.convertFloatToDouble(result
.fpr(), result
.fpr());
2107 doubleResult(result
.fpr(), node
);
2112 SpeculateDoubleOperand
op1(this, node
->child1());
2113 FPRReg op1FPR
= op1
.fpr();
2117 FPRResult
result(this);
2118 callOperation(sin
, result
.fpr(), op1FPR
);
2119 doubleResult(result
.fpr(), node
);
2124 SpeculateDoubleOperand
op1(this, node
->child1());
2125 FPRReg op1FPR
= op1
.fpr();
2129 FPRResult
result(this);
2130 callOperation(cos
, result
.fpr(), op1FPR
);
2131 doubleResult(result
.fpr(), node
);
2136 compileLogicalNot(node
);
2140 if (compare(node
, JITCompiler::LessThan
, JITCompiler::DoubleLessThan
, operationCompareLess
))
2145 if (compare(node
, JITCompiler::LessThanOrEqual
, JITCompiler::DoubleLessThanOrEqual
, operationCompareLessEq
))
2149 case CompareGreater
:
2150 if (compare(node
, JITCompiler::GreaterThan
, JITCompiler::DoubleGreaterThan
, operationCompareGreater
))
2154 case CompareGreaterEq
:
2155 if (compare(node
, JITCompiler::GreaterThanOrEqual
, JITCompiler::DoubleGreaterThanOrEqual
, operationCompareGreaterEq
))
2159 case CompareEqConstant
:
2160 ASSERT(isNullConstant(node
->child2().node()));
2161 if (nonSpeculativeCompareNull(node
, node
->child1()))
2166 if (compare(node
, JITCompiler::Equal
, JITCompiler::DoubleEqual
, operationCompareEq
))
2170 case CompareStrictEq
:
2171 if (compileStrictEq(node
))
2175 case StringCharCodeAt
: {
2176 compileGetCharCodeAt(node
);
2180 case StringCharAt
: {
2181 // Relies on StringCharAt node having same basic layout as GetByVal
2182 compileGetByValOnString(node
);
2186 case StringFromCharCode
: {
2187 compileFromCharCode(node
);
2197 case ArrayifyToStructure
: {
2203 switch (node
->arrayMode().type()) {
2204 case Array::SelectUsingPredictions
:
2205 case Array::ForceExit
:
2206 RELEASE_ASSERT_NOT_REACHED();
2207 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2209 case Array::Generic
: {
2210 SpeculateCellOperand
base(this, node
->child1()); // Save a register, speculate cell. We'll probably be right.
2211 JSValueOperand
property(this, node
->child2());
2212 GPRReg baseGPR
= base
.gpr();
2213 GPRReg propertyTagGPR
= property
.tagGPR();
2214 GPRReg propertyPayloadGPR
= property
.payloadGPR();
2217 GPRResult2
resultTag(this);
2218 GPRResult
resultPayload(this);
2219 callOperation(operationGetByValCell
, resultTag
.gpr(), resultPayload
.gpr(), baseGPR
, propertyTagGPR
, propertyPayloadGPR
);
2221 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2225 case Array::Contiguous
: {
2226 if (node
->arrayMode().isInBounds()) {
2227 SpeculateStrictInt32Operand
property(this, node
->child2());
2228 StorageOperand
storage(this, node
->child3());
2230 GPRReg propertyReg
= property
.gpr();
2231 GPRReg storageReg
= storage
.gpr();
2236 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2238 GPRTemporary
resultPayload(this);
2239 if (node
->arrayMode().type() == Array::Int32
) {
2241 OutOfBounds
, JSValueRegs(), 0,
2243 MacroAssembler::Equal
,
2244 MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)),
2245 TrustedImm32(JSValue::EmptyValueTag
)));
2246 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
.gpr());
2247 int32Result(resultPayload
.gpr(), node
);
2251 GPRTemporary
resultTag(this);
2252 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
.gpr());
2253 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
)));
2254 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
.gpr());
2255 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2259 SpeculateCellOperand
base(this, node
->child1());
2260 SpeculateStrictInt32Operand
property(this, node
->child2());
2261 StorageOperand
storage(this, node
->child3());
2263 GPRReg baseReg
= base
.gpr();
2264 GPRReg propertyReg
= property
.gpr();
2265 GPRReg storageReg
= storage
.gpr();
2270 GPRTemporary
resultTag(this);
2271 GPRTemporary
resultPayload(this);
2272 GPRReg resultTagReg
= resultTag
.gpr();
2273 GPRReg resultPayloadReg
= resultPayload
.gpr();
2275 MacroAssembler::JumpList slowCases
;
2277 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2279 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagReg
);
2280 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadReg
);
2281 slowCases
.append(m_jit
.branch32(MacroAssembler::Equal
, resultTagReg
, TrustedImm32(JSValue::EmptyValueTag
)));
2283 addSlowPathGenerator(
2285 slowCases
, this, operationGetByValArrayInt
,
2286 JSValueRegs(resultTagReg
, resultPayloadReg
), baseReg
, propertyReg
));
2288 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2291 case Array::Double
: {
2292 if (node
->arrayMode().isInBounds()) {
2293 SpeculateStrictInt32Operand
property(this, node
->child2());
2294 StorageOperand
storage(this, node
->child3());
2296 GPRReg propertyReg
= property
.gpr();
2297 GPRReg storageReg
= storage
.gpr();
2302 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2304 FPRTemporary
result(this);
2305 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.fpr());
2306 if (!node
->arrayMode().isSaneChain())
2307 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, result
.fpr(), result
.fpr()));
2308 doubleResult(result
.fpr(), node
);
2312 SpeculateCellOperand
base(this, node
->child1());
2313 SpeculateStrictInt32Operand
property(this, node
->child2());
2314 StorageOperand
storage(this, node
->child3());
2316 GPRReg baseReg
= base
.gpr();
2317 GPRReg propertyReg
= property
.gpr();
2318 GPRReg storageReg
= storage
.gpr();
2323 GPRTemporary
resultTag(this);
2324 GPRTemporary
resultPayload(this);
2325 FPRTemporary
temp(this);
2326 GPRReg resultTagReg
= resultTag
.gpr();
2327 GPRReg resultPayloadReg
= resultPayload
.gpr();
2328 FPRReg tempReg
= temp
.fpr();
2330 MacroAssembler::JumpList slowCases
;
2332 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2334 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), tempReg
);
2335 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempReg
, tempReg
));
2336 boxDouble(tempReg
, resultTagReg
, resultPayloadReg
);
2338 addSlowPathGenerator(
2340 slowCases
, this, operationGetByValArrayInt
,
2341 JSValueRegs(resultTagReg
, resultPayloadReg
), baseReg
, propertyReg
));
2343 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2346 case Array::ArrayStorage
:
2347 case Array::SlowPutArrayStorage
: {
2348 if (node
->arrayMode().isInBounds()) {
2349 SpeculateStrictInt32Operand
property(this, node
->child2());
2350 StorageOperand
storage(this, node
->child3());
2351 GPRReg propertyReg
= property
.gpr();
2352 GPRReg storageReg
= storage
.gpr();
2357 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2359 GPRTemporary
resultTag(this);
2360 GPRTemporary
resultPayload(this);
2362 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
.gpr());
2363 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
)));
2364 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
.gpr());
2366 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2370 SpeculateCellOperand
base(this, node
->child1());
2371 SpeculateStrictInt32Operand
property(this, node
->child2());
2372 StorageOperand
storage(this, node
->child3());
2373 GPRReg propertyReg
= property
.gpr();
2374 GPRReg storageReg
= storage
.gpr();
2375 GPRReg baseReg
= base
.gpr();
2380 GPRTemporary
resultTag(this);
2381 GPRTemporary
resultPayload(this);
2382 GPRReg resultTagReg
= resultTag
.gpr();
2383 GPRReg resultPayloadReg
= resultPayload
.gpr();
2385 JITCompiler::Jump outOfBounds
= m_jit
.branch32(
2386 MacroAssembler::AboveOrEqual
, propertyReg
,
2387 MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2389 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagReg
);
2390 JITCompiler::Jump hole
= m_jit
.branch32(
2391 MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
));
2392 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadReg
);
2394 JITCompiler::JumpList slowCases
;
2395 slowCases
.append(outOfBounds
);
2396 slowCases
.append(hole
);
2397 addSlowPathGenerator(
2399 slowCases
, this, operationGetByValArrayInt
,
2400 JSValueRegs(resultTagReg
, resultPayloadReg
),
2401 baseReg
, propertyReg
));
2403 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2407 compileGetByValOnString(node
);
2409 case Array::Arguments
:
2410 compileGetByValOnArguments(node
);
2413 TypedArrayType type
= node
->arrayMode().typedArrayType();
2415 compileGetByValOnIntTypedArray(node
, type
);
2417 compileGetByValOnFloatTypedArray(node
, type
);
2422 case PutByValDirect
:
2424 case PutByValAlias
: {
2425 Edge child1
= m_jit
.graph().varArgChild(node
, 0);
2426 Edge child2
= m_jit
.graph().varArgChild(node
, 1);
2427 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
2428 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
2430 ArrayMode arrayMode
= node
->arrayMode().modeForPut();
2431 bool alreadyHandled
= false;
2433 switch (arrayMode
.type()) {
2434 case Array::SelectUsingPredictions
:
2435 case Array::ForceExit
:
2436 RELEASE_ASSERT_NOT_REACHED();
2437 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2438 alreadyHandled
= true;
2440 case Array::Generic
: {
2441 ASSERT(node
->op() == PutByVal
|| node
->op() == PutByValDirect
);
2443 SpeculateCellOperand
base(this, child1
); // Save a register, speculate cell. We'll probably be right.
2444 JSValueOperand
property(this, child2
);
2445 JSValueOperand
value(this, child3
);
2446 GPRReg baseGPR
= base
.gpr();
2447 GPRReg propertyTagGPR
= property
.tagGPR();
2448 GPRReg propertyPayloadGPR
= property
.payloadGPR();
2449 GPRReg valueTagGPR
= value
.tagGPR();
2450 GPRReg valuePayloadGPR
= value
.payloadGPR();
2453 if (node
->op() == PutByValDirect
)
2454 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict
: operationPutByValDirectCellNonStrict
, baseGPR
, propertyTagGPR
, propertyPayloadGPR
, valueTagGPR
, valuePayloadGPR
);
2456 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByValCellStrict
: operationPutByValCellNonStrict
, baseGPR
, propertyTagGPR
, propertyPayloadGPR
, valueTagGPR
, valuePayloadGPR
);
2459 alreadyHandled
= true;
2469 SpeculateCellOperand
base(this, child1
);
2470 SpeculateStrictInt32Operand
property(this, child2
);
2472 GPRReg baseReg
= base
.gpr();
2473 GPRReg propertyReg
= property
.gpr();
2475 switch (arrayMode
.type()) {
2476 case Array::Int32
: {
2477 SpeculateInt32Operand
value(this, child3
);
2479 GPRReg valuePayloadReg
= value
.gpr();
2484 compileContiguousPutByVal(node
, base
, property
, value
, valuePayloadReg
, TrustedImm32(JSValue::Int32Tag
));
2487 case Array::Contiguous
: {
2488 JSValueOperand
value(this, child3
);
2490 GPRReg valueTagReg
= value
.tagGPR();
2491 GPRReg valuePayloadReg
= value
.payloadGPR();
2496 compileContiguousPutByVal(node
, base
, property
, value
, valuePayloadReg
, valueTagReg
);
2499 case Array::Double
: {
2500 compileDoublePutByVal(node
, base
, property
);
2503 case Array::ArrayStorage
:
2504 case Array::SlowPutArrayStorage
: {
2505 JSValueOperand
value(this, child3
);
2507 GPRReg valueTagReg
= value
.tagGPR();
2508 GPRReg valuePayloadReg
= value
.payloadGPR();
2513 StorageOperand
storage(this, child4
);
2514 GPRReg storageReg
= storage
.gpr();
2516 if (node
->op() == PutByValAlias
) {
2517 // Store the value to the array.
2518 GPRReg propertyReg
= property
.gpr();
2519 m_jit
.store32(value
.tagGPR(), MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2520 m_jit
.store32(value
.payloadGPR(), MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2526 MacroAssembler::JumpList slowCases
;
2528 MacroAssembler::Jump beyondArrayBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2529 if (!arrayMode
.isOutOfBounds())
2530 speculationCheck(OutOfBounds
, JSValueRegs(), 0, beyondArrayBounds
);
2532 slowCases
.append(beyondArrayBounds
);
2534 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2535 if (arrayMode
.isInBounds()) {
2537 StoreToHole
, JSValueRegs(), 0,
2538 m_jit
.branch32(MacroAssembler::Equal
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
)));
2540 MacroAssembler::Jump notHoleValue
= m_jit
.branch32(MacroAssembler::NotEqual
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
));
2541 if (arrayMode
.isSlowPut()) {
2542 // This is sort of strange. If we wanted to optimize this code path, we would invert
2543 // the above branch. But it's simply not worth it since this only happens if we're
2544 // already having a bad time.
2545 slowCases
.append(m_jit
.jump());
2547 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, ArrayStorage::numValuesInVectorOffset()));
2549 // If we're writing to a hole we might be growing the array;
2550 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2551 m_jit
.add32(TrustedImm32(1), propertyReg
);
2552 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2553 m_jit
.sub32(TrustedImm32(1), propertyReg
);
2555 lengthDoesNotNeedUpdate
.link(&m_jit
);
2557 notHoleValue
.link(&m_jit
);
2560 // Store the value to the array.
2561 m_jit
.store32(valueTagReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2562 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2569 if (!slowCases
.empty()) {
2570 if (node
->op() == PutByValDirect
) {
2571 addSlowPathGenerator(slowPathCall(
2573 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
2574 NoResult
, baseReg
, propertyReg
, valueTagReg
, valuePayloadReg
));
2576 addSlowPathGenerator(slowPathCall(
2578 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2579 NoResult
, baseReg
, propertyReg
, valueTagReg
, valuePayloadReg
));
2583 noResult(node
, UseChildrenCalledExplicitly
);
2587 case Array::Arguments
:
2588 // FIXME: we could at some point make this work. Right now we're assuming that the register
2589 // pressure would be too great.
2590 RELEASE_ASSERT_NOT_REACHED();
2594 TypedArrayType type
= arrayMode
.typedArrayType();
2596 compilePutByValForIntTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2598 compilePutByValForFloatTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2604 if (compileRegExpExec(node
))
2607 if (!node
->adjustedRefCount()) {
2608 SpeculateCellOperand
base(this, node
->child1());
2609 SpeculateCellOperand
argument(this, node
->child2());
2610 GPRReg baseGPR
= base
.gpr();
2611 GPRReg argumentGPR
= argument
.gpr();
2614 GPRResult
result(this);
2615 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2617 // Must use jsValueResult because otherwise we screw up register
2618 // allocation, which thinks that this node has a result.
2619 booleanResult(result
.gpr(), node
);
2623 SpeculateCellOperand
base(this, node
->child1());
2624 SpeculateCellOperand
argument(this, node
->child2());
2625 GPRReg baseGPR
= base
.gpr();
2626 GPRReg argumentGPR
= argument
.gpr();
2629 GPRResult2
resultTag(this);
2630 GPRResult
resultPayload(this);
2631 callOperation(operationRegExpExec
, resultTag
.gpr(), resultPayload
.gpr(), baseGPR
, argumentGPR
);
2633 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2638 SpeculateCellOperand
base(this, node
->child1());
2639 SpeculateCellOperand
argument(this, node
->child2());
2640 GPRReg baseGPR
= base
.gpr();
2641 GPRReg argumentGPR
= argument
.gpr();
2644 GPRResult
result(this);
2645 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2647 // If we add a DataFormatBool, we should use it here.
2648 booleanResult(result
.gpr(), node
);
2653 ASSERT(node
->arrayMode().isJSArray());
2655 SpeculateCellOperand
base(this, node
->child1());
2656 GPRTemporary
storageLength(this);
2658 GPRReg baseGPR
= base
.gpr();
2659 GPRReg storageLengthGPR
= storageLength
.gpr();
2661 StorageOperand
storage(this, node
->child3());
2662 GPRReg storageGPR
= storage
.gpr();
2664 switch (node
->arrayMode().type()) {
2665 case Array::Int32
: {
2666 SpeculateInt32Operand
value(this, node
->child2());
2667 GPRReg valuePayloadGPR
= value
.gpr();
2669 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2670 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2671 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2672 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2673 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2674 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2675 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2677 addSlowPathGenerator(
2679 slowPath
, this, operationArrayPush
,
2680 JSValueRegs(storageGPR
, storageLengthGPR
),
2681 TrustedImm32(JSValue::Int32Tag
), valuePayloadGPR
, baseGPR
));
2683 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2687 case Array::Contiguous
: {
2688 JSValueOperand
value(this, node
->child2());
2689 GPRReg valueTagGPR
= value
.tagGPR();
2690 GPRReg valuePayloadGPR
= value
.payloadGPR();
2692 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2693 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2694 m_jit
.store32(valueTagGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2695 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2696 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2697 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2698 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2700 addSlowPathGenerator(
2702 slowPath
, this, operationArrayPush
,
2703 JSValueRegs(storageGPR
, storageLengthGPR
),
2704 valueTagGPR
, valuePayloadGPR
, baseGPR
));
2706 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2710 case Array::Double
: {
2711 SpeculateDoubleOperand
value(this, node
->child2());
2712 FPRReg valueFPR
= value
.fpr();
2715 JSValueRegs(), node
->child2(), SpecDoubleReal
,
2716 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, valueFPR
, valueFPR
));
2718 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2719 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2720 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
2721 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2722 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2723 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2725 addSlowPathGenerator(
2727 slowPath
, this, operationArrayPushDouble
,
2728 JSValueRegs(storageGPR
, storageLengthGPR
),
2729 valueFPR
, baseGPR
));
2731 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2735 case Array::ArrayStorage
: {
2736 JSValueOperand
value(this, node
->child2());
2737 GPRReg valueTagGPR
= value
.tagGPR();
2738 GPRReg valuePayloadGPR
= value
.payloadGPR();
2740 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
2742 // Refuse to handle bizarre lengths.
2743 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Above
, storageLengthGPR
, TrustedImm32(0x7ffffffe)));
2745 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
2747 m_jit
.store32(valueTagGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2748 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2750 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2751 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
2752 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
2753 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2755 addSlowPathGenerator(slowPathCall(slowPath
, this, operationArrayPush
, JSValueRegs(storageGPR
, storageLengthGPR
), valueTagGPR
, valuePayloadGPR
, baseGPR
));
2757 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2769 ASSERT(node
->arrayMode().isJSArray());
2771 SpeculateCellOperand
base(this, node
->child1());
2772 StorageOperand
storage(this, node
->child2());
2773 GPRTemporary
valueTag(this);
2774 GPRTemporary
valuePayload(this);
2776 GPRReg baseGPR
= base
.gpr();
2777 GPRReg valueTagGPR
= valueTag
.gpr();
2778 GPRReg valuePayloadGPR
= valuePayload
.gpr();
2779 GPRReg storageGPR
= storage
.gpr();
2781 switch (node
->arrayMode().type()) {
2783 case Array::Contiguous
: {
2785 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), valuePayloadGPR
);
2786 MacroAssembler::Jump undefinedCase
=
2787 m_jit
.branchTest32(MacroAssembler::Zero
, valuePayloadGPR
);
2788 m_jit
.sub32(TrustedImm32(1), valuePayloadGPR
);
2790 valuePayloadGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2792 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)),
2794 MacroAssembler::Jump slowCase
= m_jit
.branch32(MacroAssembler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
2796 MacroAssembler::TrustedImm32(JSValue::EmptyValueTag
),
2797 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2799 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)),
2802 addSlowPathGenerator(
2804 undefinedCase
, this,
2805 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
2806 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
2807 addSlowPathGenerator(
2809 slowCase
, this, operationArrayPopAndRecoverLength
,
2810 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
2812 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
2816 case Array::Double
: {
2817 FPRTemporary
temp(this);
2818 FPRReg tempFPR
= temp
.fpr();
2821 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), valuePayloadGPR
);
2822 MacroAssembler::Jump undefinedCase
=
2823 m_jit
.branchTest32(MacroAssembler::Zero
, valuePayloadGPR
);
2824 m_jit
.sub32(TrustedImm32(1), valuePayloadGPR
);
2826 valuePayloadGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2828 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
),
2830 MacroAssembler::Jump slowCase
= m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempFPR
, tempFPR
);
2831 JSValue nan
= JSValue(JSValue::EncodeAsDouble
, PNaN
);
2833 MacroAssembler::TrustedImm32(nan
.u
.asBits
.tag
),
2834 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2836 MacroAssembler::TrustedImm32(nan
.u
.asBits
.payload
),
2837 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2838 boxDouble(tempFPR
, valueTagGPR
, valuePayloadGPR
);
2840 addSlowPathGenerator(
2842 undefinedCase
, this,
2843 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
2844 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
2845 addSlowPathGenerator(
2847 slowCase
, this, operationArrayPopAndRecoverLength
,
2848 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
2850 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
2854 case Array::ArrayStorage
: {
2855 GPRTemporary
storageLength(this);
2856 GPRReg storageLengthGPR
= storageLength
.gpr();
2858 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
2860 JITCompiler::JumpList setUndefinedCases
;
2861 setUndefinedCases
.append(m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
));
2863 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
2865 MacroAssembler::Jump slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
2867 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), valueTagGPR
);
2868 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), valuePayloadGPR
);
2870 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
2872 setUndefinedCases
.append(m_jit
.branch32(MacroAssembler::Equal
, TrustedImm32(JSValue::EmptyValueTag
), valueTagGPR
));
2874 m_jit
.store32(TrustedImm32(JSValue::EmptyValueTag
), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2876 m_jit
.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
2878 addSlowPathGenerator(
2880 setUndefinedCases
, this,
2881 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
2882 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
2884 addSlowPathGenerator(
2886 slowCase
, this, operationArrayPop
,
2887 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
2889 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
2901 jump(node
->targetBlock());
2915 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT2
);
2916 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
2917 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
2919 // Return the result in returnValueGPR.
2920 JSValueOperand
op1(this, node
->child1());
2923 boxDouble(op1
.fpr(), GPRInfo::returnValueGPR2
, GPRInfo::returnValueGPR
);
2925 if (op1
.payloadGPR() == GPRInfo::returnValueGPR2
&& op1
.tagGPR() == GPRInfo::returnValueGPR
)
2926 m_jit
.swap(GPRInfo::returnValueGPR
, GPRInfo::returnValueGPR2
);
2927 else if (op1
.payloadGPR() == GPRInfo::returnValueGPR2
) {
2928 m_jit
.move(op1
.payloadGPR(), GPRInfo::returnValueGPR
);
2929 m_jit
.move(op1
.tagGPR(), GPRInfo::returnValueGPR2
);
2931 m_jit
.move(op1
.tagGPR(), GPRInfo::returnValueGPR2
);
2932 m_jit
.move(op1
.payloadGPR(), GPRInfo::returnValueGPR
);
2936 m_jit
.emitFunctionEpilogue();
2944 case ThrowReferenceError
: {
2945 // We expect that throw statements are rare and are intended to exit the code block
2946 // anyway, so we just OSR back to the old JIT for now.
2947 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
2951 case BooleanToNumber
: {
2952 switch (node
->child1().useKind()) {
2954 SpeculateBooleanOperand
value(this, node
->child1());
2955 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
2957 m_jit
.move(value
.gpr(), result
.gpr());
2959 int32Result(result
.gpr(), node
);
2964 JSValueOperand
value(this, node
->child1());
2965 GPRTemporary
resultTag(this);
2966 GPRTemporary
resultPayload(this);
2968 GPRReg valueTagGPR
= value
.tagGPR();
2969 GPRReg valuePayloadGPR
= value
.payloadGPR();
2970 GPRReg resultTagGPR
= resultTag
.gpr();
2971 GPRReg resultPayloadGPR
= resultPayload
.gpr();
2973 m_jit
.move(valuePayloadGPR
, resultPayloadGPR
);
2974 JITCompiler::Jump isBoolean
= m_jit
.branch32(
2975 JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::BooleanTag
));
2976 m_jit
.move(valueTagGPR
, resultTagGPR
);
2977 JITCompiler::Jump done
= m_jit
.jump();
2978 isBoolean
.link(&m_jit
);
2979 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), resultTagGPR
);
2982 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
2987 RELEASE_ASSERT_NOT_REACHED();
2994 RELEASE_ASSERT(node
->child1().useKind() == UntypedUse
);
2995 JSValueOperand
op1(this, node
->child1());
2996 GPRTemporary
resultTag(this, Reuse
, op1
, TagWord
);
2997 GPRTemporary
resultPayload(this, Reuse
, op1
, PayloadWord
);
2999 GPRReg op1TagGPR
= op1
.tagGPR();
3000 GPRReg op1PayloadGPR
= op1
.payloadGPR();
3001 GPRReg resultTagGPR
= resultTag
.gpr();
3002 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3006 if (!(m_state
.forNode(node
->child1()).m_type
& ~(SpecFullNumber
| SpecBoolean
))) {
3007 m_jit
.move(op1TagGPR
, resultTagGPR
);
3008 m_jit
.move(op1PayloadGPR
, resultPayloadGPR
);
3010 MacroAssembler::Jump alreadyPrimitive
= branchNotCell(op1
.jsValueRegs());
3011 MacroAssembler::Jump notPrimitive
= m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(op1PayloadGPR
, JSCell::structureIDOffset()), MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
3013 alreadyPrimitive
.link(&m_jit
);
3014 m_jit
.move(op1TagGPR
, resultTagGPR
);
3015 m_jit
.move(op1PayloadGPR
, resultPayloadGPR
);
3017 addSlowPathGenerator(
3019 notPrimitive
, this, operationToPrimitive
,
3020 JSValueRegs(resultTagGPR
, resultPayloadGPR
), op1TagGPR
, op1PayloadGPR
));
3023 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3028 if (node
->child1().useKind() == UntypedUse
) {
3029 JSValueOperand
op1(this, node
->child1());
3030 GPRReg op1PayloadGPR
= op1
.payloadGPR();
3031 GPRReg op1TagGPR
= op1
.tagGPR();
3033 GPRResult
result(this);
3034 GPRReg resultGPR
= result
.gpr();
3038 JITCompiler::Jump done
;
3039 if (node
->child1()->prediction() & SpecString
) {
3040 JITCompiler::Jump slowPath1
= branchNotCell(op1
.jsValueRegs());
3041 JITCompiler::Jump slowPath2
= m_jit
.branchPtr(
3042 JITCompiler::NotEqual
,
3043 JITCompiler::Address(op1PayloadGPR
, JSCell::structureIDOffset()),
3044 TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
3045 m_jit
.move(op1PayloadGPR
, resultGPR
);
3046 done
= m_jit
.jump();
3047 slowPath1
.link(&m_jit
);
3048 slowPath2
.link(&m_jit
);
3050 callOperation(operationToString
, resultGPR
, op1TagGPR
, op1PayloadGPR
);
3053 cellResult(resultGPR
, node
);
3057 compileToStringOnCell(node
);
3061 case NewStringObject
: {
3062 compileNewStringObject(node
);
3067 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3068 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3069 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3070 ASSERT(structure
->indexingType() == node
->indexingType());
3072 hasUndecided(structure
->indexingType())
3073 || hasInt32(structure
->indexingType())
3074 || hasDouble(structure
->indexingType())
3075 || hasContiguous(structure
->indexingType()));
3077 unsigned numElements
= node
->numChildren();
3079 GPRTemporary
result(this);
3080 GPRTemporary
storage(this);
3082 GPRReg resultGPR
= result
.gpr();
3083 GPRReg storageGPR
= storage
.gpr();
3085 emitAllocateJSArray(resultGPR
, structure
, storageGPR
, numElements
);
3087 // At this point, one way or another, resultGPR and storageGPR have pointers to
3088 // the JSArray and the Butterfly, respectively.
3090 ASSERT(!hasUndecided(structure
->indexingType()) || !node
->numChildren());
3092 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3093 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3094 switch (node
->indexingType()) {
3095 case ALL_BLANK_INDEXING_TYPES
:
3096 case ALL_UNDECIDED_INDEXING_TYPES
:
3099 case ALL_DOUBLE_INDEXING_TYPES
: {
3100 SpeculateDoubleOperand
operand(this, use
);
3101 FPRReg opFPR
= operand
.fpr();
3103 JSValueRegs(), use
, SpecDoubleReal
,
3104 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3106 m_jit
.storeDouble(opFPR
, MacroAssembler::Address(storageGPR
, sizeof(double) * operandIdx
));
3109 case ALL_INT32_INDEXING_TYPES
: {
3110 SpeculateInt32Operand
operand(this, use
);
3111 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3112 m_jit
.store32(operand
.gpr(), MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3115 case ALL_CONTIGUOUS_INDEXING_TYPES
: {
3116 JSValueOperand
operand(this, m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
]);
3117 GPRReg opTagGPR
= operand
.tagGPR();
3118 GPRReg opPayloadGPR
= operand
.payloadGPR();
3119 m_jit
.store32(opTagGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3120 m_jit
.store32(opPayloadGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3129 // Yuck, we should *really* have a way of also returning the storageGPR. But
3130 // that's the least of what's wrong with this code. We really shouldn't be
3131 // allocating the array after having computed - and probably spilled to the
3132 // stack - all of the things that will go into the array. The solution to that
3133 // bigger problem will also likely fix the redundancy in reloading the storage
3134 // pointer that we currently have.
3136 cellResult(resultGPR
, node
);
3140 if (!node
->numChildren()) {
3142 GPRResult
result(this);
3144 operationNewEmptyArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()));
3145 cellResult(result
.gpr(), node
);
3149 size_t scratchSize
= sizeof(EncodedJSValue
) * node
->numChildren();
3150 ScratchBuffer
* scratchBuffer
= m_jit
.vm()->scratchBufferForSize(scratchSize
);
3151 EncodedJSValue
* buffer
= scratchBuffer
? static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer()) : 0;
3153 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3154 // Need to perform the speculations that this node promises to perform. If we're
3155 // emitting code here and the indexing type is not array storage then there is
3156 // probably something hilarious going on and we're already failing at all the
3157 // things, but at least we're going to be sound.
3158 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3159 switch (node
->indexingType()) {
3160 case ALL_BLANK_INDEXING_TYPES
:
3161 case ALL_UNDECIDED_INDEXING_TYPES
:
3164 case ALL_DOUBLE_INDEXING_TYPES
: {
3165 SpeculateDoubleOperand
operand(this, use
);
3166 FPRReg opFPR
= operand
.fpr();
3168 JSValueRegs(), use
, SpecFullRealNumber
,
3169 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3171 m_jit
.storeDouble(opFPR
, TrustedImmPtr(reinterpret_cast<char*>(buffer
+ operandIdx
)));
3174 case ALL_INT32_INDEXING_TYPES
: {
3175 SpeculateInt32Operand
operand(this, use
);
3176 GPRReg opGPR
= operand
.gpr();
3177 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
3178 m_jit
.store32(opGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
3181 case ALL_CONTIGUOUS_INDEXING_TYPES
:
3182 case ALL_ARRAY_STORAGE_INDEXING_TYPES
: {
3183 JSValueOperand
operand(this, m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
]);
3184 GPRReg opTagGPR
= operand
.tagGPR();
3185 GPRReg opPayloadGPR
= operand
.payloadGPR();
3187 m_jit
.store32(opTagGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
3188 m_jit
.store32(opPayloadGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
3198 switch (node
->indexingType()) {
3199 case ALL_DOUBLE_INDEXING_TYPES
:
3200 case ALL_INT32_INDEXING_TYPES
:
3210 GPRTemporary
scratch(this);
3212 // Tell GC mark phase how much of the scratch buffer is active during call.
3213 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3214 m_jit
.storePtr(TrustedImmPtr(scratchSize
), scratch
.gpr());
3217 GPRResult
result(this);
3220 operationNewArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3221 static_cast<void*>(buffer
), node
->numChildren());
3224 GPRTemporary
scratch(this);
3226 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3227 m_jit
.storePtr(TrustedImmPtr(0), scratch
.gpr());
3230 cellResult(result
.gpr(), node
, UseChildrenCalledExplicitly
);
3234 case NewArrayWithSize
: {
3235 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3236 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3237 SpeculateStrictInt32Operand
size(this, node
->child1());
3238 GPRTemporary
result(this);
3239 GPRTemporary
storage(this);
3240 GPRTemporary
scratch(this);
3241 GPRTemporary
scratch2(this);
3243 GPRReg sizeGPR
= size
.gpr();
3244 GPRReg resultGPR
= result
.gpr();
3245 GPRReg storageGPR
= storage
.gpr();
3246 GPRReg scratchGPR
= scratch
.gpr();
3247 GPRReg scratch2GPR
= scratch2
.gpr();
3249 MacroAssembler::JumpList slowCases
;
3250 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
)));
3252 ASSERT((1 << 3) == sizeof(JSValue
));
3253 m_jit
.move(sizeGPR
, scratchGPR
);
3254 m_jit
.lshift32(TrustedImm32(3), scratchGPR
);
3255 m_jit
.add32(TrustedImm32(sizeof(IndexingHeader
)), scratchGPR
, resultGPR
);
3257 emitAllocateBasicStorage(resultGPR
, storageGPR
));
3258 m_jit
.subPtr(scratchGPR
, storageGPR
);
3259 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3260 emitAllocateJSObject
<JSArray
>(resultGPR
, TrustedImmPtr(structure
), storageGPR
, scratchGPR
, scratch2GPR
, slowCases
);
3262 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3263 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3265 if (hasDouble(node
->indexingType())) {
3266 JSValue nan
= JSValue(JSValue::EncodeAsDouble
, PNaN
);
3268 m_jit
.move(sizeGPR
, scratchGPR
);
3269 MacroAssembler::Jump done
= m_jit
.branchTest32(MacroAssembler::Zero
, scratchGPR
);
3270 MacroAssembler::Label loop
= m_jit
.label();
3271 m_jit
.sub32(TrustedImm32(1), scratchGPR
);
3272 m_jit
.store32(TrustedImm32(nan
.u
.asBits
.tag
), MacroAssembler::BaseIndex(storageGPR
, scratchGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3273 m_jit
.store32(TrustedImm32(nan
.u
.asBits
.payload
), MacroAssembler::BaseIndex(storageGPR
, scratchGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3274 m_jit
.branchTest32(MacroAssembler::NonZero
, scratchGPR
).linkTo(loop
, &m_jit
);
3278 addSlowPathGenerator(adoptPtr(
3279 new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
3280 slowCases
, this, operationNewArrayWithSize
, resultGPR
,
3281 globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3282 globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
),
3285 cellResult(resultGPR
, node
);
3289 SpeculateStrictInt32Operand
size(this, node
->child1());
3290 GPRReg sizeGPR
= size
.gpr();
3292 GPRResult
result(this);
3293 GPRReg resultGPR
= result
.gpr();
3294 GPRReg structureGPR
= selectScratchGPR(sizeGPR
);
3295 MacroAssembler::Jump bigLength
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
));
3296 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType())), structureGPR
);
3297 MacroAssembler::Jump done
= m_jit
.jump();
3298 bigLength
.link(&m_jit
);
3299 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
)), structureGPR
);
3302 operationNewArrayWithSize
, resultGPR
, structureGPR
, sizeGPR
);
3303 cellResult(resultGPR
, node
);
3307 case NewArrayBuffer
: {
3308 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3309 IndexingType indexingType
= node
->indexingType();
3310 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(indexingType
)) {
3311 unsigned numElements
= node
->numConstants();
3313 GPRTemporary
result(this);
3314 GPRTemporary
storage(this);
3316 GPRReg resultGPR
= result
.gpr();
3317 GPRReg storageGPR
= storage
.gpr();
3319 emitAllocateJSArray(resultGPR
, globalObject
->arrayStructureForIndexingTypeDuringAllocation(indexingType
), storageGPR
, numElements
);
3321 if (node
->indexingType() == ArrayWithDouble
) {
3322 JSValue
* data
= m_jit
.codeBlock()->constantBuffer(node
->startConstant());
3323 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3328 u
.value
= data
[index
].asNumber();
3329 m_jit
.store32(Imm32(u
.halves
[0]), MacroAssembler::Address(storageGPR
, sizeof(double) * index
));
3330 m_jit
.store32(Imm32(u
.halves
[1]), MacroAssembler::Address(storageGPR
, sizeof(double) * index
+ sizeof(int32_t)));
3333 int32_t* data
= bitwise_cast
<int32_t*>(m_jit
.codeBlock()->constantBuffer(node
->startConstant()));
3334 for (unsigned index
= 0; index
< node
->numConstants() * 2; ++index
) {
3336 Imm32(data
[index
]), MacroAssembler::Address(storageGPR
, sizeof(int32_t) * index
));
3340 cellResult(resultGPR
, node
);
3345 GPRResult
result(this);
3347 callOperation(operationNewArrayBuffer
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()), node
->startConstant(), node
->numConstants());
3349 cellResult(result
.gpr(), node
);
3353 case NewTypedArray
: {
3354 switch (node
->child1().useKind()) {
3356 compileNewTypedArray(node
);
3359 JSValueOperand
argument(this, node
->child1());
3360 GPRReg argumentTagGPR
= argument
.tagGPR();
3361 GPRReg argumentPayloadGPR
= argument
.payloadGPR();
3365 GPRResult
result(this);
3366 GPRReg resultGPR
= result
.gpr();
3368 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3370 operationNewTypedArrayWithOneArgumentForType(node
->typedArrayType()),
3371 resultGPR
, globalObject
->typedArrayStructure(node
->typedArrayType()),
3372 argumentTagGPR
, argumentPayloadGPR
);
3374 cellResult(resultGPR
, node
);
3378 RELEASE_ASSERT_NOT_REACHED();
3386 GPRResult
resultPayload(this);
3387 GPRResult2
resultTag(this);
3389 callOperation(operationNewRegexp
, resultTag
.gpr(), resultPayload
.gpr(), m_jit
.codeBlock()->regexp(node
->regexpIndex()));
3391 // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
3392 cellResult(resultPayload
.gpr(), node
);
3397 ASSERT(node
->child1().useKind() == UntypedUse
);
3398 JSValueOperand
thisValue(this, node
->child1());
3399 GPRTemporary
temp(this);
3400 GPRTemporary
tempTag(this);
3401 GPRReg thisValuePayloadGPR
= thisValue
.payloadGPR();
3402 GPRReg thisValueTagGPR
= thisValue
.tagGPR();
3403 GPRReg tempGPR
= temp
.gpr();
3404 GPRReg tempTagGPR
= tempTag
.gpr();
3406 MacroAssembler::JumpList slowCases
;
3407 slowCases
.append(branchNotCell(thisValue
.jsValueRegs()));
3408 slowCases
.append(m_jit
.branch8(
3409 MacroAssembler::NotEqual
,
3410 MacroAssembler::Address(thisValuePayloadGPR
, JSCell::typeInfoTypeOffset()),
3411 TrustedImm32(FinalObjectType
)));
3412 m_jit
.move(thisValuePayloadGPR
, tempGPR
);
3413 m_jit
.move(thisValueTagGPR
, tempTagGPR
);
3414 J_JITOperation_EJ function
;
3415 if (m_jit
.graph().executableFor(node
->origin
.semantic
)->isStrictMode())
3416 function
= operationToThisStrict
;
3418 function
= operationToThis
;
3419 addSlowPathGenerator(
3421 slowCases
, this, function
,
3422 JSValueRegs(tempTagGPR
, tempGPR
), thisValueTagGPR
, thisValuePayloadGPR
));
3424 jsValueResult(tempTagGPR
, tempGPR
, node
);
3429 // Note that there is not so much profit to speculate here. The only things we
3430 // speculate on are (1) that it's a cell, since that eliminates cell checks
3431 // later if the proto is reused, and (2) if we have a FinalObject prediction
3432 // then we speculate because we want to get recompiled if it isn't (since
3433 // otherwise we'd start taking slow path a lot).
3435 SpeculateCellOperand
callee(this, node
->child1());
3436 GPRTemporary
result(this);
3437 GPRTemporary
allocator(this);
3438 GPRTemporary
structure(this);
3439 GPRTemporary
scratch(this);
3441 GPRReg calleeGPR
= callee
.gpr();
3442 GPRReg resultGPR
= result
.gpr();
3443 GPRReg allocatorGPR
= allocator
.gpr();
3444 GPRReg structureGPR
= structure
.gpr();
3445 GPRReg scratchGPR
= scratch
.gpr();
3447 MacroAssembler::JumpList slowPath
;
3449 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR
);
3450 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR
);
3451 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, allocatorGPR
));
3452 emitAllocateJSObject(resultGPR
, allocatorGPR
, structureGPR
, TrustedImmPtr(0), scratchGPR
, slowPath
);
3454 addSlowPathGenerator(slowPathCall(slowPath
, this, operationCreateThis
, resultGPR
, calleeGPR
, node
->inlineCapacity()));
3456 cellResult(resultGPR
, node
);
3460 case AllocationProfileWatchpoint
:
3461 case TypedArrayWatchpoint
: {
3467 GPRTemporary
result(this);
3468 GPRTemporary
allocator(this);
3469 GPRTemporary
scratch(this);
3471 GPRReg resultGPR
= result
.gpr();
3472 GPRReg allocatorGPR
= allocator
.gpr();
3473 GPRReg scratchGPR
= scratch
.gpr();
3475 MacroAssembler::JumpList slowPath
;
3477 Structure
* structure
= node
->structure();
3478 size_t allocationSize
= JSFinalObject::allocationSize(structure
->inlineCapacity());
3479 MarkedAllocator
* allocatorPtr
= &m_jit
.vm()->heap
.allocatorForObjectWithoutDestructor(allocationSize
);
3481 m_jit
.move(TrustedImmPtr(allocatorPtr
), allocatorGPR
);
3482 emitAllocateJSObject(resultGPR
, allocatorGPR
, TrustedImmPtr(structure
), TrustedImmPtr(0), scratchGPR
, slowPath
);
3484 addSlowPathGenerator(slowPathCall(slowPath
, this, operationNewObject
, resultGPR
, structure
));
3486 cellResult(resultGPR
, node
);
3491 GPRTemporary
result(this);
3492 m_jit
.loadPtr(JITCompiler::payloadFor(JSStack::Callee
), result
.gpr());
3493 cellResult(result
.gpr(), node
);
3498 SpeculateCellOperand
function(this, node
->child1());
3499 GPRTemporary
result(this, Reuse
, function
);
3500 m_jit
.loadPtr(JITCompiler::Address(function
.gpr(), JSFunction::offsetOfScopeChain()), result
.gpr());
3501 cellResult(result
.gpr(), node
);
3506 GPRTemporary
result(this);
3507 GPRReg resultGPR
= result
.gpr();
3509 m_jit
.loadPtr(JITCompiler::payloadFor(JSStack::ScopeChain
), resultGPR
);
3510 cellResult(resultGPR
, node
);
3514 case SkipTopScope
: {
3515 SpeculateCellOperand
scope(this, node
->child1());
3516 GPRTemporary
result(this, Reuse
, scope
);
3517 GPRReg resultGPR
= result
.gpr();
3518 m_jit
.move(scope
.gpr(), resultGPR
);
3519 JITCompiler::Jump activationNotCreated
=
3520 m_jit
.branchTestPtr(
3522 JITCompiler::payloadFor(
3523 static_cast<VirtualRegister
>(m_jit
.graph().machineActivationRegister())));
3524 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, JSScope::offsetOfNext()), resultGPR
);
3525 activationNotCreated
.link(&m_jit
);
3526 cellResult(resultGPR
, node
);
3531 SpeculateCellOperand
scope(this, node
->child1());
3532 GPRTemporary
result(this, Reuse
, scope
);
3533 m_jit
.loadPtr(JITCompiler::Address(scope
.gpr(), JSScope::offsetOfNext()), result
.gpr());
3534 cellResult(result
.gpr(), node
);
3538 case GetClosureRegisters
: {
3539 if (WriteBarrierBase
<Unknown
>* registers
= m_jit
.graph().tryGetRegisters(node
->child1().node())) {
3540 GPRTemporary
result(this);
3541 GPRReg resultGPR
= result
.gpr();
3542 m_jit
.move(TrustedImmPtr(registers
), resultGPR
);
3543 storageResult(resultGPR
, node
);
3547 SpeculateCellOperand
scope(this, node
->child1());
3548 GPRTemporary
result(this);
3549 GPRReg scopeGPR
= scope
.gpr();
3550 GPRReg resultGPR
= result
.gpr();
3552 m_jit
.loadPtr(JITCompiler::Address(scopeGPR
, JSVariableObject::offsetOfRegisters()), resultGPR
);
3553 storageResult(resultGPR
, node
);
3556 case GetClosureVar
: {
3557 StorageOperand
registers(this, node
->child1());
3558 GPRTemporary
resultTag(this);
3559 GPRTemporary
resultPayload(this);
3560 GPRReg registersGPR
= registers
.gpr();
3561 GPRReg resultTagGPR
= resultTag
.gpr();
3562 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3563 m_jit
.load32(JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTagGPR
);
3564 m_jit
.load32(JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayloadGPR
);
3565 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
3568 case PutClosureVar
: {
3569 StorageOperand
registers(this, node
->child2());
3570 JSValueOperand
value(this, node
->child3());
3571 GPRTemporary
scratchRegister(this);
3573 GPRReg registersGPR
= registers
.gpr();
3574 GPRReg valueTagGPR
= value
.tagGPR();
3575 GPRReg valuePayloadGPR
= value
.payloadGPR();
3577 speculate(node
, node
->child1());
3579 m_jit
.store32(valueTagGPR
, JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
3580 m_jit
.store32(valuePayloadGPR
, JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
3586 ASSERT(node
->prediction());
3588 switch (node
->child1().useKind()) {
3590 SpeculateCellOperand
base(this, node
->child1());
3591 GPRTemporary
resultTag(this);
3592 GPRTemporary
resultPayload(this, Reuse
, base
);
3594 GPRReg baseGPR
= base
.gpr();
3595 GPRReg resultTagGPR
= resultTag
.gpr();
3596 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3600 cachedGetById(node
->origin
.semantic
, InvalidGPRReg
, baseGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber());
3602 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3607 JSValueOperand
base(this, node
->child1());
3608 GPRTemporary
resultTag(this);
3609 GPRTemporary
resultPayload(this, Reuse
, base
, TagWord
);
3611 GPRReg baseTagGPR
= base
.tagGPR();
3612 GPRReg basePayloadGPR
= base
.payloadGPR();
3613 GPRReg resultTagGPR
= resultTag
.gpr();
3614 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3618 JITCompiler::Jump notCell
= branchNotCell(base
.jsValueRegs());
3620 cachedGetById(node
->origin
.semantic
, baseTagGPR
, basePayloadGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), notCell
);
3622 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3627 RELEASE_ASSERT_NOT_REACHED();
3633 case GetByIdFlush
: {
3634 if (!node
->prediction()) {
3635 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3639 switch (node
->child1().useKind()) {
3641 SpeculateCellOperand
base(this, node
->child1());
3643 GPRReg baseGPR
= base
.gpr();
3645 GPRResult
resultPayload(this);
3646 GPRResult2
resultTag(this);
3647 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3648 GPRReg resultTagGPR
= resultTag
.gpr();
3654 cachedGetById(node
->origin
.semantic
, InvalidGPRReg
, baseGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), JITCompiler::Jump(), DontSpill
);
3656 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3661 JSValueOperand
base(this, node
->child1());
3662 GPRReg baseTagGPR
= base
.tagGPR();
3663 GPRReg basePayloadGPR
= base
.payloadGPR();
3665 GPRResult
resultPayload(this);
3666 GPRResult2
resultTag(this);
3667 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3668 GPRReg resultTagGPR
= resultTag
.gpr();
3674 JITCompiler::Jump notCell
= branchNotCell(base
.jsValueRegs());
3676 cachedGetById(node
->origin
.semantic
, baseTagGPR
, basePayloadGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), notCell
, DontSpill
);
3678 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3683 RELEASE_ASSERT_NOT_REACHED();
3689 case GetArrayLength
:
3690 compileGetArrayLength(node
);
3693 case CheckFunction
: {
3694 SpeculateCellOperand
function(this, node
->child1());
3695 speculationCheck(BadFunction
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, function
.gpr(), node
->function()));
3700 case CheckExecutable
: {
3701 SpeculateCellOperand
function(this, node
->child1());
3702 speculationCheck(BadExecutable
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, JITCompiler::Address(function
.gpr(), JSFunction::offsetOfExecutable()), node
->executable()));
3707 case CheckStructure
: {
3708 SpeculateCellOperand
base(this, node
->child1());
3710 ASSERT(node
->structureSet().size());
3712 if (node
->structureSet().size() == 1) {
3714 BadCache
, JSValueSource::unboxedCell(base
.gpr()), 0,
3715 m_jit
.branchWeakPtr(
3716 JITCompiler::NotEqual
,
3717 JITCompiler::Address(base
.gpr(), JSCell::structureIDOffset()),
3718 node
->structureSet()[0]));
3720 GPRTemporary
structure(this);
3722 m_jit
.loadPtr(JITCompiler::Address(base
.gpr(), JSCell::structureIDOffset()), structure
.gpr());
3724 JITCompiler::JumpList done
;
3726 for (size_t i
= 0; i
< node
->structureSet().size() - 1; ++i
)
3727 done
.append(m_jit
.branchWeakPtr(JITCompiler::Equal
, structure
.gpr(), node
->structureSet()[i
]));
3730 BadCache
, JSValueSource::unboxedCell(base
.gpr()), 0,
3731 m_jit
.branchWeakPtr(
3732 JITCompiler::NotEqual
, structure
.gpr(), node
->structureSet().last()));
3741 case StructureTransitionWatchpoint
: {
3742 // There is a fascinating question here of what to do about array profiling.
3743 // We *could* try to tell the OSR exit about where the base of the access is.
3744 // The DFG will have kept it alive, though it may not be in a register, and
3745 // we shouldn't really load it since that could be a waste. For now though,
3746 // we'll just rely on the fact that when a watchpoint fires then that's
3747 // quite a hint already.
3749 m_jit
.addWeakReference(node
->structure());
3751 #if !ASSERT_DISABLED
3752 SpeculateCellOperand
op1(this, node
->child1());
3753 JITCompiler::Jump isOK
= m_jit
.branchPtr(JITCompiler::Equal
, JITCompiler::Address(op1
.gpr(), JSCell::structureIDOffset()), TrustedImmPtr(node
->structure()));
3754 m_jit
.abortWithReason(DFGIneffectiveWatchpoint
);
3757 speculateCell(node
->child1());
3764 case PhantomPutStructure
: {
3765 ASSERT(isKnownCell(node
->child1().node()));
3766 m_jit
.jitCode()->common
.notifyCompilingStructureTransition(m_jit
.graph().m_plan
, m_jit
.codeBlock(), node
);
3771 case PutStructure
: {
3772 m_jit
.jitCode()->common
.notifyCompilingStructureTransition(m_jit
.graph().m_plan
, m_jit
.codeBlock(), node
);
3774 SpeculateCellOperand
base(this, node
->child1());
3775 GPRReg baseGPR
= base
.gpr();
3777 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(node
->structureTransitionData().newStructure
), MacroAssembler::Address(baseGPR
, JSCell::structureIDOffset()));
3783 case AllocatePropertyStorage
:
3784 compileAllocatePropertyStorage(node
);
3787 case ReallocatePropertyStorage
:
3788 compileReallocatePropertyStorage(node
);
3791 case GetButterfly
: {
3792 SpeculateCellOperand
base(this, node
->child1());
3793 GPRTemporary
result(this, Reuse
, base
);
3795 GPRReg baseGPR
= base
.gpr();
3796 GPRReg resultGPR
= result
.gpr();
3798 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), resultGPR
);
3800 storageResult(resultGPR
, node
);
3804 case GetIndexedPropertyStorage
: {
3805 compileGetIndexedPropertyStorage(node
);
3809 case ConstantStoragePointer
: {
3810 compileConstantStoragePointer(node
);
3814 case GetTypedArrayByteOffset
: {
3815 compileGetTypedArrayByteOffset(node
);
3820 StorageOperand
storage(this, node
->child1());
3821 GPRTemporary
resultTag(this, Reuse
, storage
);
3822 GPRTemporary
resultPayload(this);
3824 GPRReg storageGPR
= storage
.gpr();
3825 GPRReg resultTagGPR
= resultTag
.gpr();
3826 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3828 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
3830 m_jit
.load32(JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayloadGPR
);
3831 m_jit
.load32(JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTagGPR
);
3833 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
3838 StorageOperand
storage(this, node
->child1());
3839 JSValueOperand
value(this, node
->child3());
3841 GPRReg storageGPR
= storage
.gpr();
3842 GPRReg valueTagGPR
= value
.tagGPR();
3843 GPRReg valuePayloadGPR
= value
.payloadGPR();
3845 speculate(node
, node
->child2());
3847 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
3849 m_jit
.storePtr(valueTagGPR
, JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
3850 m_jit
.storePtr(valuePayloadGPR
, JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
3856 case PutByIdFlush
: {
3857 SpeculateCellOperand
base(this, node
->child1());
3858 JSValueOperand
value(this, node
->child2());
3859 GPRTemporary
scratch(this);
3861 GPRReg baseGPR
= base
.gpr();
3862 GPRReg valueTagGPR
= value
.tagGPR();
3863 GPRReg valuePayloadGPR
= value
.payloadGPR();
3864 GPRReg scratchGPR
= scratch
.gpr();
3867 cachedPutById(node
->origin
.semantic
, baseGPR
, valueTagGPR
, valuePayloadGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
, MacroAssembler::Jump(), DontSpill
);
3874 SpeculateCellOperand
base(this, node
->child1());
3875 JSValueOperand
value(this, node
->child2());
3876 GPRTemporary
scratch(this);
3878 GPRReg baseGPR
= base
.gpr();
3879 GPRReg valueTagGPR
= value
.tagGPR();
3880 GPRReg valuePayloadGPR
= value
.payloadGPR();
3881 GPRReg scratchGPR
= scratch
.gpr();
3883 cachedPutById(node
->origin
.semantic
, baseGPR
, valueTagGPR
, valuePayloadGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
);
3889 case PutByIdDirect
: {
3890 SpeculateCellOperand
base(this, node
->child1());
3891 JSValueOperand
value(this, node
->child2());
3892 GPRTemporary
scratch(this);
3894 GPRReg baseGPR
= base
.gpr();
3895 GPRReg valueTagGPR
= value
.tagGPR();
3896 GPRReg valuePayloadGPR
= value
.payloadGPR();
3897 GPRReg scratchGPR
= scratch
.gpr();
3899 cachedPutById(node
->origin
.semantic
, baseGPR
, valueTagGPR
, valuePayloadGPR
, scratchGPR
, node
->identifierNumber(), Direct
);
3905 case GetGlobalVar
: {
3906 GPRTemporary
resultPayload(this);
3907 GPRTemporary
resultTag(this);
3909 m_jit
.move(TrustedImmPtr(node
->registerPointer()), resultPayload
.gpr());
3910 m_jit
.load32(JITCompiler::Address(resultPayload
.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTag
.gpr());
3911 m_jit
.load32(JITCompiler::Address(resultPayload
.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayload
.gpr());
3913 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
3917 case PutGlobalVar
: {
3918 JSValueOperand
value(this, node
->child1());
3920 // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have
3921 // a spare register - a good optimization would be to put the register pointer into
3922 // a register and then do a zero offset store followed by a four-offset store (or
3923 // vice-versa depending on endianness).
3924 m_jit
.store32(value
.tagGPR(), node
->registerPointer()->tagPointer());
3925 m_jit
.store32(value
.payloadGPR(), node
->registerPointer()->payloadPointer());
3932 VariableWatchpointSet
* set
= node
->variableWatchpointSet();
3934 JSValueOperand
value(this, node
->child1());
3935 GPRReg valueTagGPR
= value
.tagGPR();
3936 GPRReg valuePayloadGPR
= value
.payloadGPR();
3938 GPRTemporary
temp(this);
3939 GPRReg tempGPR
= temp
.gpr();
3941 m_jit
.load8(set
->addressOfState(), tempGPR
);
3943 JITCompiler::Jump isDone
= m_jit
.branch32(JITCompiler::Equal
, tempGPR
, TrustedImm32(IsInvalidated
));
3944 JITCompiler::JumpList notifySlow
;
3945 notifySlow
.append(m_jit
.branch32(
3946 JITCompiler::NotEqual
,
3947 JITCompiler::AbsoluteAddress(set
->addressOfInferredValue()->payloadPointer()),
3949 notifySlow
.append(m_jit
.branch32(
3950 JITCompiler::NotEqual
,
3951 JITCompiler::AbsoluteAddress(set
->addressOfInferredValue()->tagPointer()),
3953 addSlowPathGenerator(
3954 slowPathCall(notifySlow
, this, operationNotifyWrite
, NoResult
, set
, valueTagGPR
, valuePayloadGPR
));
3955 isDone
.link(&m_jit
);
3961 case VarInjectionWatchpoint
:
3962 case VariableWatchpoint
: {
3967 case CheckHasInstance
: {
3968 SpeculateCellOperand
base(this, node
->child1());
3969 GPRTemporary
structure(this);
3971 // Speculate that base 'ImplementsDefaultHasInstance'.
3972 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branchTest8(
3973 MacroAssembler::Zero
,
3974 MacroAssembler::Address(base
.gpr(), JSCell::typeInfoFlagsOffset()),
3975 MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance
)));
3982 compileInstanceOf(node
);
3987 JSValueOperand
value(this, node
->child1());
3988 GPRTemporary
result(this);
3989 GPRTemporary
localGlobalObject(this);
3990 GPRTemporary
remoteGlobalObject(this);
3992 JITCompiler::Jump isCell
= branchIsCell(value
.jsValueRegs());
3994 m_jit
.compare32(JITCompiler::Equal
, value
.tagGPR(), TrustedImm32(JSValue::UndefinedTag
), result
.gpr());
3995 JITCompiler::Jump done
= m_jit
.jump();
3997 isCell
.link(&m_jit
);
3998 JITCompiler::Jump notMasqueradesAsUndefined
;
3999 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
4000 m_jit
.move(TrustedImm32(0), result
.gpr());
4001 notMasqueradesAsUndefined
= m_jit
.jump();
4003 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
4004 JITCompiler::NonZero
,
4005 JITCompiler::Address(value
.payloadGPR(), JSCell::typeInfoFlagsOffset()),
4006 TrustedImm32(MasqueradesAsUndefined
));
4007 m_jit
.move(TrustedImm32(0), result
.gpr());
4008 notMasqueradesAsUndefined
= m_jit
.jump();
4010 isMasqueradesAsUndefined
.link(&m_jit
);
4011 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
4012 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
4013 m_jit
.move(TrustedImmPtr(m_jit
.globalObjectFor(node
->origin
.semantic
)), localGlobalObjectGPR
);
4014 m_jit
.loadPtr(JITCompiler::Address(value
.payloadGPR(), JSCell::structureIDOffset()), result
.gpr());
4015 m_jit
.loadPtr(JITCompiler::Address(result
.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
4016 m_jit
.compare32(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, result
.gpr());
4019 notMasqueradesAsUndefined
.link(&m_jit
);
4021 booleanResult(result
.gpr(), node
);
4026 JSValueOperand
value(this, node
->child1());
4027 GPRTemporary
result(this, Reuse
, value
, TagWord
);
4029 m_jit
.compare32(JITCompiler::Equal
, value
.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag
), result
.gpr());
4030 booleanResult(result
.gpr(), node
);
4035 JSValueOperand
value(this, node
->child1());
4036 GPRTemporary
result(this, Reuse
, value
, TagWord
);
4038 m_jit
.add32(TrustedImm32(1), value
.tagGPR(), result
.gpr());
4039 m_jit
.compare32(JITCompiler::Below
, result
.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag
+ 1), result
.gpr());
4040 booleanResult(result
.gpr(), node
);
4045 JSValueOperand
value(this, node
->child1());
4046 GPRTemporary
result(this, Reuse
, value
, TagWord
);
4048 JITCompiler::Jump isNotCell
= branchNotCell(value
.jsValueRegs());
4050 m_jit
.compare8(JITCompiler::Equal
,
4051 JITCompiler::Address(value
.payloadGPR(), JSCell::typeInfoTypeOffset()),
4052 TrustedImm32(StringType
),
4054 JITCompiler::Jump done
= m_jit
.jump();
4056 isNotCell
.link(&m_jit
);
4057 m_jit
.move(TrustedImm32(0), result
.gpr());
4060 booleanResult(result
.gpr(), node
);
4065 JSValueOperand
value(this, node
->child1());
4066 GPRReg valueTagGPR
= value
.tagGPR();
4067 GPRReg valuePayloadGPR
= value
.payloadGPR();
4068 GPRResult
result(this);
4069 GPRReg resultGPR
= result
.gpr();
4071 callOperation(operationIsObject
, resultGPR
, valueTagGPR
, valuePayloadGPR
);
4072 booleanResult(result
.gpr(), node
);
4077 JSValueOperand
value(this, node
->child1());
4078 GPRReg valueTagGPR
= value
.tagGPR();
4079 GPRReg valuePayloadGPR
= value
.payloadGPR();
4080 GPRResult
result(this);
4081 GPRReg resultGPR
= result
.gpr();
4083 callOperation(operationIsFunction
, resultGPR
, valueTagGPR
, valuePayloadGPR
);
4084 booleanResult(result
.gpr(), node
);
4088 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
4089 GPRReg tagGPR
= value
.tagGPR();
4090 GPRReg payloadGPR
= value
.payloadGPR();
4091 GPRTemporary
temp(this);
4092 GPRReg tempGPR
= temp
.gpr();
4093 GPRResult
result(this);
4094 GPRReg resultGPR
= result
.gpr();
4095 JITCompiler::JumpList doneJumps
;
4099 ASSERT(node
->child1().useKind() == UntypedUse
|| node
->child1().useKind() == CellUse
|| node
->child1().useKind() == StringUse
);
4101 JITCompiler::Jump isNotCell
= branchNotCell(value
.jsValueRegs());
4102 if (node
->child1().useKind() != UntypedUse
)
4103 DFG_TYPE_CHECK(JSValueRegs(tagGPR
, payloadGPR
), node
->child1(), SpecCell
, isNotCell
);
4105 if (!node
->child1()->shouldSpeculateObject() || node
->child1().useKind() == StringUse
) {
4106 JITCompiler::Jump notString
= m_jit
.branch8(
4107 JITCompiler::NotEqual
,
4108 JITCompiler::Address(payloadGPR
, JSCell::typeInfoTypeOffset()),
4109 TrustedImm32(StringType
));
4110 if (node
->child1().useKind() == StringUse
)
4111 DFG_TYPE_CHECK(JSValueRegs(tagGPR
, payloadGPR
), node
->child1(), SpecString
, notString
);
4112 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.stringString()), resultGPR
);
4113 doneJumps
.append(m_jit
.jump());
4114 if (node
->child1().useKind() != StringUse
) {
4115 notString
.link(&m_jit
);
4116 callOperation(operationTypeOf
, resultGPR
, payloadGPR
);
4117 doneJumps
.append(m_jit
.jump());
4120 callOperation(operationTypeOf
, resultGPR
, payloadGPR
);
4121 doneJumps
.append(m_jit
.jump());
4124 if (node
->child1().useKind() == UntypedUse
) {
4125 isNotCell
.link(&m_jit
);
4127 m_jit
.add32(TrustedImm32(1), tagGPR
, tempGPR
);
4128 JITCompiler::Jump notNumber
= m_jit
.branch32(JITCompiler::AboveOrEqual
, tempGPR
, JITCompiler::TrustedImm32(JSValue::LowestTag
+ 1));
4129 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.numberString()), resultGPR
);
4130 doneJumps
.append(m_jit
.jump());
4131 notNumber
.link(&m_jit
);
4133 JITCompiler::Jump notUndefined
= m_jit
.branch32(JITCompiler::NotEqual
, tagGPR
, TrustedImm32(JSValue::UndefinedTag
));
4134 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.undefinedString()), resultGPR
);
4135 doneJumps
.append(m_jit
.jump());
4136 notUndefined
.link(&m_jit
);
4138 JITCompiler::Jump notNull
= m_jit
.branch32(JITCompiler::NotEqual
, tagGPR
, TrustedImm32(JSValue::NullTag
));
4139 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.objectString()), resultGPR
);
4140 doneJumps
.append(m_jit
.jump());
4141 notNull
.link(&m_jit
);
4143 // Only boolean left
4144 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.booleanString()), resultGPR
);
4146 doneJumps
.link(&m_jit
);
4147 cellResult(resultGPR
, node
);
4159 case CreateActivation
: {
4160 JSValueOperand
value(this, node
->child1());
4161 GPRTemporary
result(this, Reuse
, value
, PayloadWord
);
4163 GPRReg valueTagGPR
= value
.tagGPR();
4164 GPRReg valuePayloadGPR
= value
.payloadGPR();
4165 GPRReg resultGPR
= result
.gpr();
4167 m_jit
.move(valuePayloadGPR
, resultGPR
);
4169 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4171 addSlowPathGenerator(
4173 notCreated
, this, operationCreateActivation
, resultGPR
,
4174 framePointerOffsetToGetActivationRegisters()));
4176 cellResult(resultGPR
, node
);
4180 case FunctionReentryWatchpoint
: {
4185 case CreateArguments
: {
4186 JSValueOperand
value(this, node
->child1());
4187 GPRTemporary
scratch1(this);
4188 GPRTemporary
scratch2(this);
4189 GPRTemporary
result(this, Reuse
, value
, PayloadWord
);
4191 GPRReg valueTagGPR
= value
.tagGPR();
4192 GPRReg valuePayloadGPR
= value
.payloadGPR();
4193 GPRReg scratch1GPR
= scratch1
.gpr();
4194 GPRReg scratch2GPR
= scratch2
.gpr();
4195 GPRReg resultGPR
= result
.gpr();
4197 m_jit
.move(valuePayloadGPR
, resultGPR
);
4199 if (node
->origin
.semantic
.inlineCallFrame
) {
4200 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4201 addSlowPathGenerator(
4203 notCreated
, this, operationCreateInlinedArguments
, resultGPR
,
4204 node
->origin
.semantic
.inlineCallFrame
));
4205 cellResult(resultGPR
, node
);
4209 FunctionExecutable
* executable
= jsCast
<FunctionExecutable
*>(m_jit
.graph().executableFor(node
->origin
.semantic
));
4210 if (m_jit
.codeBlock()->hasSlowArguments()
4211 || executable
->isStrictMode()
4212 || !executable
->parameterCount()) {
4213 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4214 addSlowPathGenerator(
4215 slowPathCall(notCreated
, this, operationCreateArguments
, resultGPR
));
4216 cellResult(resultGPR
, node
);
4220 JITCompiler::Jump alreadyCreated
= m_jit
.branch32(JITCompiler::NotEqual
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4222 MacroAssembler::JumpList slowPaths
;
4223 emitAllocateArguments(resultGPR
, scratch1GPR
, scratch2GPR
, slowPaths
);
4224 addSlowPathGenerator(
4225 slowPathCall(slowPaths
, this, operationCreateArguments
, resultGPR
));
4227 alreadyCreated
.link(&m_jit
);
4228 cellResult(resultGPR
, node
);
4232 case TearOffActivation
: {
4233 JSValueOperand
activationValue(this, node
->child1());
4234 GPRTemporary
scratch(this);
4236 GPRReg activationValueTagGPR
= activationValue
.tagGPR();
4237 GPRReg activationValuePayloadGPR
= activationValue
.payloadGPR();
4238 GPRReg scratchGPR
= scratch
.gpr();
4240 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, activationValueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4242 SymbolTable
* symbolTable
= m_jit
.symbolTableFor(node
->origin
.semantic
);
4243 int registersOffset
= JSActivation::registersOffset(symbolTable
);
4245 int bytecodeCaptureStart
= symbolTable
->captureStart();
4246 int machineCaptureStart
= m_jit
.graph().m_machineCaptureStart
;
4247 for (int i
= symbolTable
->captureCount(); i
--;) {
4249 JITCompiler::Address(
4250 GPRInfo::callFrameRegister
, (machineCaptureStart
- i
) * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4253 scratchGPR
, JITCompiler::Address(
4254 activationValuePayloadGPR
, registersOffset
+ (bytecodeCaptureStart
- i
) * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
4256 JITCompiler::Address(
4257 GPRInfo::callFrameRegister
, (machineCaptureStart
- i
) * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4260 scratchGPR
, JITCompiler::Address(
4261 activationValuePayloadGPR
, registersOffset
+ (bytecodeCaptureStart
- i
) * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
4263 m_jit
.addPtr(TrustedImm32(registersOffset
), activationValuePayloadGPR
, scratchGPR
);
4264 m_jit
.storePtr(scratchGPR
, JITCompiler::Address(activationValuePayloadGPR
, JSActivation::offsetOfRegisters()));
4266 notCreated
.link(&m_jit
);
4271 case TearOffArguments
: {
4272 JSValueOperand
unmodifiedArgumentsValue(this, node
->child1());
4273 JSValueOperand
activationValue(this, node
->child2());
4274 GPRReg unmodifiedArgumentsValuePayloadGPR
= unmodifiedArgumentsValue
.payloadGPR();
4275 GPRReg activationValuePayloadGPR
= activationValue
.payloadGPR();
4277 JITCompiler::Jump created
= m_jit
.branchTest32(
4278 JITCompiler::NonZero
, unmodifiedArgumentsValuePayloadGPR
);
4280 if (node
->origin
.semantic
.inlineCallFrame
) {
4281 addSlowPathGenerator(
4283 created
, this, operationTearOffInlinedArguments
, NoResult
,
4284 unmodifiedArgumentsValuePayloadGPR
, activationValuePayloadGPR
, node
->origin
.semantic
.inlineCallFrame
));
4286 addSlowPathGenerator(
4288 created
, this, operationTearOffArguments
, NoResult
,
4289 unmodifiedArgumentsValuePayloadGPR
, activationValuePayloadGPR
));
4296 case CheckArgumentsNotCreated
: {
4297 ASSERT(!isEmptySpeculation(
4298 m_state
.variables().operand(
4299 m_jit
.graph().argumentsRegisterFor(node
->origin
.semantic
)).m_type
));
4301 Uncountable
, JSValueRegs(), 0,
4303 JITCompiler::NotEqual
,
4304 JITCompiler::tagFor(m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
)),
4305 TrustedImm32(JSValue::EmptyValueTag
)));
4310 case GetMyArgumentsLength
: {
4311 GPRTemporary
result(this);
4312 GPRReg resultGPR
= result
.gpr();
4314 if (!isEmptySpeculation(
4315 m_state
.variables().operand(
4316 m_jit
.graph().argumentsRegisterFor(node
->origin
.semantic
)).m_type
)) {
4318 ArgumentsEscaped
, JSValueRegs(), 0,
4320 JITCompiler::NotEqual
,
4321 JITCompiler::tagFor(m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
)),
4322 TrustedImm32(JSValue::EmptyValueTag
)));
4325 ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4326 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultGPR
);
4327 m_jit
.sub32(TrustedImm32(1), resultGPR
);
4328 int32Result(resultGPR
, node
);
4332 case GetMyArgumentsLengthSafe
: {
4333 GPRTemporary
resultPayload(this);
4334 GPRTemporary
resultTag(this);
4335 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4336 GPRReg resultTagGPR
= resultTag
.gpr();
4338 JITCompiler::Jump created
= m_jit
.branch32(
4339 JITCompiler::NotEqual
,
4340 JITCompiler::tagFor(m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
)),
4341 TrustedImm32(JSValue::EmptyValueTag
));
4343 if (node
->origin
.semantic
.inlineCallFrame
) {
4345 Imm32(node
->origin
.semantic
.inlineCallFrame
->arguments
.size() - 1),
4348 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultPayloadGPR
);
4349 m_jit
.sub32(TrustedImm32(1), resultPayloadGPR
);
4351 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), resultTagGPR
);
4353 // FIXME: the slow path generator should perform a forward speculation that the
4354 // result is an integer. For now we postpone the speculation by having this return
4357 addSlowPathGenerator(
4359 created
, this, operationGetArgumentsLength
,
4360 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4361 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
).offset()));
4363 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4367 case GetMyArgumentByVal
: {
4368 SpeculateStrictInt32Operand
index(this, node
->child1());
4369 GPRTemporary
resultPayload(this);
4370 GPRTemporary
resultTag(this);
4371 GPRReg indexGPR
= index
.gpr();
4372 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4373 GPRReg resultTagGPR
= resultTag
.gpr();
4375 if (!isEmptySpeculation(
4376 m_state
.variables().operand(
4377 m_jit
.graph().argumentsRegisterFor(node
->origin
.semantic
)).m_type
)) {
4379 ArgumentsEscaped
, JSValueRegs(), 0,
4381 JITCompiler::NotEqual
,
4382 JITCompiler::tagFor(m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
)),
4383 TrustedImm32(JSValue::EmptyValueTag
)));
4386 m_jit
.add32(TrustedImm32(1), indexGPR
, resultPayloadGPR
);
4388 if (node
->origin
.semantic
.inlineCallFrame
) {
4390 Uncountable
, JSValueRegs(), 0,
4392 JITCompiler::AboveOrEqual
,
4394 Imm32(node
->origin
.semantic
.inlineCallFrame
->arguments
.size())));
4397 Uncountable
, JSValueRegs(), 0,
4399 JITCompiler::AboveOrEqual
,
4401 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4404 JITCompiler::JumpList slowArgument
;
4405 JITCompiler::JumpList slowArgumentOutOfBounds
;
4406 if (m_jit
.symbolTableFor(node
->origin
.semantic
)->slowArguments()) {
4407 RELEASE_ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4408 const SlowArgument
* slowArguments
= m_jit
.graph().m_slowArguments
.get();
4409 slowArgumentOutOfBounds
.append(
4411 JITCompiler::AboveOrEqual
, indexGPR
,
4412 Imm32(m_jit
.symbolTableFor(node
->origin
.semantic
)->parameterCount())));
4414 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4415 m_jit
.move(ImmPtr(slowArguments
), resultPayloadGPR
);
4417 JITCompiler::BaseIndex(
4418 resultPayloadGPR
, indexGPR
, JITCompiler::TimesEight
,
4419 OBJECT_OFFSETOF(SlowArgument
, index
)),
4423 JITCompiler::BaseIndex(
4424 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4425 OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4428 JITCompiler::BaseIndex(
4429 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4430 OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4432 slowArgument
.append(m_jit
.jump());
4434 slowArgumentOutOfBounds
.link(&m_jit
);
4437 JITCompiler::BaseIndex(
4438 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4439 m_jit
.offsetOfArgumentsIncludingThis(node
->origin
.semantic
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4442 JITCompiler::BaseIndex(
4443 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4444 m_jit
.offsetOfArgumentsIncludingThis(node
->origin
.semantic
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4447 slowArgument
.link(&m_jit
);
4448 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4451 case GetMyArgumentByValSafe
: {
4452 SpeculateStrictInt32Operand
index(this, node
->child1());
4453 GPRTemporary
resultPayload(this);
4454 GPRTemporary
resultTag(this);
4455 GPRReg indexGPR
= index
.gpr();
4456 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4457 GPRReg resultTagGPR
= resultTag
.gpr();
4459 JITCompiler::JumpList slowPath
;
4462 JITCompiler::NotEqual
,
4463 JITCompiler::tagFor(m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
)),
4464 TrustedImm32(JSValue::EmptyValueTag
)));
4466 m_jit
.add32(TrustedImm32(1), indexGPR
, resultPayloadGPR
);
4467 if (node
->origin
.semantic
.inlineCallFrame
) {
4470 JITCompiler::AboveOrEqual
,
4472 Imm32(node
->origin
.semantic
.inlineCallFrame
->arguments
.size())));
4476 JITCompiler::AboveOrEqual
,
4478 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4481 JITCompiler::JumpList slowArgument
;
4482 JITCompiler::JumpList slowArgumentOutOfBounds
;
4483 if (m_jit
.symbolTableFor(node
->origin
.semantic
)->slowArguments()) {
4484 RELEASE_ASSERT(!node
->origin
.semantic
.inlineCallFrame
);
4485 const SlowArgument
* slowArguments
= m_jit
.graph().m_slowArguments
.get();
4486 slowArgumentOutOfBounds
.append(
4488 JITCompiler::AboveOrEqual
, indexGPR
,
4489 Imm32(m_jit
.symbolTableFor(node
->origin
.semantic
)->parameterCount())));
4491 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4492 m_jit
.move(ImmPtr(slowArguments
), resultPayloadGPR
);
4494 JITCompiler::BaseIndex(
4495 resultPayloadGPR
, indexGPR
, JITCompiler::TimesEight
,
4496 OBJECT_OFFSETOF(SlowArgument
, index
)),
4499 JITCompiler::BaseIndex(
4500 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4501 OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4504 JITCompiler::BaseIndex(
4505 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4506 OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4508 slowArgument
.append(m_jit
.jump());
4510 slowArgumentOutOfBounds
.link(&m_jit
);
4513 JITCompiler::BaseIndex(
4514 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4515 m_jit
.offsetOfArgumentsIncludingThis(node
->origin
.semantic
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4518 JITCompiler::BaseIndex(
4519 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4520 m_jit
.offsetOfArgumentsIncludingThis(node
->origin
.semantic
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4523 if (node
->origin
.semantic
.inlineCallFrame
) {
4524 addSlowPathGenerator(
4526 slowPath
, this, operationGetInlinedArgumentByVal
,
4527 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4528 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
).offset(),
4529 node
->origin
.semantic
.inlineCallFrame
, indexGPR
));
4531 addSlowPathGenerator(
4533 slowPath
, this, operationGetArgumentByVal
,
4534 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4535 m_jit
.graph().machineArgumentsRegisterFor(node
->origin
.semantic
).offset(),
4539 slowArgument
.link(&m_jit
);
4540 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4544 case NewFunctionNoCheck
:
4545 compileNewFunctionNoCheck(node
);
4549 JSValueOperand
value(this, node
->child1());
4550 GPRTemporary
resultTag(this, Reuse
, value
, TagWord
);
4551 GPRTemporary
resultPayload(this, Reuse
, value
, PayloadWord
);
4553 GPRReg valueTagGPR
= value
.tagGPR();
4554 GPRReg valuePayloadGPR
= value
.payloadGPR();
4555 GPRReg resultTagGPR
= resultTag
.gpr();
4556 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4558 m_jit
.move(valuePayloadGPR
, resultPayloadGPR
);
4559 m_jit
.move(valueTagGPR
, resultTagGPR
);
4561 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4563 addSlowPathGenerator(
4565 notCreated
, this, operationNewFunction
, JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4566 m_jit
.codeBlock()->functionDecl(node
->functionDeclIndex())));
4568 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4572 case NewFunctionExpression
:
4573 compileNewFunctionExpression(node
);
4581 case StoreBarrierWithNullCheck
: {
4582 compileStoreBarrier(node
);
4586 case ForceOSRExit
: {
4587 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
4591 case InvalidationPoint
:
4592 emitInvalidationPoint(node
);
4595 case CheckWatchdogTimer
:
4596 ASSERT(m_jit
.vm()->watchdog
);
4598 WatchdogTimerFired
, JSValueRegs(), 0,
4600 JITCompiler::NonZero
,
4601 JITCompiler::AbsoluteAddress(m_jit
.vm()->watchdog
->timerDidFireAddress())));
4604 case CountExecution
:
4605 m_jit
.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node
->executionCounter()->address()));
4610 DFG_NODE_DO_TO_CHILDREN(m_jit
.graph(), node
, speculate
);
4615 case ProfileWillCall
:
4616 case ProfileDidCall
:
4624 RELEASE_ASSERT_NOT_REACHED();
4631 case ExtractOSREntryLocal
:
4632 case CheckTierUpInLoop
:
4633 case CheckTierUpAtReturn
:
4634 case CheckTierUpAndOSREnter
:
4640 case MultiGetByOffset
:
4641 case MultiPutByOffset
:
4642 RELEASE_ASSERT_NOT_REACHED();
4649 if (node
->hasResult() && node
->mustGenerate())
4654 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR
, GPRReg valueTagGPR
, Edge valueUse
, GPRReg scratch1
, GPRReg scratch2
)
4656 JITCompiler::Jump isNotCell
;
4657 if (!isKnownCell(valueUse
.node()))
4658 isNotCell
= m_jit
.branch32(JITCompiler::NotEqual
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::CellTag
));
4660 JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered
= m_jit
.checkMarkByte(ownerGPR
);
4661 storeToWriteBarrierBuffer(ownerGPR
, scratch1
, scratch2
);
4662 ownerNotMarkedOrAlreadyRemembered
.link(&m_jit
);
4664 if (!isKnownCell(valueUse
.node()))
4665 isNotCell
.link(&m_jit
);
4668 void SpeculativeJIT::writeBarrier(JSCell
* owner
, GPRReg valueTagGPR
, Edge valueUse
, GPRReg scratch1
, GPRReg scratch2
)
4670 JITCompiler::Jump isNotCell
;
4671 if (!isKnownCell(valueUse
.node()))
4672 isNotCell
= m_jit
.branch32(JITCompiler::NotEqual
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::CellTag
));
4674 JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered
= m_jit
.checkMarkByte(owner
);
4675 storeToWriteBarrierBuffer(owner
, scratch1
, scratch2
);
4676 ownerNotMarkedOrAlreadyRemembered
.link(&m_jit
);
4678 if (!isKnownCell(valueUse
.node()))
4679 isNotCell
.link(&m_jit
);
4681 #endif // ENABLE(GGC)
4683 JITCompiler::Jump
SpeculativeJIT::branchIsCell(JSValueRegs regs
)
4685 return m_jit
.branch32(MacroAssembler::Equal
, regs
.tagGPR(), TrustedImm32(JSValue::CellTag
));
4688 JITCompiler::Jump
SpeculativeJIT::branchNotCell(JSValueRegs regs
)
4690 return m_jit
.branch32(MacroAssembler::NotEqual
, regs
.tagGPR(), TrustedImm32(JSValue::CellTag
));
4693 JITCompiler::Jump
SpeculativeJIT::branchIsOther(JSValueRegs regs
, GPRReg tempGPR
)
4695 m_jit
.move(regs
.tagGPR(), tempGPR
);
4696 m_jit
.or32(TrustedImm32(1), tempGPR
);
4697 return m_jit
.branch32(
4698 MacroAssembler::Equal
, tempGPR
,
4699 MacroAssembler::TrustedImm32(JSValue::NullTag
));
4702 JITCompiler::Jump
SpeculativeJIT::branchNotOther(JSValueRegs regs
, GPRReg tempGPR
)
4704 m_jit
.move(regs
.tagGPR(), tempGPR
);
4705 m_jit
.or32(TrustedImm32(1), tempGPR
);
4706 return m_jit
.branch32(
4707 MacroAssembler::NotEqual
, tempGPR
,
4708 MacroAssembler::TrustedImm32(JSValue::NullTag
));
4711 void SpeculativeJIT::moveTrueTo(GPRReg gpr
)
4713 m_jit
.move(TrustedImm32(1), gpr
);
4716 void SpeculativeJIT::moveFalseTo(GPRReg gpr
)
4718 m_jit
.move(TrustedImm32(0), gpr
);
4721 void SpeculativeJIT::blessBoolean(GPRReg
)
4727 } } // namespace JSC::DFG