2 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3 * Copyright (C) 2011 Intel Corporation. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "DFGSpeculativeJIT.h"
32 #include "ArrayPrototype.h"
33 #include "DFGAbstractInterpreterInlines.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGOperations.h"
36 #include "DFGSlowPathGenerator.h"
38 #include "DirectArguments.h"
39 #include "GetterSetter.h"
40 #include "JSEnvironmentRecord.h"
41 #include "JSLexicalEnvironment.h"
42 #include "JSPropertyNameEnumerator.h"
43 #include "ObjectPrototype.h"
44 #include "JSCInlines.h"
45 #include "SetupVarargsFrame.h"
46 #include "TypeProfilerLog.h"
48 namespace JSC
{ namespace DFG
{
52 bool SpeculativeJIT::fillJSValue(Edge edge
, GPRReg
& tagGPR
, GPRReg
& payloadGPR
, FPRReg
& fpr
)
54 // FIXME: For double we could fill with a FPR.
57 VirtualRegister virtualRegister
= edge
->virtualRegister();
58 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
60 switch (info
.registerFormat()) {
61 case DataFormatNone
: {
63 if (edge
->hasConstant()) {
65 payloadGPR
= allocate();
66 JSValue value
= edge
->asJSValue();
67 m_jit
.move(Imm32(value
.tag()), tagGPR
);
68 m_jit
.move(Imm32(value
.payload()), payloadGPR
);
69 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderConstant
);
70 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderConstant
);
71 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, DataFormatJS
);
73 DataFormat spillFormat
= info
.spillFormat();
74 ASSERT(spillFormat
!= DataFormatNone
&& spillFormat
!= DataFormatStorage
);
76 payloadGPR
= allocate();
77 switch (spillFormat
) {
79 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), tagGPR
);
80 spillFormat
= DataFormatJSInt32
; // This will be used as the new register format.
83 m_jit
.move(TrustedImm32(JSValue::CellTag
), tagGPR
);
84 spillFormat
= DataFormatJSCell
; // This will be used as the new register format.
86 case DataFormatBoolean
:
87 m_jit
.move(TrustedImm32(JSValue::BooleanTag
), tagGPR
);
88 spillFormat
= DataFormatJSBoolean
; // This will be used as the new register format.
91 m_jit
.load32(JITCompiler::tagFor(virtualRegister
), tagGPR
);
94 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), payloadGPR
);
95 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderSpilled
);
96 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderSpilled
);
97 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, spillFormat
== DataFormatJSDouble
? DataFormatJS
: spillFormat
);
103 case DataFormatInt32
:
105 case DataFormatBoolean
: {
106 GPRReg gpr
= info
.gpr();
107 // If the register has already been locked we need to take a copy.
108 if (m_gprs
.isLocked(gpr
)) {
109 payloadGPR
= allocate();
110 m_jit
.move(gpr
, payloadGPR
);
116 int32_t tag
= JSValue::EmptyValueTag
;
117 DataFormat fillFormat
= DataFormatJS
;
118 switch (info
.registerFormat()) {
119 case DataFormatInt32
:
120 tag
= JSValue::Int32Tag
;
121 fillFormat
= DataFormatJSInt32
;
124 tag
= JSValue::CellTag
;
125 fillFormat
= DataFormatJSCell
;
127 case DataFormatBoolean
:
128 tag
= JSValue::BooleanTag
;
129 fillFormat
= DataFormatJSBoolean
;
132 RELEASE_ASSERT_NOT_REACHED();
135 m_jit
.move(TrustedImm32(tag
), tagGPR
);
137 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderJS
);
138 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderJS
);
139 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, fillFormat
);
143 case DataFormatJSDouble
:
145 case DataFormatJSInt32
:
146 case DataFormatJSCell
:
147 case DataFormatJSBoolean
: {
148 tagGPR
= info
.tagGPR();
149 payloadGPR
= info
.payloadGPR();
151 m_gprs
.lock(payloadGPR
);
155 case DataFormatStorage
:
156 case DataFormatDouble
:
157 // this type currently never occurs
158 RELEASE_ASSERT_NOT_REACHED();
161 RELEASE_ASSERT_NOT_REACHED();
166 void SpeculativeJIT::cachedGetById(
167 CodeOrigin codeOrigin
, GPRReg baseTagGPROrNone
, GPRReg basePayloadGPR
, GPRReg resultTagGPR
, GPRReg resultPayloadGPR
,
168 unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
170 // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
171 // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to
172 // trip over one move instruction.
173 if (basePayloadGPR
== resultTagGPR
) {
174 RELEASE_ASSERT(basePayloadGPR
!= resultPayloadGPR
);
176 if (baseTagGPROrNone
== resultPayloadGPR
) {
177 m_jit
.swap(basePayloadGPR
, baseTagGPROrNone
);
178 baseTagGPROrNone
= resultTagGPR
;
180 m_jit
.move(basePayloadGPR
, resultPayloadGPR
);
181 basePayloadGPR
= resultPayloadGPR
;
184 JITGetByIdGenerator
gen(
185 m_jit
.codeBlock(), codeOrigin
, usedRegisters(),
186 JSValueRegs(baseTagGPROrNone
, basePayloadGPR
),
187 JSValueRegs(resultTagGPR
, resultPayloadGPR
), spillMode
);
189 gen
.generateFastPath(m_jit
);
191 JITCompiler::JumpList slowCases
;
192 if (slowPathTarget
.isSet())
193 slowCases
.append(slowPathTarget
);
194 slowCases
.append(gen
.slowPathJump());
196 std::unique_ptr
<SlowPathGenerator
> slowPath
;
197 if (baseTagGPROrNone
== InvalidGPRReg
) {
198 slowPath
= slowPathCall(
199 slowCases
, this, operationGetByIdOptimize
,
200 JSValueRegs(resultTagGPR
, resultPayloadGPR
), gen
.stubInfo(),
201 static_cast<int32_t>(JSValue::CellTag
), basePayloadGPR
,
202 identifierUID(identifierNumber
));
204 slowPath
= slowPathCall(
205 slowCases
, this, operationGetByIdOptimize
,
206 JSValueRegs(resultTagGPR
, resultPayloadGPR
), gen
.stubInfo(), baseTagGPROrNone
,
207 basePayloadGPR
, identifierUID(identifierNumber
));
210 m_jit
.addGetById(gen
, slowPath
.get());
211 addSlowPathGenerator(WTF::move(slowPath
));
214 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin
, GPRReg basePayloadGPR
, GPRReg valueTagGPR
, GPRReg valuePayloadGPR
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind putKind
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
216 JITPutByIdGenerator
gen(
217 m_jit
.codeBlock(), codeOrigin
, usedRegisters(),
218 JSValueRegs::payloadOnly(basePayloadGPR
), JSValueRegs(valueTagGPR
, valuePayloadGPR
),
219 scratchGPR
, spillMode
, m_jit
.ecmaModeFor(codeOrigin
), putKind
);
221 gen
.generateFastPath(m_jit
);
223 JITCompiler::JumpList slowCases
;
224 if (slowPathTarget
.isSet())
225 slowCases
.append(slowPathTarget
);
226 slowCases
.append(gen
.slowPathJump());
228 auto slowPath
= slowPathCall(
229 slowCases
, this, gen
.slowPathFunction(), NoResult
, gen
.stubInfo(), valueTagGPR
,
230 valuePayloadGPR
, basePayloadGPR
, identifierUID(identifierNumber
));
232 m_jit
.addPutById(gen
, slowPath
.get());
233 addSlowPathGenerator(WTF::move(slowPath
));
236 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
)
238 JSValueOperand
arg(this, operand
);
239 GPRReg argTagGPR
= arg
.tagGPR();
240 GPRReg argPayloadGPR
= arg
.payloadGPR();
242 GPRTemporary
resultPayload(this, Reuse
, arg
, PayloadWord
);
243 GPRReg resultPayloadGPR
= resultPayload
.gpr();
245 JITCompiler::Jump notCell
;
246 JITCompiler::Jump notMasqueradesAsUndefined
;
247 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
248 if (!isKnownCell(operand
.node()))
249 notCell
= m_jit
.branchIfNotCell(arg
.jsValueRegs());
251 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR
);
252 notMasqueradesAsUndefined
= m_jit
.jump();
254 GPRTemporary
localGlobalObject(this);
255 GPRTemporary
remoteGlobalObject(this);
257 if (!isKnownCell(operand
.node()))
258 notCell
= m_jit
.branchIfNotCell(arg
.jsValueRegs());
260 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
261 JITCompiler::NonZero
,
262 JITCompiler::Address(argPayloadGPR
, JSCell::typeInfoFlagsOffset()),
263 JITCompiler::TrustedImm32(MasqueradesAsUndefined
));
265 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR
);
266 notMasqueradesAsUndefined
= m_jit
.jump();
268 isMasqueradesAsUndefined
.link(&m_jit
);
269 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
270 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
271 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
272 m_jit
.loadPtr(JITCompiler::Address(argPayloadGPR
, JSCell::structureIDOffset()), resultPayloadGPR
);
273 m_jit
.loadPtr(JITCompiler::Address(resultPayloadGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
274 m_jit
.compare32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, resultPayloadGPR
);
277 if (!isKnownCell(operand
.node())) {
278 JITCompiler::Jump done
= m_jit
.jump();
280 notCell
.link(&m_jit
);
281 // null or undefined?
282 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
283 m_jit
.or32(TrustedImm32(1), argTagGPR
, resultPayloadGPR
);
284 m_jit
.compare32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultPayloadGPR
, TrustedImm32(JSValue::NullTag
), resultPayloadGPR
);
289 notMasqueradesAsUndefined
.link(&m_jit
);
291 booleanResult(resultPayloadGPR
, m_currentNode
);
294 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand
, Node
* branchNode
, bool invert
)
296 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
297 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
299 if (taken
== nextBlock()) {
301 BasicBlock
* tmp
= taken
;
306 JSValueOperand
arg(this, operand
);
307 GPRReg argTagGPR
= arg
.tagGPR();
308 GPRReg argPayloadGPR
= arg
.payloadGPR();
310 GPRTemporary
result(this, Reuse
, arg
, TagWord
);
311 GPRReg resultGPR
= result
.gpr();
313 JITCompiler::Jump notCell
;
315 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
316 if (!isKnownCell(operand
.node()))
317 notCell
= m_jit
.branchIfNotCell(arg
.jsValueRegs());
319 jump(invert
? taken
: notTaken
, ForceJump
);
321 GPRTemporary
localGlobalObject(this);
322 GPRTemporary
remoteGlobalObject(this);
324 if (!isKnownCell(operand
.node()))
325 notCell
= m_jit
.branchIfNotCell(arg
.jsValueRegs());
327 branchTest8(JITCompiler::Zero
,
328 JITCompiler::Address(argPayloadGPR
, JSCell::typeInfoFlagsOffset()),
329 JITCompiler::TrustedImm32(MasqueradesAsUndefined
),
330 invert
? taken
: notTaken
);
332 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
333 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
334 m_jit
.move(TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)), localGlobalObjectGPR
);
335 m_jit
.loadPtr(JITCompiler::Address(argPayloadGPR
, JSCell::structureIDOffset()), resultGPR
);
336 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
337 branchPtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, invert
? notTaken
: taken
);
340 if (!isKnownCell(operand
.node())) {
341 jump(notTaken
, ForceJump
);
343 notCell
.link(&m_jit
);
344 // null or undefined?
345 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
346 m_jit
.or32(TrustedImm32(1), argTagGPR
, resultGPR
);
347 branch32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm32(JSValue::NullTag
), taken
);
353 bool SpeculativeJIT::nonSpeculativeCompareNull(Node
* node
, Edge operand
, bool invert
)
355 unsigned branchIndexInBlock
= detectPeepHoleBranch();
356 if (branchIndexInBlock
!= UINT_MAX
) {
357 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
359 ASSERT(node
->adjustedRefCount() == 1);
361 nonSpeculativePeepholeBranchNull(operand
, branchNode
, invert
);
365 m_indexInBlock
= branchIndexInBlock
;
366 m_currentNode
= branchNode
;
371 nonSpeculativeNonPeepholeCompareNull(operand
, invert
);
376 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node
* node
, Node
* branchNode
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
378 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
379 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
381 JITCompiler::ResultCondition callResultCondition
= JITCompiler::NonZero
;
383 // The branch instruction will branch to the taken block.
384 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
385 if (taken
== nextBlock()) {
386 cond
= JITCompiler::invert(cond
);
387 callResultCondition
= JITCompiler::Zero
;
388 BasicBlock
* tmp
= taken
;
393 JSValueOperand
arg1(this, node
->child1());
394 JSValueOperand
arg2(this, node
->child2());
395 GPRReg arg1TagGPR
= arg1
.tagGPR();
396 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
397 GPRReg arg2TagGPR
= arg2
.tagGPR();
398 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
400 JITCompiler::JumpList slowPath
;
402 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
403 GPRFlushedCallResult
result(this);
404 GPRReg resultGPR
= result
.gpr();
410 callOperation(helperFunction
, resultGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
412 branchTest32(callResultCondition
, resultGPR
, taken
);
414 GPRTemporary
result(this);
415 GPRReg resultGPR
= result
.gpr();
420 if (!isKnownInteger(node
->child1().node()))
421 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg1TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
422 if (!isKnownInteger(node
->child2().node()))
423 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg2TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
425 branch32(cond
, arg1PayloadGPR
, arg2PayloadGPR
, taken
);
427 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
428 jump(notTaken
, ForceJump
);
430 slowPath
.link(&m_jit
);
432 silentSpillAllRegisters(resultGPR
);
433 callOperation(helperFunction
, resultGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
434 silentFillAllRegisters(resultGPR
);
436 branchTest32(callResultCondition
, resultGPR
, taken
);
442 m_indexInBlock
= m_block
->size() - 1;
443 m_currentNode
= branchNode
;
446 template<typename JumpType
>
447 class CompareAndBoxBooleanSlowPathGenerator
448 : public CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
> {
450 CompareAndBoxBooleanSlowPathGenerator(
451 JumpType from
, SpeculativeJIT
* jit
,
452 S_JITOperation_EJJ function
, GPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
,
453 GPRReg arg2Tag
, GPRReg arg2Payload
)
454 : CallSlowPathGenerator
<JumpType
, S_JITOperation_EJJ
, GPRReg
>(
455 from
, jit
, function
, NeedToSpill
, result
)
457 , m_arg1Payload(arg1Payload
)
459 , m_arg2Payload(arg2Payload
)
464 virtual void generateInternal(SpeculativeJIT
* jit
)
469 this->m_function
, this->m_result
, m_arg1Tag
, m_arg1Payload
, m_arg2Tag
,
471 jit
->m_jit
.and32(JITCompiler::TrustedImm32(1), this->m_result
);
477 GPRReg m_arg1Payload
;
479 GPRReg m_arg2Payload
;
482 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node
* node
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
484 JSValueOperand
arg1(this, node
->child1());
485 JSValueOperand
arg2(this, node
->child2());
486 GPRReg arg1TagGPR
= arg1
.tagGPR();
487 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
488 GPRReg arg2TagGPR
= arg2
.tagGPR();
489 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
491 JITCompiler::JumpList slowPath
;
493 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
494 GPRFlushedCallResult
result(this);
495 GPRReg resultPayloadGPR
= result
.gpr();
501 callOperation(helperFunction
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
503 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
505 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
506 GPRReg resultPayloadGPR
= resultPayload
.gpr();
511 if (!isKnownInteger(node
->child1().node()))
512 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg1TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
513 if (!isKnownInteger(node
->child2().node()))
514 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg2TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
516 m_jit
.compare32(cond
, arg1PayloadGPR
, arg2PayloadGPR
, resultPayloadGPR
);
518 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
519 addSlowPathGenerator(std::make_unique
<CompareAndBoxBooleanSlowPathGenerator
<JITCompiler::JumpList
>>(
520 slowPath
, this, helperFunction
, resultPayloadGPR
, arg1TagGPR
,
521 arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
));
524 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
528 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node
* node
, Node
* branchNode
, bool invert
)
530 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
531 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
533 // The branch instruction will branch to the taken block.
534 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
535 if (taken
== nextBlock()) {
537 BasicBlock
* tmp
= taken
;
542 JSValueOperand
arg1(this, node
->child1());
543 JSValueOperand
arg2(this, node
->child2());
544 GPRReg arg1TagGPR
= arg1
.tagGPR();
545 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
546 GPRReg arg2TagGPR
= arg2
.tagGPR();
547 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
549 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
550 GPRReg resultPayloadGPR
= resultPayload
.gpr();
555 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
556 // see if we get lucky: if the arguments are cells and they reference the same
557 // cell, then they must be strictly equal.
558 branchPtr(JITCompiler::Equal
, arg1PayloadGPR
, arg2PayloadGPR
, invert
? notTaken
: taken
);
560 silentSpillAllRegisters(resultPayloadGPR
);
561 callOperation(operationCompareStrictEqCell
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
562 silentFillAllRegisters(resultPayloadGPR
);
564 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultPayloadGPR
, taken
);
566 // FIXME: Add fast paths for twoCells, number etc.
568 silentSpillAllRegisters(resultPayloadGPR
);
569 callOperation(operationCompareStrictEq
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
570 silentFillAllRegisters(resultPayloadGPR
);
572 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultPayloadGPR
, taken
);
578 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node
* node
, bool invert
)
580 JSValueOperand
arg1(this, node
->child1());
581 JSValueOperand
arg2(this, node
->child2());
582 GPRReg arg1TagGPR
= arg1
.tagGPR();
583 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
584 GPRReg arg2TagGPR
= arg2
.tagGPR();
585 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
587 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
588 GPRReg resultPayloadGPR
= resultPayload
.gpr();
593 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
594 // see if we get lucky: if the arguments are cells and they reference the same
595 // cell, then they must be strictly equal.
596 // FIXME: this should flush registers instead of silent spill/fill.
597 JITCompiler::Jump notEqualCase
= m_jit
.branchPtr(JITCompiler::NotEqual
, arg1PayloadGPR
, arg2PayloadGPR
);
599 m_jit
.move(JITCompiler::TrustedImm32(!invert
), resultPayloadGPR
);
600 JITCompiler::Jump done
= m_jit
.jump();
602 notEqualCase
.link(&m_jit
);
604 silentSpillAllRegisters(resultPayloadGPR
);
605 callOperation(operationCompareStrictEqCell
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
606 silentFillAllRegisters(resultPayloadGPR
);
608 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR
);
612 // FIXME: Add fast paths.
614 silentSpillAllRegisters(resultPayloadGPR
);
615 callOperation(operationCompareStrictEq
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
616 silentFillAllRegisters(resultPayloadGPR
);
618 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR
);
621 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
624 void SpeculativeJIT::compileMiscStrictEq(Node
* node
)
626 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
627 JSValueOperand
op2(this, node
->child2(), ManualOperandSpeculation
);
628 GPRTemporary
result(this);
630 if (node
->child1().useKind() == MiscUse
)
631 speculateMisc(node
->child1(), op1
.jsValueRegs());
632 if (node
->child2().useKind() == MiscUse
)
633 speculateMisc(node
->child2(), op2
.jsValueRegs());
635 m_jit
.move(TrustedImm32(0), result
.gpr());
636 JITCompiler::Jump notEqual
= m_jit
.branch32(JITCompiler::NotEqual
, op1
.tagGPR(), op2
.tagGPR());
637 m_jit
.compare32(JITCompiler::Equal
, op1
.payloadGPR(), op2
.payloadGPR(), result
.gpr());
638 notEqual
.link(&m_jit
);
639 booleanResult(result
.gpr(), node
);
642 void SpeculativeJIT::emitCall(Node
* node
)
644 CallLinkInfo::CallType callType
;
645 bool isVarargs
= false;
646 bool isForwardVarargs
= false;
647 switch (node
->op()) {
649 callType
= CallLinkInfo::Call
;
652 callType
= CallLinkInfo::Construct
;
655 callType
= CallLinkInfo::CallVarargs
;
658 case ConstructVarargs
:
659 callType
= CallLinkInfo::ConstructVarargs
;
662 case CallForwardVarargs
:
663 callType
= CallLinkInfo::CallVarargs
;
664 isForwardVarargs
= true;
666 case ConstructForwardVarargs
:
667 callType
= CallLinkInfo::ConstructVarargs
;
668 isForwardVarargs
= true;
671 DFG_CRASH(m_jit
.graph(), node
, "bad node type");
675 Edge calleeEdge
= m_jit
.graph().child(node
, 0);
677 // Gotta load the arguments somehow. Varargs is trickier.
678 if (isVarargs
|| isForwardVarargs
) {
679 CallVarargsData
* data
= node
->callVarargsData();
682 unsigned numUsedStackSlots
= m_jit
.graph().m_nextMachineLocal
;
684 if (isForwardVarargs
) {
692 scratchGPR1
= JITCompiler::selectScratchGPR();
693 scratchGPR2
= JITCompiler::selectScratchGPR(scratchGPR1
);
694 scratchGPR3
= JITCompiler::selectScratchGPR(scratchGPR1
, scratchGPR2
);
696 m_jit
.move(TrustedImm32(numUsedStackSlots
), scratchGPR2
);
697 JITCompiler::JumpList slowCase
;
698 emitSetupVarargsFrameFastCase(m_jit
, scratchGPR2
, scratchGPR1
, scratchGPR2
, scratchGPR3
, node
->child2()->origin
.semantic
.inlineCallFrame
, data
->firstVarArgOffset
, slowCase
);
699 JITCompiler::Jump done
= m_jit
.jump();
700 slowCase
.link(&m_jit
);
701 callOperation(operationThrowStackOverflowForVarargs
);
702 m_jit
.abortWithReason(DFGVarargsThrowingPathDidNotThrow
);
704 resultGPR
= scratchGPR2
;
706 GPRReg argumentsPayloadGPR
;
707 GPRReg argumentsTagGPR
;
712 auto loadArgumentsGPR
= [&] (GPRReg reservedGPR
) {
713 if (reservedGPR
!= InvalidGPRReg
)
715 JSValueOperand
arguments(this, node
->child2());
716 argumentsTagGPR
= arguments
.tagGPR();
717 argumentsPayloadGPR
= arguments
.payloadGPR();
718 if (reservedGPR
!= InvalidGPRReg
)
722 scratchGPR1
= JITCompiler::selectScratchGPR(argumentsPayloadGPR
, argumentsTagGPR
, reservedGPR
);
723 scratchGPR2
= JITCompiler::selectScratchGPR(argumentsPayloadGPR
, argumentsTagGPR
, scratchGPR1
, reservedGPR
);
724 scratchGPR3
= JITCompiler::selectScratchGPR(argumentsPayloadGPR
, argumentsTagGPR
, scratchGPR1
, scratchGPR2
, reservedGPR
);
727 loadArgumentsGPR(InvalidGPRReg
);
729 DFG_ASSERT(m_jit
.graph(), node
, isFlushed());
731 // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is
733 callOperation(operationSizeFrameForVarargs
, GPRInfo::returnValueGPR
, argumentsTagGPR
, argumentsPayloadGPR
, numUsedStackSlots
, data
->firstVarArgOffset
);
735 // Now we have the argument count of the callee frame, but we've lost the arguments operand.
736 // Reconstruct the arguments operand while preserving the callee frame.
737 loadArgumentsGPR(GPRInfo::returnValueGPR
);
738 m_jit
.move(TrustedImm32(numUsedStackSlots
), scratchGPR1
);
739 emitSetVarargsFrame(m_jit
, GPRInfo::returnValueGPR
, false, scratchGPR1
, scratchGPR1
);
740 m_jit
.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC
) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1
, JITCompiler::stackPointerRegister
);
742 callOperation(operationSetupVarargsFrame
, GPRInfo::returnValueGPR
, scratchGPR1
, argumentsTagGPR
, argumentsPayloadGPR
, data
->firstVarArgOffset
, GPRInfo::returnValueGPR
);
743 resultGPR
= GPRInfo::returnValueGPR
;
746 m_jit
.addPtr(TrustedImm32(sizeof(CallerFrameAndPC
)), resultGPR
, JITCompiler::stackPointerRegister
);
748 DFG_ASSERT(m_jit
.graph(), node
, isFlushed());
750 // We don't need the arguments array anymore.
754 // Now set up the "this" argument.
755 JSValueOperand
thisArgument(this, node
->child3());
756 GPRReg thisArgumentTagGPR
= thisArgument
.tagGPR();
757 GPRReg thisArgumentPayloadGPR
= thisArgument
.payloadGPR();
760 m_jit
.store32(thisArgumentTagGPR
, JITCompiler::calleeArgumentTagSlot(0));
761 m_jit
.store32(thisArgumentPayloadGPR
, JITCompiler::calleeArgumentPayloadSlot(0));
763 // The call instruction's first child is either the function (normal call) or the
764 // receiver (method call). subsequent children are the arguments.
765 int numPassedArgs
= node
->numChildren() - 1;
767 m_jit
.store32(MacroAssembler::TrustedImm32(numPassedArgs
), m_jit
.calleeFramePayloadSlot(JSStack::ArgumentCount
));
769 for (int i
= 0; i
< numPassedArgs
; i
++) {
770 Edge argEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + 1 + i
];
771 JSValueOperand
arg(this, argEdge
);
772 GPRReg argTagGPR
= arg
.tagGPR();
773 GPRReg argPayloadGPR
= arg
.payloadGPR();
776 m_jit
.store32(argTagGPR
, m_jit
.calleeArgumentTagSlot(i
));
777 m_jit
.store32(argPayloadGPR
, m_jit
.calleeArgumentPayloadSlot(i
));
781 JSValueOperand
callee(this, calleeEdge
);
782 GPRReg calleeTagGPR
= callee
.tagGPR();
783 GPRReg calleePayloadGPR
= callee
.payloadGPR();
785 m_jit
.store32(calleePayloadGPR
, m_jit
.calleeFramePayloadSlot(JSStack::Callee
));
786 m_jit
.store32(calleeTagGPR
, m_jit
.calleeFrameTagSlot(JSStack::Callee
));
790 GPRFlushedCallResult
resultPayload(this);
791 GPRFlushedCallResult2
resultTag(this);
792 GPRReg resultPayloadGPR
= resultPayload
.gpr();
793 GPRReg resultTagGPR
= resultTag
.gpr();
795 JITCompiler::DataLabelPtr targetToCheck
;
796 JITCompiler::JumpList slowPath
;
798 m_jit
.emitStoreCodeOrigin(node
->origin
.semantic
);
800 CallLinkInfo
* info
= m_jit
.codeBlock()->addCallLinkInfo();
802 slowPath
.append(m_jit
.branchIfNotCell(callee
.jsValueRegs()));
803 slowPath
.append(m_jit
.branchPtrWithPatch(MacroAssembler::NotEqual
, calleePayloadGPR
, targetToCheck
));
805 JITCompiler::Call fastCall
= m_jit
.nearCall();
807 JITCompiler::Jump done
= m_jit
.jump();
809 slowPath
.link(&m_jit
);
811 // Callee payload needs to be in regT0, tag in regT1
812 if (calleeTagGPR
== GPRInfo::regT0
) {
813 if (calleePayloadGPR
== GPRInfo::regT1
)
814 m_jit
.swap(GPRInfo::regT1
, GPRInfo::regT0
);
816 m_jit
.move(calleeTagGPR
, GPRInfo::regT1
);
817 m_jit
.move(calleePayloadGPR
, GPRInfo::regT0
);
820 m_jit
.move(calleePayloadGPR
, GPRInfo::regT0
);
821 m_jit
.move(calleeTagGPR
, GPRInfo::regT1
);
823 m_jit
.move(MacroAssembler::TrustedImmPtr(info
), GPRInfo::regT2
);
824 JITCompiler::Call slowCall
= m_jit
.nearCall();
828 m_jit
.setupResults(resultPayloadGPR
, resultTagGPR
);
830 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, DataFormatJS
, UseChildrenCalledExplicitly
);
832 info
->setUpCall(callType
, node
->origin
.semantic
, calleePayloadGPR
);
833 m_jit
.addJSCall(fastCall
, slowCall
, targetToCheck
, info
);
835 // If we were varargs, then after the calls are done, we need to reestablish our stack pointer.
836 if (isVarargs
|| isForwardVarargs
)
837 m_jit
.addPtr(TrustedImm32(m_jit
.graph().stackPointerOffset() * sizeof(Register
)), GPRInfo::callFrameRegister
, JITCompiler::stackPointerRegister
);
840 template<bool strict
>
841 GPRReg
SpeculativeJIT::fillSpeculateInt32Internal(Edge edge
, DataFormat
& returnFormat
)
843 AbstractValue
& value
= m_state
.forNode(edge
);
844 SpeculatedType type
= value
.m_type
;
845 ASSERT(edge
.useKind() != KnownInt32Use
|| !(value
.m_type
& ~SpecInt32
));
847 m_interpreter
.filter(value
, SpecInt32
);
848 if (value
.isClear()) {
849 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
850 returnFormat
= DataFormatInt32
;
854 VirtualRegister virtualRegister
= edge
->virtualRegister();
855 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
857 switch (info
.registerFormat()) {
858 case DataFormatNone
: {
859 if (edge
->hasConstant()) {
860 ASSERT(edge
->isInt32Constant());
861 GPRReg gpr
= allocate();
862 m_jit
.move(MacroAssembler::Imm32(edge
->asInt32()), gpr
);
863 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
864 info
.fillInt32(*m_stream
, gpr
);
865 returnFormat
= DataFormatInt32
;
869 DataFormat spillFormat
= info
.spillFormat();
871 ASSERT_UNUSED(spillFormat
, (spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInt32
);
873 // If we know this was spilled as an integer we can fill without checking.
874 if (type
& ~SpecInt32
)
875 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::Int32Tag
)));
877 GPRReg gpr
= allocate();
878 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
879 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
880 info
.fillInt32(*m_stream
, gpr
);
881 returnFormat
= DataFormatInt32
;
885 case DataFormatJSInt32
:
887 // Check the value is an integer.
888 GPRReg tagGPR
= info
.tagGPR();
889 GPRReg payloadGPR
= info
.payloadGPR();
891 m_gprs
.lock(payloadGPR
);
892 if (type
& ~SpecInt32
)
893 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::Int32Tag
)));
894 m_gprs
.unlock(tagGPR
);
895 m_gprs
.release(tagGPR
);
896 m_gprs
.release(payloadGPR
);
897 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderInteger
);
898 info
.fillInt32(*m_stream
, payloadGPR
);
899 // If !strict we're done, return.
900 returnFormat
= DataFormatInt32
;
904 case DataFormatInt32
: {
905 GPRReg gpr
= info
.gpr();
907 returnFormat
= DataFormatInt32
;
912 case DataFormatBoolean
:
913 case DataFormatJSDouble
:
914 case DataFormatJSCell
:
915 case DataFormatJSBoolean
:
916 case DataFormatDouble
:
917 case DataFormatStorage
:
919 RELEASE_ASSERT_NOT_REACHED();
920 return InvalidGPRReg
;
924 GPRReg
SpeculativeJIT::fillSpeculateInt32(Edge edge
, DataFormat
& returnFormat
)
926 return fillSpeculateInt32Internal
<false>(edge
, returnFormat
);
929 GPRReg
SpeculativeJIT::fillSpeculateInt32Strict(Edge edge
)
931 DataFormat mustBeDataFormatInt32
;
932 GPRReg result
= fillSpeculateInt32Internal
<true>(edge
, mustBeDataFormatInt32
);
933 ASSERT(mustBeDataFormatInt32
== DataFormatInt32
);
937 FPRReg
SpeculativeJIT::fillSpeculateDouble(Edge edge
)
939 ASSERT(isDouble(edge
.useKind()));
940 ASSERT(edge
->hasDoubleResult());
941 VirtualRegister virtualRegister
= edge
->virtualRegister();
942 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
944 if (info
.registerFormat() == DataFormatNone
) {
946 if (edge
->hasConstant()) {
947 RELEASE_ASSERT(edge
->isNumberConstant());
948 FPRReg fpr
= fprAllocate();
949 m_jit
.loadDouble(TrustedImmPtr(m_jit
.addressOfDoubleConstant(edge
.node())), fpr
);
950 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderConstant
);
951 info
.fillDouble(*m_stream
, fpr
);
955 RELEASE_ASSERT(info
.spillFormat() == DataFormatDouble
);
956 FPRReg fpr
= fprAllocate();
957 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
958 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderSpilled
);
959 info
.fillDouble(*m_stream
, fpr
);
963 RELEASE_ASSERT(info
.registerFormat() == DataFormatDouble
);
964 FPRReg fpr
= info
.fpr();
969 GPRReg
SpeculativeJIT::fillSpeculateCell(Edge edge
)
971 AbstractValue
& value
= m_state
.forNode(edge
);
972 SpeculatedType type
= value
.m_type
;
973 ASSERT((edge
.useKind() != KnownCellUse
&& edge
.useKind() != KnownStringUse
) || !(value
.m_type
& ~SpecCell
));
975 m_interpreter
.filter(value
, SpecCell
);
976 if (value
.isClear()) {
977 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
981 VirtualRegister virtualRegister
= edge
->virtualRegister();
982 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
984 switch (info
.registerFormat()) {
985 case DataFormatNone
: {
986 if (edge
->hasConstant()) {
987 JSValue jsValue
= edge
->asJSValue();
988 GPRReg gpr
= allocate();
989 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
990 m_jit
.move(MacroAssembler::TrustedImmPtr(jsValue
.asCell()), gpr
);
991 info
.fillCell(*m_stream
, gpr
);
995 ASSERT((info
.spillFormat() & DataFormatJS
) || info
.spillFormat() == DataFormatCell
);
996 if (type
& ~SpecCell
) {
999 JSValueSource(JITCompiler::addressFor(virtualRegister
)),
1002 MacroAssembler::NotEqual
,
1003 JITCompiler::tagFor(virtualRegister
),
1004 TrustedImm32(JSValue::CellTag
)));
1006 GPRReg gpr
= allocate();
1007 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
1008 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1009 info
.fillCell(*m_stream
, gpr
);
1013 case DataFormatCell
: {
1014 GPRReg gpr
= info
.gpr();
1019 case DataFormatJSCell
:
1020 case DataFormatJS
: {
1021 GPRReg tagGPR
= info
.tagGPR();
1022 GPRReg payloadGPR
= info
.payloadGPR();
1023 m_gprs
.lock(tagGPR
);
1024 m_gprs
.lock(payloadGPR
);
1025 if (type
& ~SpecCell
) {
1027 BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
,
1028 m_jit
.branchIfNotCell(info
.jsValueRegs()));
1030 m_gprs
.unlock(tagGPR
);
1031 m_gprs
.release(tagGPR
);
1032 m_gprs
.release(payloadGPR
);
1033 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderCell
);
1034 info
.fillCell(*m_stream
, payloadGPR
);
1038 case DataFormatJSInt32
:
1039 case DataFormatInt32
:
1040 case DataFormatJSDouble
:
1041 case DataFormatJSBoolean
:
1042 case DataFormatBoolean
:
1043 case DataFormatDouble
:
1044 case DataFormatStorage
:
1045 RELEASE_ASSERT_NOT_REACHED();
1048 RELEASE_ASSERT_NOT_REACHED();
1049 return InvalidGPRReg
;
1053 GPRReg
SpeculativeJIT::fillSpeculateBoolean(Edge edge
)
1055 AbstractValue
& value
= m_state
.forNode(edge
);
1056 SpeculatedType type
= value
.m_type
;
1058 m_interpreter
.filter(value
, SpecBoolean
);
1059 if (value
.isClear()) {
1060 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1064 VirtualRegister virtualRegister
= edge
->virtualRegister();
1065 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1067 switch (info
.registerFormat()) {
1068 case DataFormatNone
: {
1069 if (edge
->hasConstant()) {
1070 JSValue jsValue
= edge
->asJSValue();
1071 GPRReg gpr
= allocate();
1072 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1073 m_jit
.move(MacroAssembler::TrustedImm32(jsValue
.asBoolean()), gpr
);
1074 info
.fillBoolean(*m_stream
, gpr
);
1078 ASSERT((info
.spillFormat() & DataFormatJS
) || info
.spillFormat() == DataFormatBoolean
);
1080 if (type
& ~SpecBoolean
)
1081 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::BooleanTag
)));
1083 GPRReg gpr
= allocate();
1084 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
1085 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1086 info
.fillBoolean(*m_stream
, gpr
);
1090 case DataFormatBoolean
: {
1091 GPRReg gpr
= info
.gpr();
1096 case DataFormatJSBoolean
:
1097 case DataFormatJS
: {
1098 GPRReg tagGPR
= info
.tagGPR();
1099 GPRReg payloadGPR
= info
.payloadGPR();
1100 m_gprs
.lock(tagGPR
);
1101 m_gprs
.lock(payloadGPR
);
1102 if (type
& ~SpecBoolean
)
1103 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::BooleanTag
)));
1105 m_gprs
.unlock(tagGPR
);
1106 m_gprs
.release(tagGPR
);
1107 m_gprs
.release(payloadGPR
);
1108 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderBoolean
);
1109 info
.fillBoolean(*m_stream
, payloadGPR
);
1113 case DataFormatJSInt32
:
1114 case DataFormatInt32
:
1115 case DataFormatJSDouble
:
1116 case DataFormatJSCell
:
1117 case DataFormatCell
:
1118 case DataFormatDouble
:
1119 case DataFormatStorage
:
1120 RELEASE_ASSERT_NOT_REACHED();
1123 RELEASE_ASSERT_NOT_REACHED();
1124 return InvalidGPRReg
;
1128 void SpeculativeJIT::compileBaseValueStoreBarrier(Edge
& baseEdge
, Edge
& valueEdge
)
1131 ASSERT(!isKnownNotCell(valueEdge
.node()));
1133 SpeculateCellOperand
base(this, baseEdge
);
1134 JSValueOperand
value(this, valueEdge
);
1135 GPRTemporary
scratch1(this);
1136 GPRTemporary
scratch2(this);
1138 writeBarrier(base
.gpr(), value
.tagGPR(), valueEdge
, scratch1
.gpr(), scratch2
.gpr());
1140 UNUSED_PARAM(baseEdge
);
1141 UNUSED_PARAM(valueEdge
);
1145 void SpeculativeJIT::compileObjectEquality(Node
* node
)
1147 SpeculateCellOperand
op1(this, node
->child1());
1148 SpeculateCellOperand
op2(this, node
->child2());
1149 GPRReg op1GPR
= op1
.gpr();
1150 GPRReg op2GPR
= op2
.gpr();
1152 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1154 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1156 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1159 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1160 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1162 MacroAssembler::NonZero
,
1163 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1164 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1167 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchIfNotObject(op2GPR
));
1168 speculationCheck(BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1170 MacroAssembler::NonZero
,
1171 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1172 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1175 GPRTemporary
resultPayload(this, Reuse
, op2
);
1176 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1178 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1179 m_jit
.move(TrustedImm32(1), resultPayloadGPR
);
1180 MacroAssembler::Jump done
= m_jit
.jump();
1181 falseCase
.link(&m_jit
);
1182 m_jit
.move(TrustedImm32(0), resultPayloadGPR
);
1185 booleanResult(resultPayloadGPR
, node
);
1188 void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild
, Edge otherChild
)
1190 SpeculateCellOperand
op1(this, objectChild
);
1191 JSValueOperand
op2(this, otherChild
);
1193 GPRReg op1GPR
= op1
.gpr();
1194 GPRReg op2GPR
= op2
.payloadGPR();
1196 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR
), objectChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1198 GPRTemporary
resultPayload(this, Reuse
, op1
);
1199 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1201 MacroAssembler::Jump op2CellJump
= m_jit
.branchIfCell(op2
.jsValueRegs());
1203 m_jit
.move(TrustedImm32(0), resultPayloadGPR
);
1204 MacroAssembler::Jump op2NotCellJump
= m_jit
.jump();
1206 // At this point we know that we can perform a straight-forward equality comparison on pointer
1207 // values because we are doing strict equality.
1208 op2CellJump
.link(&m_jit
);
1209 m_jit
.compare32(MacroAssembler::Equal
, op1GPR
, op2GPR
, resultPayloadGPR
);
1211 op2NotCellJump
.link(&m_jit
);
1212 booleanResult(resultPayloadGPR
, m_currentNode
);
1215 void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild
, Edge otherChild
, Node
* branchNode
)
1217 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1218 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1220 SpeculateCellOperand
op1(this, objectChild
);
1221 JSValueOperand
op2(this, otherChild
);
1223 GPRReg op1GPR
= op1
.gpr();
1224 GPRReg op2GPR
= op2
.payloadGPR();
1226 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR
), objectChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1228 branch32(MacroAssembler::NotEqual
, op2
.tagGPR(), TrustedImm32(JSValue::CellTag
), notTaken
);
1230 if (taken
== nextBlock()) {
1231 branch32(MacroAssembler::NotEqual
, op1GPR
, op2GPR
, notTaken
);
1234 branch32(MacroAssembler::Equal
, op1GPR
, op2GPR
, taken
);
1239 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
)
1241 SpeculateCellOperand
op1(this, leftChild
);
1242 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1243 GPRTemporary
result(this);
1245 GPRReg op1GPR
= op1
.gpr();
1246 GPRReg op2TagGPR
= op2
.tagGPR();
1247 GPRReg op2PayloadGPR
= op2
.payloadGPR();
1248 GPRReg resultGPR
= result
.gpr();
1250 bool masqueradesAsUndefinedWatchpointValid
=
1251 masqueradesAsUndefinedWatchpointIsStillValid();
1253 if (masqueradesAsUndefinedWatchpointValid
) {
1255 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1258 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1259 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1261 MacroAssembler::NonZero
,
1262 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1263 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1267 // It seems that most of the time when programs do a == b where b may be either null/undefined
1268 // or an object, b is usually an object. Balance the branches to make that case fast.
1269 MacroAssembler::Jump rightNotCell
= m_jit
.branchIfNotCell(op2
.jsValueRegs());
1271 // We know that within this branch, rightChild must be a cell.
1272 if (masqueradesAsUndefinedWatchpointValid
) {
1274 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(op2PayloadGPR
));
1277 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
, m_jit
.branchIfNotObject(op2PayloadGPR
));
1278 speculationCheck(BadType
, JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
,
1280 MacroAssembler::NonZero
,
1281 MacroAssembler::Address(op2PayloadGPR
, JSCell::typeInfoFlagsOffset()),
1282 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1285 // At this point we know that we can perform a straight-forward equality comparison on pointer
1286 // values because both left and right are pointers to objects that have no special equality
1288 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2PayloadGPR
);
1289 MacroAssembler::Jump trueCase
= m_jit
.jump();
1291 rightNotCell
.link(&m_jit
);
1293 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1294 // prove that it is either null or undefined.
1295 if (needsTypeCheck(rightChild
, SpecCell
| SpecOther
)) {
1296 m_jit
.or32(TrustedImm32(1), op2TagGPR
, resultGPR
);
1299 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, SpecCell
| SpecOther
,
1301 MacroAssembler::NotEqual
, resultGPR
,
1302 MacroAssembler::TrustedImm32(JSValue::NullTag
)));
1305 falseCase
.link(&m_jit
);
1306 m_jit
.move(TrustedImm32(0), resultGPR
);
1307 MacroAssembler::Jump done
= m_jit
.jump();
1308 trueCase
.link(&m_jit
);
1309 m_jit
.move(TrustedImm32(1), resultGPR
);
1312 booleanResult(resultGPR
, m_currentNode
);
1315 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
, Node
* branchNode
)
1317 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1318 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1320 SpeculateCellOperand
op1(this, leftChild
);
1321 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1322 GPRTemporary
result(this);
1324 GPRReg op1GPR
= op1
.gpr();
1325 GPRReg op2TagGPR
= op2
.tagGPR();
1326 GPRReg op2PayloadGPR
= op2
.payloadGPR();
1327 GPRReg resultGPR
= result
.gpr();
1329 bool masqueradesAsUndefinedWatchpointValid
=
1330 masqueradesAsUndefinedWatchpointIsStillValid();
1332 if (masqueradesAsUndefinedWatchpointValid
) {
1334 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1337 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchIfNotObject(op1GPR
));
1338 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1340 MacroAssembler::NonZero
,
1341 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1342 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1345 // It seems that most of the time when programs do a == b where b may be either null/undefined
1346 // or an object, b is usually an object. Balance the branches to make that case fast.
1347 MacroAssembler::Jump rightNotCell
= m_jit
.branchIfNotCell(op2
.jsValueRegs());
1349 // We know that within this branch, rightChild must be a cell.
1350 if (masqueradesAsUndefinedWatchpointValid
) {
1352 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1353 m_jit
.branchIfNotObject(op2PayloadGPR
));
1356 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1357 m_jit
.branchIfNotObject(op2PayloadGPR
));
1358 speculationCheck(BadType
, JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
,
1360 MacroAssembler::NonZero
,
1361 MacroAssembler::Address(op2PayloadGPR
, JSCell::typeInfoFlagsOffset()),
1362 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1365 // At this point we know that we can perform a straight-forward equality comparison on pointer
1366 // values because both left and right are pointers to objects that have no special equality
1368 branch32(MacroAssembler::Equal
, op1GPR
, op2PayloadGPR
, taken
);
1370 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1371 // prove that it is either null or undefined.
1372 if (!needsTypeCheck(rightChild
, SpecCell
| SpecOther
))
1373 rightNotCell
.link(&m_jit
);
1375 jump(notTaken
, ForceJump
);
1377 rightNotCell
.link(&m_jit
);
1378 m_jit
.or32(TrustedImm32(1), op2TagGPR
, resultGPR
);
1381 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, SpecCell
| SpecOther
,
1383 MacroAssembler::NotEqual
, resultGPR
,
1384 MacroAssembler::TrustedImm32(JSValue::NullTag
)));
1390 void SpeculativeJIT::compileInt32Compare(Node
* node
, MacroAssembler::RelationalCondition condition
)
1392 SpeculateInt32Operand
op1(this, node
->child1());
1393 SpeculateInt32Operand
op2(this, node
->child2());
1394 GPRTemporary
resultPayload(this);
1396 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), resultPayload
.gpr());
1398 // If we add a DataFormatBool, we should use it here.
1399 booleanResult(resultPayload
.gpr(), node
);
1402 void SpeculativeJIT::compileDoubleCompare(Node
* node
, MacroAssembler::DoubleCondition condition
)
1404 SpeculateDoubleOperand
op1(this, node
->child1());
1405 SpeculateDoubleOperand
op2(this, node
->child2());
1406 GPRTemporary
resultPayload(this);
1408 m_jit
.move(TrustedImm32(1), resultPayload
.gpr());
1409 MacroAssembler::Jump trueCase
= m_jit
.branchDouble(condition
, op1
.fpr(), op2
.fpr());
1410 m_jit
.move(TrustedImm32(0), resultPayload
.gpr());
1411 trueCase
.link(&m_jit
);
1413 booleanResult(resultPayload
.gpr(), node
);
1416 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse
)
1418 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1419 GPRTemporary
resultPayload(this);
1420 GPRReg valueTagGPR
= value
.tagGPR();
1421 GPRReg valuePayloadGPR
= value
.payloadGPR();
1422 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1423 GPRTemporary structure
;
1424 GPRReg structureGPR
= InvalidGPRReg
;
1426 bool masqueradesAsUndefinedWatchpointValid
=
1427 masqueradesAsUndefinedWatchpointIsStillValid();
1429 if (!masqueradesAsUndefinedWatchpointValid
) {
1430 // The masquerades as undefined case will use the structure register, so allocate it here.
1431 // Do this at the top of the function to avoid branching around a register allocation.
1432 GPRTemporary
realStructure(this);
1433 structure
.adopt(realStructure
);
1434 structureGPR
= structure
.gpr();
1437 MacroAssembler::Jump notCell
= m_jit
.branchIfNotCell(value
.jsValueRegs());
1438 if (masqueradesAsUndefinedWatchpointValid
) {
1440 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1441 m_jit
.branchIfNotObject(valuePayloadGPR
));
1444 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1445 m_jit
.branchIfNotObject(valuePayloadGPR
));
1447 MacroAssembler::Jump isNotMasqueradesAsUndefined
=
1449 MacroAssembler::Zero
,
1450 MacroAssembler::Address(valuePayloadGPR
, JSCell::typeInfoFlagsOffset()),
1451 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
));
1453 m_jit
.loadPtr(MacroAssembler::Address(valuePayloadGPR
, JSCell::structureIDOffset()), structureGPR
);
1454 speculationCheck(BadType
, JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
,
1456 MacroAssembler::Equal
,
1457 MacroAssembler::Address(structureGPR
, Structure::globalObjectOffset()),
1458 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1460 isNotMasqueradesAsUndefined
.link(&m_jit
);
1462 m_jit
.move(TrustedImm32(0), resultPayloadGPR
);
1463 MacroAssembler::Jump done
= m_jit
.jump();
1465 notCell
.link(&m_jit
);
1467 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
1468 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1469 m_jit
.or32(TrustedImm32(1), valueTagGPR
, resultPayloadGPR
);
1471 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, SpecCell
| SpecOther
,
1473 MacroAssembler::NotEqual
,
1475 TrustedImm32(JSValue::NullTag
)));
1477 m_jit
.move(TrustedImm32(1), resultPayloadGPR
);
1481 booleanResult(resultPayloadGPR
, m_currentNode
);
1484 void SpeculativeJIT::compileLogicalNot(Node
* node
)
1486 switch (node
->child1().useKind()) {
1488 SpeculateBooleanOperand
value(this, node
->child1());
1489 GPRTemporary
result(this, Reuse
, value
);
1490 m_jit
.xor32(TrustedImm32(1), value
.gpr(), result
.gpr());
1491 booleanResult(result
.gpr(), node
);
1495 case ObjectOrOtherUse
: {
1496 compileObjectOrOtherLogicalNot(node
->child1());
1501 SpeculateInt32Operand
value(this, node
->child1());
1502 GPRTemporary
resultPayload(this, Reuse
, value
);
1503 m_jit
.compare32(MacroAssembler::Equal
, value
.gpr(), MacroAssembler::TrustedImm32(0), resultPayload
.gpr());
1504 booleanResult(resultPayload
.gpr(), node
);
1508 case DoubleRepUse
: {
1509 SpeculateDoubleOperand
value(this, node
->child1());
1510 FPRTemporary
scratch(this);
1511 GPRTemporary
resultPayload(this);
1512 m_jit
.move(TrustedImm32(0), resultPayload
.gpr());
1513 MacroAssembler::Jump nonZero
= m_jit
.branchDoubleNonZero(value
.fpr(), scratch
.fpr());
1514 m_jit
.move(TrustedImm32(1), resultPayload
.gpr());
1515 nonZero
.link(&m_jit
);
1516 booleanResult(resultPayload
.gpr(), node
);
1521 JSValueOperand
arg1(this, node
->child1());
1522 GPRTemporary
resultPayload(this, Reuse
, arg1
, PayloadWord
);
1523 GPRReg arg1TagGPR
= arg1
.tagGPR();
1524 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
1525 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1529 JITCompiler::Jump slowCase
= m_jit
.branch32(JITCompiler::NotEqual
, arg1TagGPR
, TrustedImm32(JSValue::BooleanTag
));
1531 m_jit
.move(arg1PayloadGPR
, resultPayloadGPR
);
1533 addSlowPathGenerator(
1535 slowCase
, this, operationConvertJSValueToBoolean
, resultPayloadGPR
, arg1TagGPR
,
1538 m_jit
.xor32(TrustedImm32(1), resultPayloadGPR
);
1539 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
1543 return compileStringZeroLength(node
);
1546 RELEASE_ASSERT_NOT_REACHED();
1551 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse
, BasicBlock
* taken
, BasicBlock
* notTaken
)
1553 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1554 GPRTemporary
scratch(this);
1555 GPRReg valueTagGPR
= value
.tagGPR();
1556 GPRReg valuePayloadGPR
= value
.payloadGPR();
1557 GPRReg scratchGPR
= scratch
.gpr();
1559 MacroAssembler::Jump notCell
= m_jit
.branchIfNotCell(value
.jsValueRegs());
1560 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1562 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1563 m_jit
.branchIfNotObject(valuePayloadGPR
));
1566 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1567 m_jit
.branchIfNotObject(valuePayloadGPR
));
1569 JITCompiler::Jump isNotMasqueradesAsUndefined
= m_jit
.branchTest8(
1571 MacroAssembler::Address(valuePayloadGPR
, JSCell::typeInfoFlagsOffset()),
1572 TrustedImm32(MasqueradesAsUndefined
));
1574 m_jit
.loadPtr(MacroAssembler::Address(valuePayloadGPR
, JSCell::structureIDOffset()), scratchGPR
);
1575 speculationCheck(BadType
, JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
,
1577 MacroAssembler::Equal
,
1578 MacroAssembler::Address(scratchGPR
, Structure::globalObjectOffset()),
1579 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
))));
1581 isNotMasqueradesAsUndefined
.link(&m_jit
);
1583 jump(taken
, ForceJump
);
1585 notCell
.link(&m_jit
);
1587 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
1588 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1589 m_jit
.or32(TrustedImm32(1), valueTagGPR
, scratchGPR
);
1591 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, SpecCell
| SpecOther
,
1592 m_jit
.branch32(MacroAssembler::NotEqual
, scratchGPR
, TrustedImm32(JSValue::NullTag
)));
1597 noResult(m_currentNode
);
1600 void SpeculativeJIT::emitBranch(Node
* node
)
1602 BasicBlock
* taken
= node
->branchData()->taken
.block
;
1603 BasicBlock
* notTaken
= node
->branchData()->notTaken
.block
;
1605 switch (node
->child1().useKind()) {
1607 SpeculateBooleanOperand
value(this, node
->child1());
1608 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
1610 if (taken
== nextBlock()) {
1611 condition
= MacroAssembler::Zero
;
1612 BasicBlock
* tmp
= taken
;
1617 branchTest32(condition
, value
.gpr(), TrustedImm32(1), taken
);
1624 case ObjectOrOtherUse
: {
1625 emitObjectOrOtherBranch(node
->child1(), taken
, notTaken
);
1630 emitStringBranch(node
->child1(), taken
, notTaken
);
1636 if (node
->child1().useKind() == Int32Use
) {
1637 bool invert
= false;
1639 if (taken
== nextBlock()) {
1641 BasicBlock
* tmp
= taken
;
1646 SpeculateInt32Operand
value(this, node
->child1());
1647 branchTest32(invert
? MacroAssembler::Zero
: MacroAssembler::NonZero
, value
.gpr(), taken
);
1649 SpeculateDoubleOperand
value(this, node
->child1());
1650 FPRTemporary
scratch(this);
1651 branchDoubleNonZero(value
.fpr(), scratch
.fpr(), taken
);
1661 JSValueOperand
value(this, node
->child1());
1663 GPRReg valueTagGPR
= value
.tagGPR();
1664 GPRReg valuePayloadGPR
= value
.payloadGPR();
1666 GPRTemporary
result(this);
1667 GPRReg resultGPR
= result
.gpr();
1669 use(node
->child1());
1671 JITCompiler::Jump fastPath
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
));
1672 JITCompiler::Jump slowPath
= m_jit
.branch32(JITCompiler::NotEqual
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::BooleanTag
));
1674 fastPath
.link(&m_jit
);
1675 branchTest32(JITCompiler::Zero
, valuePayloadGPR
, notTaken
);
1676 jump(taken
, ForceJump
);
1678 slowPath
.link(&m_jit
);
1679 silentSpillAllRegisters(resultGPR
);
1680 callOperation(operationConvertJSValueToBoolean
, resultGPR
, valueTagGPR
, valuePayloadGPR
);
1681 silentFillAllRegisters(resultGPR
);
1683 branchTest32(JITCompiler::NonZero
, resultGPR
, taken
);
1686 noResult(node
, UseChildrenCalledExplicitly
);
1691 RELEASE_ASSERT_NOT_REACHED();
1696 template<typename BaseOperandType
, typename PropertyOperandType
, typename ValueOperandType
, typename TagType
>
1697 void SpeculativeJIT::compileContiguousPutByVal(Node
* node
, BaseOperandType
& base
, PropertyOperandType
& property
, ValueOperandType
& value
, GPRReg valuePayloadReg
, TagType valueTag
)
1699 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
1701 ArrayMode arrayMode
= node
->arrayMode();
1703 GPRReg baseReg
= base
.gpr();
1704 GPRReg propertyReg
= property
.gpr();
1706 StorageOperand
storage(this, child4
);
1707 GPRReg storageReg
= storage
.gpr();
1709 if (node
->op() == PutByValAlias
) {
1710 // Store the value to the array.
1711 GPRReg propertyReg
= property
.gpr();
1712 m_jit
.store32(valueTag
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
1713 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
1719 MacroAssembler::Jump slowCase
;
1721 if (arrayMode
.isInBounds()) {
1723 OutOfBounds
, JSValueRegs(), 0,
1724 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
1726 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1728 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
1730 if (!arrayMode
.isOutOfBounds())
1731 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
1733 m_jit
.add32(TrustedImm32(1), propertyReg
);
1734 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1735 m_jit
.sub32(TrustedImm32(1), propertyReg
);
1737 inBounds
.link(&m_jit
);
1740 m_jit
.store32(valueTag
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
1741 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
1748 if (arrayMode
.isOutOfBounds()) {
1749 if (node
->op() == PutByValDirect
) {
1750 addSlowPathGenerator(slowPathCall(
1752 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
1753 NoResult
, baseReg
, propertyReg
, valueTag
, valuePayloadReg
));
1755 addSlowPathGenerator(slowPathCall(
1757 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
1758 NoResult
, baseReg
, propertyReg
, valueTag
, valuePayloadReg
));
1762 noResult(node
, UseChildrenCalledExplicitly
);
1765 void SpeculativeJIT::compile(Node
* node
)
1767 NodeType op
= node
->op();
1769 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1770 m_jit
.clearRegisterAllocationOffsets();
1775 case DoubleConstant
:
1776 case PhantomDirectArguments
:
1777 case PhantomClonedArguments
:
1778 initConstantInfo(node
);
1782 speculate(node
, node
->child1());
1783 switch (node
->child1().useKind()) {
1785 case DoubleRepRealUse
: {
1786 SpeculateDoubleOperand
op(this, node
->child1());
1787 doubleResult(op
.fpr(), node
);
1792 case DoubleRepMachineIntUse
: {
1793 RELEASE_ASSERT_NOT_REACHED();
1797 JSValueOperand
op(this, node
->child1());
1798 jsValueResult(op
.tagGPR(), op
.payloadGPR(), node
);
1806 AbstractValue
& value
= m_state
.variables().operand(node
->local());
1808 // If the CFA is tracking this variable and it found that the variable
1809 // cannot have been assigned, then don't attempt to proceed.
1810 if (value
.isClear()) {
1811 m_compileOkay
= false;
1815 switch (node
->variableAccessData()->flushFormat()) {
1816 case FlushedDouble
: {
1817 FPRTemporary
result(this);
1818 m_jit
.loadDouble(JITCompiler::addressFor(node
->machineLocal()), result
.fpr());
1819 VirtualRegister virtualRegister
= node
->virtualRegister();
1820 m_fprs
.retain(result
.fpr(), virtualRegister
, SpillOrderDouble
);
1821 generationInfoFromVirtualRegister(virtualRegister
).initDouble(node
, node
->refCount(), result
.fpr());
1825 case FlushedInt32
: {
1826 GPRTemporary
result(this);
1827 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1829 // Like int32Result, but don't useChildren - our children are phi nodes,
1830 // and don't represent values within this dataflow with virtual registers.
1831 VirtualRegister virtualRegister
= node
->virtualRegister();
1832 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
1833 generationInfoFromVirtualRegister(virtualRegister
).initInt32(node
, node
->refCount(), result
.gpr());
1838 GPRTemporary
result(this);
1839 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1841 // Like cellResult, but don't useChildren - our children are phi nodes,
1842 // and don't represent values within this dataflow with virtual registers.
1843 VirtualRegister virtualRegister
= node
->virtualRegister();
1844 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderCell
);
1845 generationInfoFromVirtualRegister(virtualRegister
).initCell(node
, node
->refCount(), result
.gpr());
1849 case FlushedBoolean
: {
1850 GPRTemporary
result(this);
1851 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1853 // Like booleanResult, but don't useChildren - our children are phi nodes,
1854 // and don't represent values within this dataflow with virtual registers.
1855 VirtualRegister virtualRegister
= node
->virtualRegister();
1856 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderBoolean
);
1857 generationInfoFromVirtualRegister(virtualRegister
).initBoolean(node
, node
->refCount(), result
.gpr());
1861 case FlushedJSValue
: {
1862 GPRTemporary
result(this);
1863 GPRTemporary
tag(this);
1864 m_jit
.load32(JITCompiler::payloadFor(node
->machineLocal()), result
.gpr());
1865 m_jit
.load32(JITCompiler::tagFor(node
->machineLocal()), tag
.gpr());
1867 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1868 // and don't represent values within this dataflow with virtual registers.
1869 VirtualRegister virtualRegister
= node
->virtualRegister();
1870 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
1871 m_gprs
.retain(tag
.gpr(), virtualRegister
, SpillOrderJS
);
1873 generationInfoFromVirtualRegister(virtualRegister
).initJSValue(node
, node
->refCount(), tag
.gpr(), result
.gpr(), DataFormatJS
);
1878 RELEASE_ASSERT_NOT_REACHED();
1883 case GetLocalUnlinked
: {
1884 GPRTemporary
payload(this);
1885 GPRTemporary
tag(this);
1886 m_jit
.load32(JITCompiler::payloadFor(node
->unlinkedMachineLocal()), payload
.gpr());
1887 m_jit
.load32(JITCompiler::tagFor(node
->unlinkedMachineLocal()), tag
.gpr());
1888 jsValueResult(tag
.gpr(), payload
.gpr(), node
);
1893 compileMovHint(m_currentNode
);
1899 recordSetLocal(m_currentNode
->unlinkedLocal(), VirtualRegister(), DataFormatDead
);
1905 switch (node
->variableAccessData()->flushFormat()) {
1906 case FlushedDouble
: {
1907 SpeculateDoubleOperand
value(this, node
->child1());
1908 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
->machineLocal()));
1910 // Indicate that it's no longer necessary to retrieve the value of
1911 // this bytecode variable from registers or other locations in the stack,
1912 // but that it is stored as a double.
1913 recordSetLocal(DataFormatDouble
);
1917 case FlushedInt32
: {
1918 SpeculateInt32Operand
value(this, node
->child1());
1919 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->machineLocal()));
1921 recordSetLocal(DataFormatInt32
);
1926 SpeculateCellOperand
cell(this, node
->child1());
1927 GPRReg cellGPR
= cell
.gpr();
1928 m_jit
.storePtr(cellGPR
, JITCompiler::payloadFor(node
->machineLocal()));
1930 recordSetLocal(DataFormatCell
);
1934 case FlushedBoolean
: {
1935 SpeculateBooleanOperand
value(this, node
->child1());
1936 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->machineLocal()));
1938 recordSetLocal(DataFormatBoolean
);
1942 case FlushedJSValue
: {
1943 JSValueOperand
value(this, node
->child1());
1944 m_jit
.store32(value
.payloadGPR(), JITCompiler::payloadFor(node
->machineLocal()));
1945 m_jit
.store32(value
.tagGPR(), JITCompiler::tagFor(node
->machineLocal()));
1947 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
1952 RELEASE_ASSERT_NOT_REACHED();
1959 // This is a no-op; it just marks the fact that the argument is being used.
1960 // But it may be profitable to use this as a hook to run speculation checks
1961 // on arguments, thereby allowing us to trivially eliminate such checks if
1962 // the argument is not used.
1963 recordSetLocal(dataFormatFor(node
->variableAccessData()->flushFormat()));
1969 if (node
->child1()->isInt32Constant()) {
1970 SpeculateInt32Operand
op2(this, node
->child2());
1971 GPRTemporary
result(this, Reuse
, op2
);
1973 bitOp(op
, node
->child1()->asInt32(), op2
.gpr(), result
.gpr());
1975 int32Result(result
.gpr(), node
);
1976 } else if (node
->child2()->isInt32Constant()) {
1977 SpeculateInt32Operand
op1(this, node
->child1());
1978 GPRTemporary
result(this, Reuse
, op1
);
1980 bitOp(op
, node
->child2()->asInt32(), op1
.gpr(), result
.gpr());
1982 int32Result(result
.gpr(), node
);
1984 SpeculateInt32Operand
op1(this, node
->child1());
1985 SpeculateInt32Operand
op2(this, node
->child2());
1986 GPRTemporary
result(this, Reuse
, op1
, op2
);
1988 GPRReg reg1
= op1
.gpr();
1989 GPRReg reg2
= op2
.gpr();
1990 bitOp(op
, reg1
, reg2
, result
.gpr());
1992 int32Result(result
.gpr(), node
);
1999 if (node
->child2()->isInt32Constant()) {
2000 SpeculateInt32Operand
op1(this, node
->child1());
2001 GPRTemporary
result(this, Reuse
, op1
);
2003 shiftOp(op
, op1
.gpr(), node
->child2()->asInt32() & 0x1f, result
.gpr());
2005 int32Result(result
.gpr(), node
);
2007 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
2008 SpeculateInt32Operand
op1(this, node
->child1());
2009 SpeculateInt32Operand
op2(this, node
->child2());
2010 GPRTemporary
result(this, Reuse
, op1
);
2012 GPRReg reg1
= op1
.gpr();
2013 GPRReg reg2
= op2
.gpr();
2014 shiftOp(op
, reg1
, reg2
, result
.gpr());
2016 int32Result(result
.gpr(), node
);
2020 case UInt32ToNumber
: {
2021 compileUInt32ToNumber(node
);
2025 case DoubleAsInt32
: {
2026 compileDoubleAsInt32(node
);
2030 case ValueToInt32
: {
2031 compileValueToInt32(node
);
2036 compileDoubleRep(node
);
2041 compileValueRep(node
);
2046 JSValueOperand
op1(this, node
->child1());
2047 JSValueOperand
op2(this, node
->child2());
2049 GPRReg op1TagGPR
= op1
.tagGPR();
2050 GPRReg op1PayloadGPR
= op1
.payloadGPR();
2051 GPRReg op2TagGPR
= op2
.tagGPR();
2052 GPRReg op2PayloadGPR
= op2
.payloadGPR();
2056 GPRFlushedCallResult2
resultTag(this);
2057 GPRFlushedCallResult
resultPayload(this);
2058 if (isKnownNotNumber(node
->child1().node()) || isKnownNotNumber(node
->child2().node()))
2059 callOperation(operationValueAddNotNumber
, resultTag
.gpr(), resultPayload
.gpr(), op1TagGPR
, op1PayloadGPR
, op2TagGPR
, op2PayloadGPR
);
2061 callOperation(operationValueAdd
, resultTag
.gpr(), resultPayload
.gpr(), op1TagGPR
, op1PayloadGPR
, op2TagGPR
, op2PayloadGPR
);
2063 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2072 compileArithClz32(node
);
2076 compileMakeRope(node
);
2080 compileArithSub(node
);
2084 compileArithNegate(node
);
2088 compileArithMul(node
);
2092 compileArithDiv(node
);
2097 compileArithMod(node
);
2102 compileArithPow(node
);
2107 switch (node
->child1().useKind()) {
2109 SpeculateStrictInt32Operand
op1(this, node
->child1());
2110 GPRTemporary
result(this, Reuse
, op1
);
2111 GPRTemporary
scratch(this);
2113 m_jit
.move(op1
.gpr(), result
.gpr());
2114 m_jit
.rshift32(result
.gpr(), MacroAssembler::TrustedImm32(31), scratch
.gpr());
2115 m_jit
.add32(scratch
.gpr(), result
.gpr());
2116 m_jit
.xor32(scratch
.gpr(), result
.gpr());
2117 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, result
.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2118 int32Result(result
.gpr(), node
);
2123 case DoubleRepUse
: {
2124 SpeculateDoubleOperand
op1(this, node
->child1());
2125 FPRTemporary
result(this);
2127 m_jit
.absDouble(op1
.fpr(), result
.fpr());
2128 doubleResult(result
.fpr(), node
);
2133 RELEASE_ASSERT_NOT_REACHED();
2141 switch (node
->binaryUseKind()) {
2143 SpeculateStrictInt32Operand
op1(this, node
->child1());
2144 SpeculateStrictInt32Operand
op2(this, node
->child2());
2145 GPRTemporary
result(this, Reuse
, op1
);
2147 GPRReg op1GPR
= op1
.gpr();
2148 GPRReg op2GPR
= op2
.gpr();
2149 GPRReg resultGPR
= result
.gpr();
2151 MacroAssembler::Jump op1Less
= m_jit
.branch32(op
== ArithMin
? MacroAssembler::LessThan
: MacroAssembler::GreaterThan
, op1GPR
, op2GPR
);
2152 m_jit
.move(op2GPR
, resultGPR
);
2153 if (op1GPR
!= resultGPR
) {
2154 MacroAssembler::Jump done
= m_jit
.jump();
2155 op1Less
.link(&m_jit
);
2156 m_jit
.move(op1GPR
, resultGPR
);
2159 op1Less
.link(&m_jit
);
2161 int32Result(resultGPR
, node
);
2165 case DoubleRepUse
: {
2166 SpeculateDoubleOperand
op1(this, node
->child1());
2167 SpeculateDoubleOperand
op2(this, node
->child2());
2168 FPRTemporary
result(this, op1
);
2170 FPRReg op1FPR
= op1
.fpr();
2171 FPRReg op2FPR
= op2
.fpr();
2172 FPRReg resultFPR
= result
.fpr();
2174 MacroAssembler::JumpList done
;
2176 MacroAssembler::Jump op1Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleLessThan
: MacroAssembler::DoubleGreaterThan
, op1FPR
, op2FPR
);
2178 // op2 is either the lesser one or one of them is NaN
2179 MacroAssembler::Jump op2Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleGreaterThanOrEqual
: MacroAssembler::DoubleLessThanOrEqual
, op1FPR
, op2FPR
);
2181 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2182 // op1 + op2 and putting it into result.
2183 m_jit
.addDouble(op1FPR
, op2FPR
, resultFPR
);
2184 done
.append(m_jit
.jump());
2186 op2Less
.link(&m_jit
);
2187 m_jit
.moveDouble(op2FPR
, resultFPR
);
2189 if (op1FPR
!= resultFPR
) {
2190 done
.append(m_jit
.jump());
2192 op1Less
.link(&m_jit
);
2193 m_jit
.moveDouble(op1FPR
, resultFPR
);
2195 op1Less
.link(&m_jit
);
2199 doubleResult(resultFPR
, node
);
2204 RELEASE_ASSERT_NOT_REACHED();
2211 compileArithSqrt(node
);
2215 SpeculateDoubleOperand
op1(this, node
->child1());
2216 FPRTemporary
result(this, op1
);
2218 m_jit
.convertDoubleToFloat(op1
.fpr(), result
.fpr());
2219 m_jit
.convertFloatToDouble(result
.fpr(), result
.fpr());
2221 doubleResult(result
.fpr(), node
);
2226 compileArithRound(node
);
2230 SpeculateDoubleOperand
op1(this, node
->child1());
2231 FPRReg op1FPR
= op1
.fpr();
2235 FPRResult
result(this);
2236 callOperation(sin
, result
.fpr(), op1FPR
);
2237 doubleResult(result
.fpr(), node
);
2242 SpeculateDoubleOperand
op1(this, node
->child1());
2243 FPRReg op1FPR
= op1
.fpr();
2247 FPRResult
result(this);
2248 callOperation(cos
, result
.fpr(), op1FPR
);
2249 doubleResult(result
.fpr(), node
);
2254 compileArithLog(node
);
2258 compileLogicalNot(node
);
2262 if (compare(node
, JITCompiler::LessThan
, JITCompiler::DoubleLessThan
, operationCompareLess
))
2267 if (compare(node
, JITCompiler::LessThanOrEqual
, JITCompiler::DoubleLessThanOrEqual
, operationCompareLessEq
))
2271 case CompareGreater
:
2272 if (compare(node
, JITCompiler::GreaterThan
, JITCompiler::DoubleGreaterThan
, operationCompareGreater
))
2276 case CompareGreaterEq
:
2277 if (compare(node
, JITCompiler::GreaterThanOrEqual
, JITCompiler::DoubleGreaterThanOrEqual
, operationCompareGreaterEq
))
2281 case CompareEqConstant
:
2282 ASSERT(node
->child2()->asJSValue().isNull());
2283 if (nonSpeculativeCompareNull(node
, node
->child1()))
2288 if (compare(node
, JITCompiler::Equal
, JITCompiler::DoubleEqual
, operationCompareEq
))
2292 case CompareStrictEq
:
2293 if (compileStrictEq(node
))
2297 case StringCharCodeAt
: {
2298 compileGetCharCodeAt(node
);
2302 case StringCharAt
: {
2303 // Relies on StringCharAt node having same basic layout as GetByVal
2304 compileGetByValOnString(node
);
2308 case StringFromCharCode
: {
2309 compileFromCharCode(node
);
2319 case ArrayifyToStructure
: {
2325 switch (node
->arrayMode().type()) {
2326 case Array::SelectUsingPredictions
:
2327 case Array::ForceExit
:
2328 RELEASE_ASSERT_NOT_REACHED();
2329 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
2330 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2333 case Array::Generic
: {
2334 SpeculateCellOperand
base(this, node
->child1()); // Save a register, speculate cell. We'll probably be right.
2335 JSValueOperand
property(this, node
->child2());
2336 GPRReg baseGPR
= base
.gpr();
2337 GPRReg propertyTagGPR
= property
.tagGPR();
2338 GPRReg propertyPayloadGPR
= property
.payloadGPR();
2341 GPRFlushedCallResult2
resultTag(this);
2342 GPRFlushedCallResult
resultPayload(this);
2343 callOperation(operationGetByValCell
, resultTag
.gpr(), resultPayload
.gpr(), baseGPR
, propertyTagGPR
, propertyPayloadGPR
);
2345 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2349 case Array::Contiguous
: {
2350 if (node
->arrayMode().isInBounds()) {
2351 SpeculateStrictInt32Operand
property(this, node
->child2());
2352 StorageOperand
storage(this, node
->child3());
2354 GPRReg propertyReg
= property
.gpr();
2355 GPRReg storageReg
= storage
.gpr();
2360 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2362 GPRTemporary
resultPayload(this);
2363 if (node
->arrayMode().type() == Array::Int32
) {
2364 ASSERT(!node
->arrayMode().isSaneChain());
2367 OutOfBounds
, JSValueRegs(), 0,
2369 MacroAssembler::Equal
,
2370 MacroAssembler::BaseIndex(
2371 storageReg
, propertyReg
, MacroAssembler::TimesEight
, TagOffset
),
2372 TrustedImm32(JSValue::EmptyValueTag
)));
2374 MacroAssembler::BaseIndex(
2375 storageReg
, propertyReg
, MacroAssembler::TimesEight
, PayloadOffset
),
2376 resultPayload
.gpr());
2377 int32Result(resultPayload
.gpr(), node
);
2381 GPRTemporary
resultTag(this);
2383 MacroAssembler::BaseIndex(
2384 storageReg
, propertyReg
, MacroAssembler::TimesEight
, TagOffset
),
2387 MacroAssembler::BaseIndex(
2388 storageReg
, propertyReg
, MacroAssembler::TimesEight
, PayloadOffset
),
2389 resultPayload
.gpr());
2390 if (node
->arrayMode().isSaneChain()) {
2391 JITCompiler::Jump notHole
= m_jit
.branch32(
2392 MacroAssembler::NotEqual
, resultTag
.gpr(),
2393 TrustedImm32(JSValue::EmptyValueTag
));
2394 m_jit
.move(TrustedImm32(JSValue::UndefinedTag
), resultTag
.gpr());
2395 m_jit
.move(TrustedImm32(0), resultPayload
.gpr());
2396 notHole
.link(&m_jit
);
2399 LoadFromHole
, JSValueRegs(), 0,
2401 MacroAssembler::Equal
, resultTag
.gpr(),
2402 TrustedImm32(JSValue::EmptyValueTag
)));
2404 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2408 SpeculateCellOperand
base(this, node
->child1());
2409 SpeculateStrictInt32Operand
property(this, node
->child2());
2410 StorageOperand
storage(this, node
->child3());
2412 GPRReg baseReg
= base
.gpr();
2413 GPRReg propertyReg
= property
.gpr();
2414 GPRReg storageReg
= storage
.gpr();
2419 GPRTemporary
resultTag(this);
2420 GPRTemporary
resultPayload(this);
2421 GPRReg resultTagReg
= resultTag
.gpr();
2422 GPRReg resultPayloadReg
= resultPayload
.gpr();
2424 MacroAssembler::JumpList slowCases
;
2426 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2428 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagReg
);
2429 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadReg
);
2430 slowCases
.append(m_jit
.branch32(MacroAssembler::Equal
, resultTagReg
, TrustedImm32(JSValue::EmptyValueTag
)));
2432 addSlowPathGenerator(
2434 slowCases
, this, operationGetByValArrayInt
,
2435 JSValueRegs(resultTagReg
, resultPayloadReg
), baseReg
, propertyReg
));
2437 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2440 case Array::Double
: {
2441 if (node
->arrayMode().isInBounds()) {
2442 SpeculateStrictInt32Operand
property(this, node
->child2());
2443 StorageOperand
storage(this, node
->child3());
2445 GPRReg propertyReg
= property
.gpr();
2446 GPRReg storageReg
= storage
.gpr();
2451 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2453 FPRTemporary
result(this);
2454 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.fpr());
2455 if (!node
->arrayMode().isSaneChain())
2456 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, result
.fpr(), result
.fpr()));
2457 doubleResult(result
.fpr(), node
);
2461 SpeculateCellOperand
base(this, node
->child1());
2462 SpeculateStrictInt32Operand
property(this, node
->child2());
2463 StorageOperand
storage(this, node
->child3());
2465 GPRReg baseReg
= base
.gpr();
2466 GPRReg propertyReg
= property
.gpr();
2467 GPRReg storageReg
= storage
.gpr();
2472 GPRTemporary
resultTag(this);
2473 GPRTemporary
resultPayload(this);
2474 FPRTemporary
temp(this);
2475 GPRReg resultTagReg
= resultTag
.gpr();
2476 GPRReg resultPayloadReg
= resultPayload
.gpr();
2477 FPRReg tempReg
= temp
.fpr();
2479 MacroAssembler::JumpList slowCases
;
2481 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2483 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), tempReg
);
2484 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempReg
, tempReg
));
2485 boxDouble(tempReg
, resultTagReg
, resultPayloadReg
);
2487 addSlowPathGenerator(
2489 slowCases
, this, operationGetByValArrayInt
,
2490 JSValueRegs(resultTagReg
, resultPayloadReg
), baseReg
, propertyReg
));
2492 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2495 case Array::ArrayStorage
:
2496 case Array::SlowPutArrayStorage
: {
2497 if (node
->arrayMode().isInBounds()) {
2498 SpeculateStrictInt32Operand
property(this, node
->child2());
2499 StorageOperand
storage(this, node
->child3());
2500 GPRReg propertyReg
= property
.gpr();
2501 GPRReg storageReg
= storage
.gpr();
2506 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2508 GPRTemporary
resultTag(this);
2509 GPRTemporary
resultPayload(this);
2511 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
.gpr());
2512 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
)));
2513 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
.gpr());
2515 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2519 SpeculateCellOperand
base(this, node
->child1());
2520 SpeculateStrictInt32Operand
property(this, node
->child2());
2521 StorageOperand
storage(this, node
->child3());
2522 GPRReg propertyReg
= property
.gpr();
2523 GPRReg storageReg
= storage
.gpr();
2524 GPRReg baseReg
= base
.gpr();
2529 GPRTemporary
resultTag(this);
2530 GPRTemporary
resultPayload(this);
2531 GPRReg resultTagReg
= resultTag
.gpr();
2532 GPRReg resultPayloadReg
= resultPayload
.gpr();
2534 JITCompiler::Jump outOfBounds
= m_jit
.branch32(
2535 MacroAssembler::AboveOrEqual
, propertyReg
,
2536 MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2538 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagReg
);
2539 JITCompiler::Jump hole
= m_jit
.branch32(
2540 MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
));
2541 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadReg
);
2543 JITCompiler::JumpList slowCases
;
2544 slowCases
.append(outOfBounds
);
2545 slowCases
.append(hole
);
2546 addSlowPathGenerator(
2548 slowCases
, this, operationGetByValArrayInt
,
2549 JSValueRegs(resultTagReg
, resultPayloadReg
),
2550 baseReg
, propertyReg
));
2552 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2556 compileGetByValOnString(node
);
2558 case Array::DirectArguments
:
2559 compileGetByValOnDirectArguments(node
);
2561 case Array::ScopedArguments
:
2562 compileGetByValOnScopedArguments(node
);
2565 TypedArrayType type
= node
->arrayMode().typedArrayType();
2567 compileGetByValOnIntTypedArray(node
, type
);
2569 compileGetByValOnFloatTypedArray(node
, type
);
2574 case PutByValDirect
:
2576 case PutByValAlias
: {
2577 Edge child1
= m_jit
.graph().varArgChild(node
, 0);
2578 Edge child2
= m_jit
.graph().varArgChild(node
, 1);
2579 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
2580 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
2582 ArrayMode arrayMode
= node
->arrayMode().modeForPut();
2583 bool alreadyHandled
= false;
2585 switch (arrayMode
.type()) {
2586 case Array::SelectUsingPredictions
:
2587 case Array::ForceExit
:
2588 RELEASE_ASSERT_NOT_REACHED();
2589 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
2590 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2591 alreadyHandled
= true;
2594 case Array::Generic
: {
2595 ASSERT(node
->op() == PutByVal
|| node
->op() == PutByValDirect
);
2597 SpeculateCellOperand
base(this, child1
); // Save a register, speculate cell. We'll probably be right.
2598 JSValueOperand
property(this, child2
);
2599 JSValueOperand
value(this, child3
);
2600 GPRReg baseGPR
= base
.gpr();
2601 GPRReg propertyTagGPR
= property
.tagGPR();
2602 GPRReg propertyPayloadGPR
= property
.payloadGPR();
2603 GPRReg valueTagGPR
= value
.tagGPR();
2604 GPRReg valuePayloadGPR
= value
.payloadGPR();
2607 if (node
->op() == PutByValDirect
)
2608 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict
: operationPutByValDirectCellNonStrict
, baseGPR
, propertyTagGPR
, propertyPayloadGPR
, valueTagGPR
, valuePayloadGPR
);
2610 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByValCellStrict
: operationPutByValCellNonStrict
, baseGPR
, propertyTagGPR
, propertyPayloadGPR
, valueTagGPR
, valuePayloadGPR
);
2613 alreadyHandled
= true;
2623 SpeculateCellOperand
base(this, child1
);
2624 SpeculateStrictInt32Operand
property(this, child2
);
2626 GPRReg baseReg
= base
.gpr();
2627 GPRReg propertyReg
= property
.gpr();
2629 switch (arrayMode
.type()) {
2630 case Array::Int32
: {
2631 SpeculateInt32Operand
value(this, child3
);
2633 GPRReg valuePayloadReg
= value
.gpr();
2638 compileContiguousPutByVal(node
, base
, property
, value
, valuePayloadReg
, TrustedImm32(JSValue::Int32Tag
));
2641 case Array::Contiguous
: {
2642 JSValueOperand
value(this, child3
);
2644 GPRReg valueTagReg
= value
.tagGPR();
2645 GPRReg valuePayloadReg
= value
.payloadGPR();
2650 compileContiguousPutByVal(node
, base
, property
, value
, valuePayloadReg
, valueTagReg
);
2653 case Array::Double
: {
2654 compileDoublePutByVal(node
, base
, property
);
2657 case Array::ArrayStorage
:
2658 case Array::SlowPutArrayStorage
: {
2659 JSValueOperand
value(this, child3
);
2661 GPRReg valueTagReg
= value
.tagGPR();
2662 GPRReg valuePayloadReg
= value
.payloadGPR();
2667 StorageOperand
storage(this, child4
);
2668 GPRReg storageReg
= storage
.gpr();
2670 if (node
->op() == PutByValAlias
) {
2671 // Store the value to the array.
2672 GPRReg propertyReg
= property
.gpr();
2673 m_jit
.store32(value
.tagGPR(), MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2674 m_jit
.store32(value
.payloadGPR(), MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2680 MacroAssembler::JumpList slowCases
;
2682 MacroAssembler::Jump beyondArrayBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2683 if (!arrayMode
.isOutOfBounds())
2684 speculationCheck(OutOfBounds
, JSValueRegs(), 0, beyondArrayBounds
);
2686 slowCases
.append(beyondArrayBounds
);
2688 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2689 if (arrayMode
.isInBounds()) {
2691 StoreToHole
, JSValueRegs(), 0,
2692 m_jit
.branch32(MacroAssembler::Equal
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
)));
2694 MacroAssembler::Jump notHoleValue
= m_jit
.branch32(MacroAssembler::NotEqual
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
));
2695 if (arrayMode
.isSlowPut()) {
2696 // This is sort of strange. If we wanted to optimize this code path, we would invert
2697 // the above branch. But it's simply not worth it since this only happens if we're
2698 // already having a bad time.
2699 slowCases
.append(m_jit
.jump());
2701 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, ArrayStorage::numValuesInVectorOffset()));
2703 // If we're writing to a hole we might be growing the array;
2704 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2705 m_jit
.add32(TrustedImm32(1), propertyReg
);
2706 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2707 m_jit
.sub32(TrustedImm32(1), propertyReg
);
2709 lengthDoesNotNeedUpdate
.link(&m_jit
);
2711 notHoleValue
.link(&m_jit
);
2714 // Store the value to the array.
2715 m_jit
.store32(valueTagReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2716 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2723 if (!slowCases
.empty()) {
2724 if (node
->op() == PutByValDirect
) {
2725 addSlowPathGenerator(slowPathCall(
2727 m_jit
.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict
: operationPutByValDirectBeyondArrayBoundsNonStrict
,
2728 NoResult
, baseReg
, propertyReg
, valueTagReg
, valuePayloadReg
));
2730 addSlowPathGenerator(slowPathCall(
2732 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2733 NoResult
, baseReg
, propertyReg
, valueTagReg
, valuePayloadReg
));
2737 noResult(node
, UseChildrenCalledExplicitly
);
2742 TypedArrayType type
= arrayMode
.typedArrayType();
2744 compilePutByValForIntTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2746 compilePutByValForFloatTypedArray(base
.gpr(), property
.gpr(), node
, type
);
2752 if (compileRegExpExec(node
))
2755 if (!node
->adjustedRefCount()) {
2756 SpeculateCellOperand
base(this, node
->child1());
2757 SpeculateCellOperand
argument(this, node
->child2());
2758 GPRReg baseGPR
= base
.gpr();
2759 GPRReg argumentGPR
= argument
.gpr();
2762 GPRFlushedCallResult
result(this);
2763 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2765 // Must use jsValueResult because otherwise we screw up register
2766 // allocation, which thinks that this node has a result.
2767 booleanResult(result
.gpr(), node
);
2771 SpeculateCellOperand
base(this, node
->child1());
2772 SpeculateCellOperand
argument(this, node
->child2());
2773 GPRReg baseGPR
= base
.gpr();
2774 GPRReg argumentGPR
= argument
.gpr();
2777 GPRFlushedCallResult2
resultTag(this);
2778 GPRFlushedCallResult
resultPayload(this);
2779 callOperation(operationRegExpExec
, resultTag
.gpr(), resultPayload
.gpr(), baseGPR
, argumentGPR
);
2781 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2786 SpeculateCellOperand
base(this, node
->child1());
2787 SpeculateCellOperand
argument(this, node
->child2());
2788 GPRReg baseGPR
= base
.gpr();
2789 GPRReg argumentGPR
= argument
.gpr();
2792 GPRFlushedCallResult
result(this);
2793 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2795 // If we add a DataFormatBool, we should use it here.
2796 booleanResult(result
.gpr(), node
);
2801 ASSERT(node
->arrayMode().isJSArray());
2803 SpeculateCellOperand
base(this, node
->child1());
2804 GPRTemporary
storageLength(this);
2806 GPRReg baseGPR
= base
.gpr();
2807 GPRReg storageLengthGPR
= storageLength
.gpr();
2809 StorageOperand
storage(this, node
->child3());
2810 GPRReg storageGPR
= storage
.gpr();
2812 switch (node
->arrayMode().type()) {
2813 case Array::Int32
: {
2814 SpeculateInt32Operand
value(this, node
->child2());
2815 GPRReg valuePayloadGPR
= value
.gpr();
2817 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2818 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2819 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2820 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2821 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2822 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2823 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2825 addSlowPathGenerator(
2827 slowPath
, this, operationArrayPush
,
2828 JSValueRegs(storageGPR
, storageLengthGPR
),
2829 TrustedImm32(JSValue::Int32Tag
), valuePayloadGPR
, baseGPR
));
2831 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2835 case Array::Contiguous
: {
2836 JSValueOperand
value(this, node
->child2());
2837 GPRReg valueTagGPR
= value
.tagGPR();
2838 GPRReg valuePayloadGPR
= value
.payloadGPR();
2840 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2841 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2842 m_jit
.store32(valueTagGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2843 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2844 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2845 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2846 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2848 addSlowPathGenerator(
2850 slowPath
, this, operationArrayPush
,
2851 JSValueRegs(storageGPR
, storageLengthGPR
),
2852 valueTagGPR
, valuePayloadGPR
, baseGPR
));
2854 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2858 case Array::Double
: {
2859 SpeculateDoubleOperand
value(this, node
->child2());
2860 FPRReg valueFPR
= value
.fpr();
2863 JSValueRegs(), node
->child2(), SpecDoubleReal
,
2864 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, valueFPR
, valueFPR
));
2866 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
2867 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
2868 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
2869 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2870 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2871 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2873 addSlowPathGenerator(
2875 slowPath
, this, operationArrayPushDouble
,
2876 JSValueRegs(storageGPR
, storageLengthGPR
),
2877 valueFPR
, baseGPR
));
2879 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2883 case Array::ArrayStorage
: {
2884 JSValueOperand
value(this, node
->child2());
2885 GPRReg valueTagGPR
= value
.tagGPR();
2886 GPRReg valuePayloadGPR
= value
.payloadGPR();
2888 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
2890 // Refuse to handle bizarre lengths.
2891 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Above
, storageLengthGPR
, TrustedImm32(0x7ffffffe)));
2893 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
2895 m_jit
.store32(valueTagGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2896 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2898 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
2899 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
2900 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
2901 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
2903 addSlowPathGenerator(slowPathCall(slowPath
, this, operationArrayPush
, JSValueRegs(storageGPR
, storageLengthGPR
), valueTagGPR
, valuePayloadGPR
, baseGPR
));
2905 jsValueResult(storageGPR
, storageLengthGPR
, node
);
2917 ASSERT(node
->arrayMode().isJSArray());
2919 SpeculateCellOperand
base(this, node
->child1());
2920 StorageOperand
storage(this, node
->child2());
2921 GPRTemporary
valueTag(this);
2922 GPRTemporary
valuePayload(this);
2924 GPRReg baseGPR
= base
.gpr();
2925 GPRReg valueTagGPR
= valueTag
.gpr();
2926 GPRReg valuePayloadGPR
= valuePayload
.gpr();
2927 GPRReg storageGPR
= storage
.gpr();
2929 switch (node
->arrayMode().type()) {
2931 case Array::Contiguous
: {
2933 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), valuePayloadGPR
);
2934 MacroAssembler::Jump undefinedCase
=
2935 m_jit
.branchTest32(MacroAssembler::Zero
, valuePayloadGPR
);
2936 m_jit
.sub32(TrustedImm32(1), valuePayloadGPR
);
2938 valuePayloadGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2940 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)),
2942 MacroAssembler::Jump slowCase
= m_jit
.branch32(MacroAssembler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
2944 MacroAssembler::TrustedImm32(JSValue::EmptyValueTag
),
2945 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2947 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)),
2950 addSlowPathGenerator(
2952 undefinedCase
, this,
2953 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
2954 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
2955 addSlowPathGenerator(
2957 slowCase
, this, operationArrayPopAndRecoverLength
,
2958 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
2960 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
2964 case Array::Double
: {
2965 FPRTemporary
temp(this);
2966 FPRReg tempFPR
= temp
.fpr();
2969 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), valuePayloadGPR
);
2970 MacroAssembler::Jump undefinedCase
=
2971 m_jit
.branchTest32(MacroAssembler::Zero
, valuePayloadGPR
);
2972 m_jit
.sub32(TrustedImm32(1), valuePayloadGPR
);
2974 valuePayloadGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
2976 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
),
2978 MacroAssembler::Jump slowCase
= m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempFPR
, tempFPR
);
2979 JSValue nan
= JSValue(JSValue::EncodeAsDouble
, PNaN
);
2981 MacroAssembler::TrustedImm32(nan
.u
.asBits
.tag
),
2982 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2984 MacroAssembler::TrustedImm32(nan
.u
.asBits
.payload
),
2985 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2986 boxDouble(tempFPR
, valueTagGPR
, valuePayloadGPR
);
2988 addSlowPathGenerator(
2990 undefinedCase
, this,
2991 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
2992 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
2993 addSlowPathGenerator(
2995 slowCase
, this, operationArrayPopAndRecoverLength
,
2996 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
2998 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
3002 case Array::ArrayStorage
: {
3003 GPRTemporary
storageLength(this);
3004 GPRReg storageLengthGPR
= storageLength
.gpr();
3006 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3008 JITCompiler::JumpList setUndefinedCases
;
3009 setUndefinedCases
.append(m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
));
3011 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
3013 MacroAssembler::Jump slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
3015 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), valueTagGPR
);
3016 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), valuePayloadGPR
);
3018 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3020 setUndefinedCases
.append(m_jit
.branch32(MacroAssembler::Equal
, TrustedImm32(JSValue::EmptyValueTag
), valueTagGPR
));
3022 m_jit
.store32(TrustedImm32(JSValue::EmptyValueTag
), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3024 m_jit
.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3026 addSlowPathGenerator(
3028 setUndefinedCases
, this,
3029 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
3030 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
3032 addSlowPathGenerator(
3034 slowCase
, this, operationArrayPop
,
3035 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
3037 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
3049 jump(node
->targetBlock());
3063 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT2
);
3064 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
3065 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
3067 // Return the result in returnValueGPR.
3068 JSValueOperand
op1(this, node
->child1());
3071 boxDouble(op1
.fpr(), GPRInfo::returnValueGPR2
, GPRInfo::returnValueGPR
);
3073 if (op1
.payloadGPR() == GPRInfo::returnValueGPR2
&& op1
.tagGPR() == GPRInfo::returnValueGPR
)
3074 m_jit
.swap(GPRInfo::returnValueGPR
, GPRInfo::returnValueGPR2
);
3075 else if (op1
.payloadGPR() == GPRInfo::returnValueGPR2
) {
3076 m_jit
.move(op1
.payloadGPR(), GPRInfo::returnValueGPR
);
3077 m_jit
.move(op1
.tagGPR(), GPRInfo::returnValueGPR2
);
3079 m_jit
.move(op1
.tagGPR(), GPRInfo::returnValueGPR2
);
3080 m_jit
.move(op1
.payloadGPR(), GPRInfo::returnValueGPR
);
3084 m_jit
.emitFunctionEpilogue();
3092 case ThrowReferenceError
: {
3093 // We expect that throw statements are rare and are intended to exit the code block
3094 // anyway, so we just OSR back to the old JIT for now.
3095 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
3099 case BooleanToNumber
: {
3100 switch (node
->child1().useKind()) {
3102 SpeculateBooleanOperand
value(this, node
->child1());
3103 GPRTemporary
result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
3105 m_jit
.move(value
.gpr(), result
.gpr());
3107 int32Result(result
.gpr(), node
);
3112 JSValueOperand
value(this, node
->child1());
3114 if (!m_interpreter
.needsTypeCheck(node
->child1(), SpecBoolInt32
| SpecBoolean
)) {
3115 GPRTemporary
result(this);
3117 GPRReg valueGPR
= value
.payloadGPR();
3118 GPRReg resultGPR
= result
.gpr();
3120 m_jit
.move(valueGPR
, resultGPR
);
3121 int32Result(result
.gpr(), node
);
3125 GPRTemporary
resultTag(this);
3126 GPRTemporary
resultPayload(this);
3128 GPRReg valueTagGPR
= value
.tagGPR();
3129 GPRReg valuePayloadGPR
= value
.payloadGPR();
3130 GPRReg resultTagGPR
= resultTag
.gpr();
3131 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3133 m_jit
.move(valuePayloadGPR
, resultPayloadGPR
);
3134 JITCompiler::Jump isBoolean
= m_jit
.branch32(
3135 JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::BooleanTag
));
3136 m_jit
.move(valueTagGPR
, resultTagGPR
);
3137 JITCompiler::Jump done
= m_jit
.jump();
3138 isBoolean
.link(&m_jit
);
3139 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), resultTagGPR
);
3142 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
3147 RELEASE_ASSERT_NOT_REACHED();
3154 RELEASE_ASSERT(node
->child1().useKind() == UntypedUse
);
3155 JSValueOperand
op1(this, node
->child1());
3156 GPRTemporary
resultTag(this, Reuse
, op1
, TagWord
);
3157 GPRTemporary
resultPayload(this, Reuse
, op1
, PayloadWord
);
3159 GPRReg op1TagGPR
= op1
.tagGPR();
3160 GPRReg op1PayloadGPR
= op1
.payloadGPR();
3161 GPRReg resultTagGPR
= resultTag
.gpr();
3162 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3166 if (!(m_state
.forNode(node
->child1()).m_type
& ~(SpecFullNumber
| SpecBoolean
))) {
3167 m_jit
.move(op1TagGPR
, resultTagGPR
);
3168 m_jit
.move(op1PayloadGPR
, resultPayloadGPR
);
3170 MacroAssembler::Jump alreadyPrimitive
= m_jit
.branchIfNotCell(op1
.jsValueRegs());
3171 MacroAssembler::Jump notPrimitive
= m_jit
.branchIfObject(op1PayloadGPR
);
3173 alreadyPrimitive
.link(&m_jit
);
3174 m_jit
.move(op1TagGPR
, resultTagGPR
);
3175 m_jit
.move(op1PayloadGPR
, resultPayloadGPR
);
3177 addSlowPathGenerator(
3179 notPrimitive
, this, operationToPrimitive
,
3180 JSValueRegs(resultTagGPR
, resultPayloadGPR
), op1TagGPR
, op1PayloadGPR
));
3183 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3188 case CallStringConstructor
: {
3189 if (node
->child1().useKind() == UntypedUse
) {
3190 JSValueOperand
op1(this, node
->child1());
3191 GPRReg op1PayloadGPR
= op1
.payloadGPR();
3192 GPRReg op1TagGPR
= op1
.tagGPR();
3194 GPRFlushedCallResult
result(this);
3195 GPRReg resultGPR
= result
.gpr();
3199 JITCompiler::Jump done
;
3200 if (node
->child1()->prediction() & SpecString
) {
3201 JITCompiler::Jump slowPath1
= m_jit
.branchIfNotCell(op1
.jsValueRegs());
3202 JITCompiler::Jump slowPath2
= m_jit
.branchIfNotString(op1PayloadGPR
);
3203 m_jit
.move(op1PayloadGPR
, resultGPR
);
3204 done
= m_jit
.jump();
3205 slowPath1
.link(&m_jit
);
3206 slowPath2
.link(&m_jit
);
3209 callOperation(operationToString
, resultGPR
, op1TagGPR
, op1PayloadGPR
);
3211 ASSERT(op
== CallStringConstructor
);
3212 callOperation(operationCallStringConstructor
, resultGPR
, op1TagGPR
, op1PayloadGPR
);
3216 cellResult(resultGPR
, node
);
3220 compileToStringOrCallStringConstructorOnCell(node
);
3224 case NewStringObject
: {
3225 compileNewStringObject(node
);
3230 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3231 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3232 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3233 ASSERT(structure
->indexingType() == node
->indexingType());
3235 hasUndecided(structure
->indexingType())
3236 || hasInt32(structure
->indexingType())
3237 || hasDouble(structure
->indexingType())
3238 || hasContiguous(structure
->indexingType()));
3240 unsigned numElements
= node
->numChildren();
3242 GPRTemporary
result(this);
3243 GPRTemporary
storage(this);
3245 GPRReg resultGPR
= result
.gpr();
3246 GPRReg storageGPR
= storage
.gpr();
3248 emitAllocateJSArray(resultGPR
, structure
, storageGPR
, numElements
);
3250 // At this point, one way or another, resultGPR and storageGPR have pointers to
3251 // the JSArray and the Butterfly, respectively.
3253 ASSERT(!hasUndecided(structure
->indexingType()) || !node
->numChildren());
3255 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3256 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3257 switch (node
->indexingType()) {
3258 case ALL_BLANK_INDEXING_TYPES
:
3259 case ALL_UNDECIDED_INDEXING_TYPES
:
3262 case ALL_DOUBLE_INDEXING_TYPES
: {
3263 SpeculateDoubleOperand
operand(this, use
);
3264 FPRReg opFPR
= operand
.fpr();
3266 JSValueRegs(), use
, SpecDoubleReal
,
3267 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3269 m_jit
.storeDouble(opFPR
, MacroAssembler::Address(storageGPR
, sizeof(double) * operandIdx
));
3272 case ALL_INT32_INDEXING_TYPES
: {
3273 SpeculateInt32Operand
operand(this, use
);
3274 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3275 m_jit
.store32(operand
.gpr(), MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3278 case ALL_CONTIGUOUS_INDEXING_TYPES
: {
3279 JSValueOperand
operand(this, m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
]);
3280 GPRReg opTagGPR
= operand
.tagGPR();
3281 GPRReg opPayloadGPR
= operand
.payloadGPR();
3282 m_jit
.store32(opTagGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3283 m_jit
.store32(opPayloadGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3292 // Yuck, we should *really* have a way of also returning the storageGPR. But
3293 // that's the least of what's wrong with this code. We really shouldn't be
3294 // allocating the array after having computed - and probably spilled to the
3295 // stack - all of the things that will go into the array. The solution to that
3296 // bigger problem will also likely fix the redundancy in reloading the storage
3297 // pointer that we currently have.
3299 cellResult(resultGPR
, node
);
3303 if (!node
->numChildren()) {
3305 GPRFlushedCallResult
result(this);
3307 operationNewEmptyArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()));
3308 cellResult(result
.gpr(), node
);
3312 size_t scratchSize
= sizeof(EncodedJSValue
) * node
->numChildren();
3313 ScratchBuffer
* scratchBuffer
= m_jit
.vm()->scratchBufferForSize(scratchSize
);
3314 EncodedJSValue
* buffer
= scratchBuffer
? static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer()) : 0;
3316 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3317 // Need to perform the speculations that this node promises to perform. If we're
3318 // emitting code here and the indexing type is not array storage then there is
3319 // probably something hilarious going on and we're already failing at all the
3320 // things, but at least we're going to be sound.
3321 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3322 switch (node
->indexingType()) {
3323 case ALL_BLANK_INDEXING_TYPES
:
3324 case ALL_UNDECIDED_INDEXING_TYPES
:
3327 case ALL_DOUBLE_INDEXING_TYPES
: {
3328 SpeculateDoubleOperand
operand(this, use
);
3329 FPRReg opFPR
= operand
.fpr();
3331 JSValueRegs(), use
, SpecFullRealNumber
,
3332 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3334 m_jit
.storeDouble(opFPR
, TrustedImmPtr(reinterpret_cast<char*>(buffer
+ operandIdx
)));
3337 case ALL_INT32_INDEXING_TYPES
: {
3338 SpeculateInt32Operand
operand(this, use
);
3339 GPRReg opGPR
= operand
.gpr();
3340 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
3341 m_jit
.store32(opGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
3344 case ALL_CONTIGUOUS_INDEXING_TYPES
:
3345 case ALL_ARRAY_STORAGE_INDEXING_TYPES
: {
3346 JSValueOperand
operand(this, m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
]);
3347 GPRReg opTagGPR
= operand
.tagGPR();
3348 GPRReg opPayloadGPR
= operand
.payloadGPR();
3350 m_jit
.store32(opTagGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
3351 m_jit
.store32(opPayloadGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
3361 switch (node
->indexingType()) {
3362 case ALL_DOUBLE_INDEXING_TYPES
:
3363 case ALL_INT32_INDEXING_TYPES
:
3373 GPRTemporary
scratch(this);
3375 // Tell GC mark phase how much of the scratch buffer is active during call.
3376 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3377 m_jit
.storePtr(TrustedImmPtr(scratchSize
), scratch
.gpr());
3380 GPRFlushedCallResult
result(this);
3383 operationNewArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3384 static_cast<void*>(buffer
), node
->numChildren());
3387 GPRTemporary
scratch(this);
3389 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3390 m_jit
.storePtr(TrustedImmPtr(0), scratch
.gpr());
3393 cellResult(result
.gpr(), node
, UseChildrenCalledExplicitly
);
3397 case NewArrayWithSize
: {
3398 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3399 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(node
->indexingType())) {
3400 SpeculateStrictInt32Operand
size(this, node
->child1());
3401 GPRTemporary
result(this);
3402 GPRTemporary
storage(this);
3403 GPRTemporary
scratch(this);
3404 GPRTemporary
scratch2(this);
3406 GPRReg sizeGPR
= size
.gpr();
3407 GPRReg resultGPR
= result
.gpr();
3408 GPRReg storageGPR
= storage
.gpr();
3409 GPRReg scratchGPR
= scratch
.gpr();
3410 GPRReg scratch2GPR
= scratch2
.gpr();
3412 MacroAssembler::JumpList slowCases
;
3413 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH
)));
3415 ASSERT((1 << 3) == sizeof(JSValue
));
3416 m_jit
.move(sizeGPR
, scratchGPR
);
3417 m_jit
.lshift32(TrustedImm32(3), scratchGPR
);
3418 m_jit
.add32(TrustedImm32(sizeof(IndexingHeader
)), scratchGPR
, resultGPR
);
3420 emitAllocateBasicStorage(resultGPR
, storageGPR
));
3421 m_jit
.subPtr(scratchGPR
, storageGPR
);
3422 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3423 emitAllocateJSObject
<JSArray
>(resultGPR
, TrustedImmPtr(structure
), storageGPR
, scratchGPR
, scratch2GPR
, slowCases
);
3425 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3426 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3428 if (hasDouble(node
->indexingType())) {
3429 JSValue nan
= JSValue(JSValue::EncodeAsDouble
, PNaN
);
3431 m_jit
.move(sizeGPR
, scratchGPR
);
3432 MacroAssembler::Jump done
= m_jit
.branchTest32(MacroAssembler::Zero
, scratchGPR
);
3433 MacroAssembler::Label loop
= m_jit
.label();
3434 m_jit
.sub32(TrustedImm32(1), scratchGPR
);
3435 m_jit
.store32(TrustedImm32(nan
.u
.asBits
.tag
), MacroAssembler::BaseIndex(storageGPR
, scratchGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3436 m_jit
.store32(TrustedImm32(nan
.u
.asBits
.payload
), MacroAssembler::BaseIndex(storageGPR
, scratchGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3437 m_jit
.branchTest32(MacroAssembler::NonZero
, scratchGPR
).linkTo(loop
, &m_jit
);
3441 addSlowPathGenerator(std::make_unique
<CallArrayAllocatorWithVariableSizeSlowPathGenerator
>(
3442 slowCases
, this, operationNewArrayWithSize
, resultGPR
,
3443 globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3444 globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
),
3447 cellResult(resultGPR
, node
);
3451 SpeculateStrictInt32Operand
size(this, node
->child1());
3452 GPRReg sizeGPR
= size
.gpr();
3454 GPRFlushedCallResult
result(this);
3455 GPRReg resultGPR
= result
.gpr();
3456 GPRReg structureGPR
= selectScratchGPR(sizeGPR
);
3457 MacroAssembler::Jump bigLength
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH
));
3458 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType())), structureGPR
);
3459 MacroAssembler::Jump done
= m_jit
.jump();
3460 bigLength
.link(&m_jit
);
3461 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
)), structureGPR
);
3464 operationNewArrayWithSize
, resultGPR
, structureGPR
, sizeGPR
);
3465 cellResult(resultGPR
, node
);
3469 case NewArrayBuffer
: {
3470 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3471 IndexingType indexingType
= node
->indexingType();
3472 if (!globalObject
->isHavingABadTime() && !hasAnyArrayStorage(indexingType
)) {
3473 unsigned numElements
= node
->numConstants();
3475 GPRTemporary
result(this);
3476 GPRTemporary
storage(this);
3478 GPRReg resultGPR
= result
.gpr();
3479 GPRReg storageGPR
= storage
.gpr();
3481 emitAllocateJSArray(resultGPR
, globalObject
->arrayStructureForIndexingTypeDuringAllocation(indexingType
), storageGPR
, numElements
);
3483 if (node
->indexingType() == ArrayWithDouble
) {
3484 JSValue
* data
= m_jit
.codeBlock()->constantBuffer(node
->startConstant());
3485 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3490 u
.value
= data
[index
].asNumber();
3491 m_jit
.store32(Imm32(u
.halves
[0]), MacroAssembler::Address(storageGPR
, sizeof(double) * index
));
3492 m_jit
.store32(Imm32(u
.halves
[1]), MacroAssembler::Address(storageGPR
, sizeof(double) * index
+ sizeof(int32_t)));
3495 int32_t* data
= bitwise_cast
<int32_t*>(m_jit
.codeBlock()->constantBuffer(node
->startConstant()));
3496 for (unsigned index
= 0; index
< node
->numConstants() * 2; ++index
) {
3498 Imm32(data
[index
]), MacroAssembler::Address(storageGPR
, sizeof(int32_t) * index
));
3502 cellResult(resultGPR
, node
);
3507 GPRFlushedCallResult
result(this);
3509 callOperation(operationNewArrayBuffer
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()), node
->startConstant(), node
->numConstants());
3511 cellResult(result
.gpr(), node
);
3515 case NewTypedArray
: {
3516 switch (node
->child1().useKind()) {
3518 compileNewTypedArray(node
);
3521 JSValueOperand
argument(this, node
->child1());
3522 GPRReg argumentTagGPR
= argument
.tagGPR();
3523 GPRReg argumentPayloadGPR
= argument
.payloadGPR();
3527 GPRFlushedCallResult
result(this);
3528 GPRReg resultGPR
= result
.gpr();
3530 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
3532 operationNewTypedArrayWithOneArgumentForType(node
->typedArrayType()),
3533 resultGPR
, globalObject
->typedArrayStructure(node
->typedArrayType()),
3534 argumentTagGPR
, argumentPayloadGPR
);
3536 cellResult(resultGPR
, node
);
3540 RELEASE_ASSERT_NOT_REACHED();
3548 GPRFlushedCallResult
resultPayload(this);
3549 GPRFlushedCallResult2
resultTag(this);
3551 callOperation(operationNewRegexp
, resultTag
.gpr(), resultPayload
.gpr(), m_jit
.codeBlock()->regexp(node
->regexpIndex()));
3553 // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
3554 cellResult(resultPayload
.gpr(), node
);
3559 ASSERT(node
->child1().useKind() == UntypedUse
);
3560 JSValueOperand
thisValue(this, node
->child1());
3561 GPRTemporary
temp(this);
3562 GPRTemporary
tempTag(this);
3563 GPRReg thisValuePayloadGPR
= thisValue
.payloadGPR();
3564 GPRReg thisValueTagGPR
= thisValue
.tagGPR();
3565 GPRReg tempGPR
= temp
.gpr();
3566 GPRReg tempTagGPR
= tempTag
.gpr();
3568 MacroAssembler::JumpList slowCases
;
3569 slowCases
.append(m_jit
.branchIfNotCell(thisValue
.jsValueRegs()));
3570 slowCases
.append(m_jit
.branch8(
3571 MacroAssembler::NotEqual
,
3572 MacroAssembler::Address(thisValuePayloadGPR
, JSCell::typeInfoTypeOffset()),
3573 TrustedImm32(FinalObjectType
)));
3574 m_jit
.move(thisValuePayloadGPR
, tempGPR
);
3575 m_jit
.move(thisValueTagGPR
, tempTagGPR
);
3576 J_JITOperation_EJ function
;
3577 if (m_jit
.graph().executableFor(node
->origin
.semantic
)->isStrictMode())
3578 function
= operationToThisStrict
;
3580 function
= operationToThis
;
3581 addSlowPathGenerator(
3583 slowCases
, this, function
,
3584 JSValueRegs(tempTagGPR
, tempGPR
), thisValueTagGPR
, thisValuePayloadGPR
));
3586 jsValueResult(tempTagGPR
, tempGPR
, node
);
3591 // Note that there is not so much profit to speculate here. The only things we
3592 // speculate on are (1) that it's a cell, since that eliminates cell checks
3593 // later if the proto is reused, and (2) if we have a FinalObject prediction
3594 // then we speculate because we want to get recompiled if it isn't (since
3595 // otherwise we'd start taking slow path a lot).
3597 SpeculateCellOperand
callee(this, node
->child1());
3598 GPRTemporary
result(this);
3599 GPRTemporary
allocator(this);
3600 GPRTemporary
structure(this);
3601 GPRTemporary
scratch(this);
3603 GPRReg calleeGPR
= callee
.gpr();
3604 GPRReg resultGPR
= result
.gpr();
3605 GPRReg allocatorGPR
= allocator
.gpr();
3606 GPRReg structureGPR
= structure
.gpr();
3607 GPRReg scratchGPR
= scratch
.gpr();
3608 // Rare data is only used to access the allocator & structure
3609 // We can avoid using an additional GPR this way
3610 GPRReg rareDataGPR
= structureGPR
;
3612 MacroAssembler::JumpList slowPath
;
3614 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfRareData()), rareDataGPR
);
3615 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, rareDataGPR
));
3616 m_jit
.loadPtr(JITCompiler::Address(rareDataGPR
, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR
);
3617 m_jit
.loadPtr(JITCompiler::Address(rareDataGPR
, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR
);
3618 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, allocatorGPR
));
3619 emitAllocateJSObject(resultGPR
, allocatorGPR
, structureGPR
, TrustedImmPtr(0), scratchGPR
, slowPath
);
3621 addSlowPathGenerator(slowPathCall(slowPath
, this, operationCreateThis
, resultGPR
, calleeGPR
, node
->inlineCapacity()));
3623 cellResult(resultGPR
, node
);
3628 GPRTemporary
result(this);
3629 GPRTemporary
allocator(this);
3630 GPRTemporary
scratch(this);
3632 GPRReg resultGPR
= result
.gpr();
3633 GPRReg allocatorGPR
= allocator
.gpr();
3634 GPRReg scratchGPR
= scratch
.gpr();
3636 MacroAssembler::JumpList slowPath
;
3638 Structure
* structure
= node
->structure();
3639 size_t allocationSize
= JSFinalObject::allocationSize(structure
->inlineCapacity());
3640 MarkedAllocator
* allocatorPtr
= &m_jit
.vm()->heap
.allocatorForObjectWithoutDestructor(allocationSize
);
3642 m_jit
.move(TrustedImmPtr(allocatorPtr
), allocatorGPR
);
3643 emitAllocateJSObject(resultGPR
, allocatorGPR
, TrustedImmPtr(structure
), TrustedImmPtr(0), scratchGPR
, slowPath
);
3645 addSlowPathGenerator(slowPathCall(slowPath
, this, operationNewObject
, resultGPR
, structure
));
3647 cellResult(resultGPR
, node
);
3652 GPRTemporary
result(this);
3653 m_jit
.loadPtr(JITCompiler::payloadFor(JSStack::Callee
), result
.gpr());
3654 cellResult(result
.gpr(), node
);
3658 case GetArgumentCount
: {
3659 GPRTemporary
result(this);
3660 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), result
.gpr());
3661 int32Result(result
.gpr(), node
);
3666 compileGetScope(node
);
3670 compileSkipScope(node
);
3673 case GetClosureVar
: {
3674 SpeculateCellOperand
base(this, node
->child1());
3675 GPRTemporary
resultTag(this);
3676 GPRTemporary
resultPayload(this);
3677 GPRReg baseGPR
= base
.gpr();
3678 GPRReg resultTagGPR
= resultTag
.gpr();
3679 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3680 m_jit
.load32(JITCompiler::Address(baseGPR
, JSEnvironmentRecord::offsetOfVariable(node
->scopeOffset()) + TagOffset
), resultTagGPR
);
3681 m_jit
.load32(JITCompiler::Address(baseGPR
, JSEnvironmentRecord::offsetOfVariable(node
->scopeOffset()) + PayloadOffset
), resultPayloadGPR
);
3682 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
3686 case PutClosureVar
: {
3687 SpeculateCellOperand
base(this, node
->child1());
3688 JSValueOperand
value(this, node
->child2());
3690 GPRReg baseGPR
= base
.gpr();
3691 GPRReg valueTagGPR
= value
.tagGPR();
3692 GPRReg valuePayloadGPR
= value
.payloadGPR();
3694 m_jit
.store32(valueTagGPR
, JITCompiler::Address(baseGPR
, JSEnvironmentRecord::offsetOfVariable(node
->scopeOffset()) + TagOffset
));
3695 m_jit
.store32(valuePayloadGPR
, JITCompiler::Address(baseGPR
, JSEnvironmentRecord::offsetOfVariable(node
->scopeOffset()) + PayloadOffset
));
3701 ASSERT(node
->prediction());
3703 switch (node
->child1().useKind()) {
3705 SpeculateCellOperand
base(this, node
->child1());
3706 GPRTemporary
resultTag(this);
3707 GPRTemporary
resultPayload(this, Reuse
, base
);
3709 GPRReg baseGPR
= base
.gpr();
3710 GPRReg resultTagGPR
= resultTag
.gpr();
3711 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3715 cachedGetById(node
->origin
.semantic
, InvalidGPRReg
, baseGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber());
3717 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3722 JSValueOperand
base(this, node
->child1());
3723 GPRTemporary
resultTag(this);
3724 GPRTemporary
resultPayload(this, Reuse
, base
, TagWord
);
3726 GPRReg baseTagGPR
= base
.tagGPR();
3727 GPRReg basePayloadGPR
= base
.payloadGPR();
3728 GPRReg resultTagGPR
= resultTag
.gpr();
3729 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3733 JITCompiler::Jump notCell
= m_jit
.branchIfNotCell(base
.jsValueRegs());
3735 cachedGetById(node
->origin
.semantic
, baseTagGPR
, basePayloadGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), notCell
);
3737 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3742 RELEASE_ASSERT_NOT_REACHED();
3748 case GetByIdFlush
: {
3749 if (!node
->prediction()) {
3750 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3754 switch (node
->child1().useKind()) {
3756 SpeculateCellOperand
base(this, node
->child1());
3758 GPRReg baseGPR
= base
.gpr();
3760 GPRFlushedCallResult
resultPayload(this);
3761 GPRFlushedCallResult2
resultTag(this);
3762 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3763 GPRReg resultTagGPR
= resultTag
.gpr();
3769 cachedGetById(node
->origin
.semantic
, InvalidGPRReg
, baseGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), JITCompiler::Jump(), DontSpill
);
3771 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3776 JSValueOperand
base(this, node
->child1());
3777 GPRReg baseTagGPR
= base
.tagGPR();
3778 GPRReg basePayloadGPR
= base
.payloadGPR();
3780 GPRFlushedCallResult
resultPayload(this);
3781 GPRFlushedCallResult2
resultTag(this);
3782 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3783 GPRReg resultTagGPR
= resultTag
.gpr();
3789 JITCompiler::Jump notCell
= m_jit
.branchIfNotCell(base
.jsValueRegs());
3791 cachedGetById(node
->origin
.semantic
, baseTagGPR
, basePayloadGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), notCell
, DontSpill
);
3793 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3798 RELEASE_ASSERT_NOT_REACHED();
3804 case GetArrayLength
:
3805 compileGetArrayLength(node
);
3809 SpeculateCellOperand
cell(this, node
->child1());
3810 speculationCheck(BadCell
, JSValueSource::unboxedCell(cell
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, cell
.gpr(), node
->cellOperand()->cell()));
3815 case CheckNotEmpty
: {
3816 JSValueOperand
operand(this, node
->child1());
3817 GPRReg tagGPR
= operand
.tagGPR();
3818 speculationCheck(TDZFailure
, JSValueSource(), nullptr, m_jit
.branch32(JITCompiler::Equal
, tagGPR
, TrustedImm32(JSValue::EmptyValueTag
)));
3823 case GetExecutable
: {
3824 SpeculateCellOperand
function(this, node
->child1());
3825 GPRTemporary
result(this, Reuse
, function
);
3826 GPRReg functionGPR
= function
.gpr();
3827 GPRReg resultGPR
= result
.gpr();
3828 speculateCellType(node
->child1(), functionGPR
, SpecFunction
, JSFunctionType
);
3829 m_jit
.loadPtr(JITCompiler::Address(functionGPR
, JSFunction::offsetOfExecutable()), resultGPR
);
3830 cellResult(resultGPR
, node
);
3834 case CheckStructure
: {
3835 SpeculateCellOperand
base(this, node
->child1());
3837 ASSERT(node
->structureSet().size());
3839 if (node
->structureSet().size() == 1) {
3841 BadCache
, JSValueSource::unboxedCell(base
.gpr()), 0,
3842 m_jit
.branchWeakPtr(
3843 JITCompiler::NotEqual
,
3844 JITCompiler::Address(base
.gpr(), JSCell::structureIDOffset()),
3845 node
->structureSet()[0]));
3847 GPRTemporary
structure(this);
3849 m_jit
.loadPtr(JITCompiler::Address(base
.gpr(), JSCell::structureIDOffset()), structure
.gpr());
3851 JITCompiler::JumpList done
;
3853 for (size_t i
= 0; i
< node
->structureSet().size() - 1; ++i
)
3854 done
.append(m_jit
.branchWeakPtr(JITCompiler::Equal
, structure
.gpr(), node
->structureSet()[i
]));
3857 BadCache
, JSValueSource::unboxedCell(base
.gpr()), 0,
3858 m_jit
.branchWeakPtr(
3859 JITCompiler::NotEqual
, structure
.gpr(), node
->structureSet().last()));
3868 case PutStructure
: {
3869 Structure
* oldStructure
= node
->transition()->previous
;
3870 Structure
* newStructure
= node
->transition()->next
;
3872 m_jit
.jitCode()->common
.notifyCompilingStructureTransition(m_jit
.graph().m_plan
, m_jit
.codeBlock(), node
);
3874 SpeculateCellOperand
base(this, node
->child1());
3875 GPRReg baseGPR
= base
.gpr();
3877 ASSERT_UNUSED(oldStructure
, oldStructure
->indexingType() == newStructure
->indexingType());
3878 ASSERT(oldStructure
->typeInfo().type() == newStructure
->typeInfo().type());
3879 ASSERT(oldStructure
->typeInfo().inlineTypeFlags() == newStructure
->typeInfo().inlineTypeFlags());
3880 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(newStructure
), MacroAssembler::Address(baseGPR
, JSCell::structureIDOffset()));
3886 case AllocatePropertyStorage
:
3887 compileAllocatePropertyStorage(node
);
3890 case ReallocatePropertyStorage
:
3891 compileReallocatePropertyStorage(node
);
3894 case GetButterfly
: {
3895 SpeculateCellOperand
base(this, node
->child1());
3896 GPRTemporary
result(this, Reuse
, base
);
3898 GPRReg baseGPR
= base
.gpr();
3899 GPRReg resultGPR
= result
.gpr();
3901 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), resultGPR
);
3903 storageResult(resultGPR
, node
);
3907 case GetIndexedPropertyStorage
: {
3908 compileGetIndexedPropertyStorage(node
);
3912 case ConstantStoragePointer
: {
3913 compileConstantStoragePointer(node
);
3917 case GetTypedArrayByteOffset
: {
3918 compileGetTypedArrayByteOffset(node
);
3923 StorageOperand
storage(this, node
->child1());
3924 GPRTemporary
resultTag(this, Reuse
, storage
);
3925 GPRTemporary
resultPayload(this);
3927 GPRReg storageGPR
= storage
.gpr();
3928 GPRReg resultTagGPR
= resultTag
.gpr();
3929 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3931 StorageAccessData
& storageAccessData
= node
->storageAccessData();
3933 m_jit
.load32(JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayloadGPR
);
3934 m_jit
.load32(JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTagGPR
);
3936 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
3940 case GetGetterSetterByOffset
: {
3941 StorageOperand
storage(this, node
->child1());
3942 GPRTemporary
resultPayload(this);
3944 GPRReg storageGPR
= storage
.gpr();
3945 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3947 StorageAccessData
& storageAccessData
= node
->storageAccessData();
3949 m_jit
.load32(JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayloadGPR
);
3951 cellResult(resultPayloadGPR
, node
);
3956 SpeculateCellOperand
op1(this, node
->child1());
3957 GPRTemporary
result(this, Reuse
, op1
);
3959 GPRReg op1GPR
= op1
.gpr();
3960 GPRReg resultGPR
= result
.gpr();
3962 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, GetterSetter::offsetOfGetter()), resultGPR
);
3964 cellResult(resultGPR
, node
);
3969 SpeculateCellOperand
op1(this, node
->child1());
3970 GPRTemporary
result(this, Reuse
, op1
);
3972 GPRReg op1GPR
= op1
.gpr();
3973 GPRReg resultGPR
= result
.gpr();
3975 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, GetterSetter::offsetOfSetter()), resultGPR
);
3977 cellResult(resultGPR
, node
);
3982 StorageOperand
storage(this, node
->child1());
3983 JSValueOperand
value(this, node
->child3());
3985 GPRReg storageGPR
= storage
.gpr();
3986 GPRReg valueTagGPR
= value
.tagGPR();
3987 GPRReg valuePayloadGPR
= value
.payloadGPR();
3989 speculate(node
, node
->child2());
3991 StorageAccessData
& storageAccessData
= node
->storageAccessData();
3993 m_jit
.storePtr(valueTagGPR
, JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
3994 m_jit
.storePtr(valuePayloadGPR
, JITCompiler::Address(storageGPR
, offsetRelativeToBase(storageAccessData
.offset
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
4000 case PutByIdFlush
: {
4001 SpeculateCellOperand
base(this, node
->child1());
4002 JSValueOperand
value(this, node
->child2());
4003 GPRTemporary
scratch(this);
4005 GPRReg baseGPR
= base
.gpr();
4006 GPRReg valueTagGPR
= value
.tagGPR();
4007 GPRReg valuePayloadGPR
= value
.payloadGPR();
4008 GPRReg scratchGPR
= scratch
.gpr();
4011 cachedPutById(node
->origin
.semantic
, baseGPR
, valueTagGPR
, valuePayloadGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
, MacroAssembler::Jump(), DontSpill
);
4018 SpeculateCellOperand
base(this, node
->child1());
4019 JSValueOperand
value(this, node
->child2());
4020 GPRTemporary
scratch(this);
4022 GPRReg baseGPR
= base
.gpr();
4023 GPRReg valueTagGPR
= value
.tagGPR();
4024 GPRReg valuePayloadGPR
= value
.payloadGPR();
4025 GPRReg scratchGPR
= scratch
.gpr();
4027 cachedPutById(node
->origin
.semantic
, baseGPR
, valueTagGPR
, valuePayloadGPR
, scratchGPR
, node
->identifierNumber(), NotDirect
);
4033 case PutByIdDirect
: {
4034 SpeculateCellOperand
base(this, node
->child1());
4035 JSValueOperand
value(this, node
->child2());
4036 GPRTemporary
scratch(this);
4038 GPRReg baseGPR
= base
.gpr();
4039 GPRReg valueTagGPR
= value
.tagGPR();
4040 GPRReg valuePayloadGPR
= value
.payloadGPR();
4041 GPRReg scratchGPR
= scratch
.gpr();
4043 cachedPutById(node
->origin
.semantic
, baseGPR
, valueTagGPR
, valuePayloadGPR
, scratchGPR
, node
->identifierNumber(), Direct
);
4049 case GetGlobalVar
: {
4050 GPRTemporary
resultPayload(this);
4051 GPRTemporary
resultTag(this);
4053 m_jit
.move(TrustedImmPtr(node
->variablePointer()), resultPayload
.gpr());
4054 m_jit
.load32(JITCompiler::Address(resultPayload
.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTag
.gpr());
4055 m_jit
.load32(JITCompiler::Address(resultPayload
.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayload
.gpr());
4057 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
4061 case PutGlobalVar
: {
4062 JSValueOperand
value(this, node
->child2());
4064 // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have
4065 // a spare register - a good optimization would be to put the register pointer into
4066 // a register and then do a zero offset store followed by a four-offset store (or
4067 // vice-versa depending on endianness).
4068 m_jit
.store32(value
.tagGPR(), node
->variablePointer()->tagPointer());
4069 m_jit
.store32(value
.payloadGPR(), node
->variablePointer()->payloadPointer());
4076 compileNotifyWrite(node
);
4080 case VarInjectionWatchpoint
: {
4085 case CheckHasInstance
: {
4086 SpeculateCellOperand
base(this, node
->child1());
4087 GPRTemporary
structure(this);
4089 // Speculate that base 'ImplementsDefaultHasInstance'.
4090 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branchTest8(
4091 MacroAssembler::Zero
,
4092 MacroAssembler::Address(base
.gpr(), JSCell::typeInfoFlagsOffset()),
4093 MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance
)));
4100 compileInstanceOf(node
);
4105 JSValueOperand
value(this, node
->child1());
4106 GPRTemporary
result(this);
4107 GPRTemporary
localGlobalObject(this);
4108 GPRTemporary
remoteGlobalObject(this);
4110 JITCompiler::Jump isCell
= m_jit
.branchIfCell(value
.jsValueRegs());
4112 m_jit
.compare32(JITCompiler::Equal
, value
.tagGPR(), TrustedImm32(JSValue::UndefinedTag
), result
.gpr());
4113 JITCompiler::Jump done
= m_jit
.jump();
4115 isCell
.link(&m_jit
);
4116 JITCompiler::Jump notMasqueradesAsUndefined
;
4117 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
4118 m_jit
.move(TrustedImm32(0), result
.gpr());
4119 notMasqueradesAsUndefined
= m_jit
.jump();
4121 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(
4122 JITCompiler::NonZero
,
4123 JITCompiler::Address(value
.payloadGPR(), JSCell::typeInfoFlagsOffset()),
4124 TrustedImm32(MasqueradesAsUndefined
));
4125 m_jit
.move(TrustedImm32(0), result
.gpr());
4126 notMasqueradesAsUndefined
= m_jit
.jump();
4128 isMasqueradesAsUndefined
.link(&m_jit
);
4129 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
4130 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
4131 m_jit
.move(TrustedImmPtr(m_jit
.globalObjectFor(node
->origin
.semantic
)), localGlobalObjectGPR
);
4132 m_jit
.loadPtr(JITCompiler::Address(value
.payloadGPR(), JSCell::structureIDOffset()), result
.gpr());
4133 m_jit
.loadPtr(JITCompiler::Address(result
.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
4134 m_jit
.compare32(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, result
.gpr());
4137 notMasqueradesAsUndefined
.link(&m_jit
);
4139 booleanResult(result
.gpr(), node
);
4144 JSValueOperand
value(this, node
->child1());
4145 GPRTemporary
result(this, Reuse
, value
, TagWord
);
4147 m_jit
.compare32(JITCompiler::Equal
, value
.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag
), result
.gpr());
4148 booleanResult(result
.gpr(), node
);
4153 JSValueOperand
value(this, node
->child1());
4154 GPRTemporary
result(this, Reuse
, value
, TagWord
);
4156 m_jit
.add32(TrustedImm32(1), value
.tagGPR(), result
.gpr());
4157 m_jit
.compare32(JITCompiler::Below
, result
.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag
+ 1), result
.gpr());
4158 booleanResult(result
.gpr(), node
);
4163 JSValueOperand
value(this, node
->child1());
4164 GPRTemporary
result(this, Reuse
, value
, TagWord
);
4166 JITCompiler::Jump isNotCell
= m_jit
.branchIfNotCell(value
.jsValueRegs());
4168 m_jit
.compare8(JITCompiler::Equal
,
4169 JITCompiler::Address(value
.payloadGPR(), JSCell::typeInfoTypeOffset()),
4170 TrustedImm32(StringType
),
4172 JITCompiler::Jump done
= m_jit
.jump();
4174 isNotCell
.link(&m_jit
);
4175 m_jit
.move(TrustedImm32(0), result
.gpr());
4178 booleanResult(result
.gpr(), node
);
4183 JSValueOperand
value(this, node
->child1());
4184 GPRTemporary
result(this, Reuse
, value
, TagWord
);
4186 JITCompiler::Jump isNotCell
= m_jit
.branchIfNotCell(value
.jsValueRegs());
4188 m_jit
.compare8(JITCompiler::AboveOrEqual
,
4189 JITCompiler::Address(value
.payloadGPR(), JSCell::typeInfoTypeOffset()),
4190 TrustedImm32(ObjectType
),
4192 JITCompiler::Jump done
= m_jit
.jump();
4194 isNotCell
.link(&m_jit
);
4195 m_jit
.move(TrustedImm32(0), result
.gpr());
4198 booleanResult(result
.gpr(), node
);
4202 case IsObjectOrNull
: {
4203 compileIsObjectOrNull(node
);
4208 compileIsFunction(node
);
4212 compileTypeOf(node
);
4222 case CallForwardVarargs
:
4223 case ConstructVarargs
:
4224 case ConstructForwardVarargs
:
4229 LoadVarargsData
* data
= node
->loadVarargsData();
4231 GPRReg argumentsTagGPR
;
4232 GPRReg argumentsPayloadGPR
;
4234 JSValueOperand
arguments(this, node
->child1());
4235 argumentsTagGPR
= arguments
.tagGPR();
4236 argumentsPayloadGPR
= arguments
.payloadGPR();
4240 callOperation(operationSizeOfVarargs
, GPRInfo::returnValueGPR
, argumentsTagGPR
, argumentsPayloadGPR
, data
->offset
);
4242 lock(GPRInfo::returnValueGPR
);
4244 JSValueOperand
arguments(this, node
->child1());
4245 argumentsTagGPR
= arguments
.tagGPR();
4246 argumentsPayloadGPR
= arguments
.payloadGPR();
4249 unlock(GPRInfo::returnValueGPR
);
4251 // FIXME: There is a chance that we will call an effectful length property twice. This is safe
4252 // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
4253 // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
4255 // https://bugs.webkit.org/show_bug.cgi?id=141448
4257 GPRReg argCountIncludingThisGPR
=
4258 JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR
, argumentsTagGPR
, argumentsPayloadGPR
);
4260 m_jit
.add32(TrustedImm32(1), GPRInfo::returnValueGPR
, argCountIncludingThisGPR
);
4262 VarargsOverflow
, JSValueSource(), Edge(), m_jit
.branch32(
4263 MacroAssembler::Above
,
4264 argCountIncludingThisGPR
,
4265 TrustedImm32(data
->limit
)));
4267 m_jit
.store32(argCountIncludingThisGPR
, JITCompiler::payloadFor(data
->machineCount
));
4269 callOperation(operationLoadVarargs
, data
->machineStart
.offset(), argumentsTagGPR
, argumentsPayloadGPR
, data
->offset
, GPRInfo::returnValueGPR
, data
->mandatoryMinimum
);
4275 case ForwardVarargs
: {
4276 compileForwardVarargs(node
);
4280 case CreateActivation
: {
4281 compileCreateActivation(node
);
4285 case CreateDirectArguments
: {
4286 compileCreateDirectArguments(node
);
4290 case GetFromArguments
: {
4291 compileGetFromArguments(node
);
4295 case PutToArguments
: {
4296 compilePutToArguments(node
);
4300 case CreateScopedArguments
: {
4301 compileCreateScopedArguments(node
);
4305 case CreateClonedArguments
: {
4306 compileCreateClonedArguments(node
);
4311 compileNewFunction(node
);
4318 case StoreBarrier
: {
4319 compileStoreBarrier(node
);
4323 case GetEnumerableLength
: {
4324 SpeculateCellOperand
enumerator(this, node
->child1());
4325 GPRFlushedCallResult
result(this);
4326 GPRReg resultGPR
= result
.gpr();
4328 m_jit
.load32(MacroAssembler::Address(enumerator
.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR
);
4329 int32Result(resultGPR
, node
);
4332 case HasGenericProperty
: {
4333 JSValueOperand
base(this, node
->child1());
4334 SpeculateCellOperand
property(this, node
->child2());
4335 GPRFlushedCallResult
resultPayload(this);
4336 GPRFlushedCallResult2
resultTag(this);
4337 GPRReg basePayloadGPR
= base
.payloadGPR();
4338 GPRReg baseTagGPR
= base
.tagGPR();
4339 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4340 GPRReg resultTagGPR
= resultTag
.gpr();
4343 callOperation(operationHasGenericProperty
, resultTagGPR
, resultPayloadGPR
, baseTagGPR
, basePayloadGPR
, property
.gpr());
4344 booleanResult(resultPayloadGPR
, node
);
4347 case HasStructureProperty
: {
4348 JSValueOperand
base(this, node
->child1());
4349 SpeculateCellOperand
property(this, node
->child2());
4350 SpeculateCellOperand
enumerator(this, node
->child3());
4351 GPRTemporary
resultPayload(this);
4352 GPRTemporary
resultTag(this);
4354 GPRReg baseTagGPR
= base
.tagGPR();
4355 GPRReg basePayloadGPR
= base
.payloadGPR();
4356 GPRReg propertyGPR
= property
.gpr();
4357 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4358 GPRReg resultTagGPR
= resultTag
.gpr();
4360 m_jit
.load32(MacroAssembler::Address(basePayloadGPR
, JSCell::structureIDOffset()), resultTagGPR
);
4361 MacroAssembler::Jump wrongStructure
= m_jit
.branch32(MacroAssembler::NotEqual
,
4363 MacroAssembler::Address(enumerator
.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset()));
4365 moveTrueTo(resultPayloadGPR
);
4366 MacroAssembler::Jump done
= m_jit
.jump();
4370 addSlowPathGenerator(slowPathCall(wrongStructure
, this, operationHasGenericProperty
, resultTagGPR
, resultPayloadGPR
, baseTagGPR
, basePayloadGPR
, propertyGPR
));
4371 booleanResult(resultPayloadGPR
, node
);
4374 case HasIndexedProperty
: {
4375 SpeculateCellOperand
base(this, node
->child1());
4376 SpeculateInt32Operand
index(this, node
->child2());
4377 GPRTemporary
resultPayload(this);
4378 GPRTemporary
resultTag(this);
4380 GPRReg baseGPR
= base
.gpr();
4381 GPRReg indexGPR
= index
.gpr();
4382 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4383 GPRReg resultTagGPR
= resultTag
.gpr();
4385 MacroAssembler::JumpList slowCases
;
4386 ArrayMode mode
= node
->arrayMode();
4387 switch (mode
.type()) {
4389 case Array::Contiguous
: {
4390 ASSERT(!!node
->child3());
4391 StorageOperand
storage(this, node
->child3());
4392 GPRTemporary
scratch(this);
4394 GPRReg storageGPR
= storage
.gpr();
4395 GPRReg scratchGPR
= scratch
.gpr();
4397 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength())));
4398 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, indexGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), scratchGPR
);
4399 slowCases
.append(m_jit
.branch32(MacroAssembler::Equal
, scratchGPR
, TrustedImm32(JSValue::EmptyValueTag
)));
4402 case Array::Double
: {
4403 ASSERT(!!node
->child3());
4404 StorageOperand
storage(this, node
->child3());
4405 FPRTemporary
scratch(this);
4406 FPRReg scratchFPR
= scratch
.fpr();
4407 GPRReg storageGPR
= storage
.gpr();
4409 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength())));
4410 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageGPR
, indexGPR
, MacroAssembler::TimesEight
), scratchFPR
);
4411 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, scratchFPR
, scratchFPR
));
4414 case Array::ArrayStorage
: {
4415 ASSERT(!!node
->child3());
4416 StorageOperand
storage(this, node
->child3());
4417 GPRTemporary
scratch(this);
4419 GPRReg storageGPR
= storage
.gpr();
4420 GPRReg scratchGPR
= scratch
.gpr();
4422 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset())));
4423 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, indexGPR
, MacroAssembler::TimesEight
, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), scratchGPR
);
4424 slowCases
.append(m_jit
.branch32(MacroAssembler::Equal
, scratchGPR
, TrustedImm32(JSValue::EmptyValueTag
)));
4428 slowCases
.append(m_jit
.jump());
4433 moveTrueTo(resultPayloadGPR
);
4434 MacroAssembler::Jump done
= m_jit
.jump();
4436 addSlowPathGenerator(slowPathCall(slowCases
, this, operationHasIndexedProperty
, resultTagGPR
, resultPayloadGPR
, baseGPR
, indexGPR
));
4439 booleanResult(resultPayloadGPR
, node
);
4442 case GetDirectPname
: {
4443 Edge
& baseEdge
= m_jit
.graph().varArgChild(node
, 0);
4444 Edge
& propertyEdge
= m_jit
.graph().varArgChild(node
, 1);
4446 SpeculateCellOperand
base(this, baseEdge
);
4447 SpeculateCellOperand
property(this, propertyEdge
);
4448 GPRReg baseGPR
= base
.gpr();
4449 GPRReg propertyGPR
= property
.gpr();
4452 GPRFlushedCallResult
resultPayload(this);
4453 GPRFlushedCallResult2
resultTag(this);
4454 GPRTemporary
scratch(this);
4456 GPRReg resultTagGPR
= resultTag
.gpr();
4457 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4458 GPRReg scratchGPR
= scratch
.gpr();
4460 // Not enough registers on X86 for this code, so always use the slow path.
4462 m_jit
.move(MacroAssembler::TrustedImm32(JSValue::CellTag
), scratchGPR
);
4463 callOperation(operationGetByValCell
, resultTagGPR
, resultPayloadGPR
, baseGPR
, scratchGPR
, propertyGPR
);
4465 GPRTemporary
resultPayload(this);
4466 GPRTemporary
resultTag(this);
4467 GPRTemporary
scratch(this);
4469 GPRReg resultTagGPR
= resultTag
.gpr();
4470 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4471 GPRReg scratchGPR
= scratch
.gpr();
4473 Edge
& indexEdge
= m_jit
.graph().varArgChild(node
, 2);
4474 Edge
& enumeratorEdge
= m_jit
.graph().varArgChild(node
, 3);
4476 SpeculateInt32Operand
index(this, indexEdge
);
4477 SpeculateCellOperand
enumerator(this, enumeratorEdge
);
4479 GPRReg indexGPR
= index
.gpr();
4480 GPRReg enumeratorGPR
= enumerator
.gpr();
4482 // Check the structure
4483 m_jit
.load32(MacroAssembler::Address(baseGPR
, JSCell::structureIDOffset()), scratchGPR
);
4484 MacroAssembler::Jump wrongStructure
= m_jit
.branch32(MacroAssembler::NotEqual
,
4485 scratchGPR
, MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedStructureIDOffset()));
4487 // Compute the offset
4488 // If index is less than the enumerator's cached inline storage, then it's an inline access
4489 MacroAssembler::Jump outOfLineAccess
= m_jit
.branch32(MacroAssembler::AboveOrEqual
,
4490 indexGPR
, MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
4492 m_jit
.move(indexGPR
, scratchGPR
);
4493 m_jit
.signExtend32ToPtr(scratchGPR
, scratchGPR
);
4494 m_jit
.load32(MacroAssembler::BaseIndex(baseGPR
, scratchGPR
, MacroAssembler::TimesEight
, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagGPR
);
4495 m_jit
.load32(MacroAssembler::BaseIndex(baseGPR
, scratchGPR
, MacroAssembler::TimesEight
, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadGPR
);
4497 MacroAssembler::Jump done
= m_jit
.jump();
4499 // Otherwise it's out of line
4500 outOfLineAccess
.link(&m_jit
);
4501 m_jit
.move(indexGPR
, scratchGPR
);
4502 m_jit
.sub32(MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR
);
4503 m_jit
.neg32(scratchGPR
);
4504 m_jit
.signExtend32ToPtr(scratchGPR
, scratchGPR
);
4505 // We use resultPayloadGPR as a temporary here. We have to make sure clobber it after getting the
4506 // value out of indexGPR and enumeratorGPR because resultPayloadGPR could reuse either of those registers.
4507 m_jit
.loadPtr(MacroAssembler::Address(baseGPR
, JSObject::butterflyOffset()), resultPayloadGPR
);
4508 int32_t offsetOfFirstProperty
= static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset
)) * sizeof(EncodedJSValue
);
4509 m_jit
.load32(MacroAssembler::BaseIndex(resultPayloadGPR
, scratchGPR
, MacroAssembler::TimesEight
, offsetOfFirstProperty
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagGPR
);
4510 m_jit
.load32(MacroAssembler::BaseIndex(resultPayloadGPR
, scratchGPR
, MacroAssembler::TimesEight
, offsetOfFirstProperty
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadGPR
);
4514 addSlowPathGenerator(slowPathCall(wrongStructure
, this, operationGetByValCell
, resultTagGPR
, resultPayloadGPR
, baseGPR
, propertyGPR
));
4517 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4520 case GetPropertyEnumerator
: {
4521 SpeculateCellOperand
base(this, node
->child1());
4522 GPRFlushedCallResult
result(this);
4523 GPRReg resultGPR
= result
.gpr();
4526 callOperation(operationGetPropertyEnumerator
, resultGPR
, base
.gpr());
4527 cellResult(resultGPR
, node
);
4530 case GetEnumeratorStructurePname
:
4531 case GetEnumeratorGenericPname
: {
4532 SpeculateCellOperand
enumerator(this, node
->child1());
4533 SpeculateInt32Operand
index(this, node
->child2());
4534 GPRTemporary
scratch(this);
4535 GPRTemporary
resultPayload(this);
4536 GPRTemporary
resultTag(this);
4538 GPRReg enumeratorGPR
= enumerator
.gpr();
4539 GPRReg indexGPR
= index
.gpr();
4540 GPRReg scratchGPR
= scratch
.gpr();
4541 GPRReg resultTagGPR
= resultTag
.gpr();
4542 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4544 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, indexGPR
,
4545 MacroAssembler::Address(enumeratorGPR
, (op
== GetEnumeratorStructurePname
)
4546 ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset()
4547 : JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
4549 m_jit
.move(MacroAssembler::TrustedImm32(JSValue::NullTag
), resultTagGPR
);
4550 m_jit
.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR
);
4552 MacroAssembler::Jump done
= m_jit
.jump();
4553 inBounds
.link(&m_jit
);
4555 m_jit
.loadPtr(MacroAssembler::Address(enumeratorGPR
, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratchGPR
);
4556 m_jit
.loadPtr(MacroAssembler::BaseIndex(scratchGPR
, indexGPR
, MacroAssembler::ScalePtr
), resultPayloadGPR
);
4557 m_jit
.move(MacroAssembler::TrustedImm32(JSValue::CellTag
), resultTagGPR
);
4560 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4563 case ToIndexString
: {
4564 SpeculateInt32Operand
index(this, node
->child1());
4565 GPRFlushedCallResult
result(this);
4566 GPRReg resultGPR
= result
.gpr();
4569 callOperation(operationToIndexString
, resultGPR
, index
.gpr());
4570 cellResult(resultGPR
, node
);
4574 JSValueOperand
value(this, node
->child1());
4575 GPRTemporary
scratch1(this);
4576 GPRTemporary
scratch2(this);
4577 GPRTemporary
scratch3(this);
4579 GPRReg scratch1GPR
= scratch1
.gpr();
4580 GPRReg scratch2GPR
= scratch2
.gpr();
4581 GPRReg scratch3GPR
= scratch3
.gpr();
4583 // Load the TypeProfilerLog into Scratch2.
4584 TypeProfilerLog
* cachedTypeProfilerLog
= m_jit
.vm()->typeProfilerLog();
4585 m_jit
.move(TrustedImmPtr(cachedTypeProfilerLog
), scratch2GPR
);
4587 // Load the next LogEntry into Scratch1.
4588 m_jit
.loadPtr(MacroAssembler::Address(scratch2GPR
, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR
);
4590 // Store the JSValue onto the log entry.
4591 m_jit
.store32(value
.tagGPR(), MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
4592 m_jit
.store32(value
.payloadGPR(), MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
4594 // Store the structureID of the cell if valueGPR is a cell, otherwise, store 0 on the log entry.
4595 MacroAssembler::Jump isNotCell
= m_jit
.branchIfNotCell(value
.jsValueRegs());
4596 m_jit
.load32(MacroAssembler::Address(value
.payloadGPR(), JSCell::structureIDOffset()), scratch3GPR
);
4597 m_jit
.store32(scratch3GPR
, MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::structureIDOffset()));
4598 MacroAssembler::Jump skipIsCell
= m_jit
.jump();
4599 isNotCell
.link(&m_jit
);
4600 m_jit
.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::structureIDOffset()));
4601 skipIsCell
.link(&m_jit
);
4603 // Store the typeLocation on the log entry.
4604 TypeLocation
* cachedTypeLocation
= node
->typeLocation();
4605 m_jit
.move(TrustedImmPtr(cachedTypeLocation
), scratch3GPR
);
4606 m_jit
.storePtr(scratch3GPR
, MacroAssembler::Address(scratch1GPR
, TypeProfilerLog::LogEntry::locationOffset()));
4608 // Increment the current log entry.
4609 m_jit
.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry
)), scratch1GPR
);
4610 m_jit
.storePtr(scratch1GPR
, MacroAssembler::Address(scratch2GPR
, TypeProfilerLog::currentLogEntryOffset()));
4611 MacroAssembler::Jump clearLog
= m_jit
.branchPtr(MacroAssembler::Equal
, scratch1GPR
, TrustedImmPtr(cachedTypeProfilerLog
->logEndPtr()));
4612 addSlowPathGenerator(
4613 slowPathCall(clearLog
, this, operationProcessTypeProfilerLogDFG
, NoResult
));
4618 case ProfileControlFlow
: {
4619 BasicBlockLocation
* basicBlockLocation
= node
->basicBlockLocation();
4620 if (!basicBlockLocation
->hasExecuted()) {
4621 GPRTemporary
scratch1(this);
4622 basicBlockLocation
->emitExecuteCode(m_jit
, scratch1
.gpr());
4628 case ForceOSRExit
: {
4629 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
4633 case InvalidationPoint
:
4634 emitInvalidationPoint(node
);
4637 case CheckWatchdogTimer
:
4638 ASSERT(m_jit
.vm()->watchdog
);
4640 WatchdogTimerFired
, JSValueRegs(), 0,
4642 JITCompiler::NonZero
,
4643 JITCompiler::AbsoluteAddress(m_jit
.vm()->watchdog
->timerDidFireAddress())));
4646 case CountExecution
:
4647 m_jit
.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node
->executionCounter()->address()));
4652 DFG_NODE_DO_TO_CHILDREN(m_jit
.graph(), node
, speculate
);
4657 case ProfileWillCall
:
4658 case ProfileDidCall
:
4667 RELEASE_ASSERT_NOT_REACHED();
4673 case ExtractOSREntryLocal
:
4674 case CheckTierUpInLoop
:
4675 case CheckTierUpAtReturn
:
4676 case CheckTierUpAndOSREnter
:
4677 case CheckTierUpWithNestedTriggerAndOSREnter
:
4683 case MultiGetByOffset
:
4684 case MultiPutByOffset
:
4686 case NativeConstruct
:
4689 case PhantomNewObject
:
4690 case PhantomNewFunction
:
4691 case PhantomCreateActivation
:
4693 case CheckStructureImmediate
:
4694 case MaterializeNewObject
:
4695 case MaterializeCreateActivation
:
4699 case GetMyArgumentByVal
:
4700 DFG_CRASH(m_jit
.graph(), node
, "unexpected node in DFG backend");
4707 if (node
->hasResult() && node
->mustGenerate())
// Emits the generational write barrier for storing the JSValue described by
// (valueTagGPR, valueUse) into the object held in ownerGPR. Only cell values
// need a barrier: on 32-bit, a JSValue is a cell exactly when its tag word is
// JSValue::CellTag, so non-cell stores branch around the barrier entirely.
// scratch1/scratch2 are clobbered by the slow-path buffer store.
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueTagGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2)
{
    // If the abstract interpreter can't prove the value is a cell, test the tag
    // at runtime and skip the barrier for non-cells.
    JITCompiler::Jump isNotCell;
    if (!isKnownCell(valueUse.node()))
        isNotCell = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::CellTag));

    // An owner that is already remembered (or allocated in eden) needs no
    // further bookkeeping; otherwise record it in the write-barrier buffer.
    JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
    storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
    ownerIsRememberedOrInEden.link(&m_jit);

    // Join point for the non-cell fast path taken above.
    if (!isKnownCell(valueUse.node()))
        isNotCell.link(&m_jit);
}
4725 #endif // ENABLE(GGC)
// Materializes the boolean-true payload (the constant 1) into gpr.
// Counterpart of moveFalseTo(); used when producing boolean results.
void SpeculativeJIT::moveTrueTo(GPRReg gpr)
{
    m_jit.move(TrustedImm32(1), gpr);
}
// Materializes the boolean-false payload (the constant 0) into gpr.
// Counterpart of moveTrueTo(); used when producing boolean results.
void SpeculativeJIT::moveFalseTo(GPRReg gpr)
{
    m_jit.move(TrustedImm32(0), gpr);
}
4737 void SpeculativeJIT::blessBoolean(GPRReg
)
4743 } } // namespace JSC::DFG