2 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
3 * Copyright (C) 2011 Intel Corporation. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "DFGSpeculativeJIT.h"
32 #include "ArrayPrototype.h"
33 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
34 #include "DFGSlowPathGenerator.h"
35 #include "JSActivation.h"
36 #include "ObjectPrototype.h"
37 #include "Operations.h"
39 namespace JSC
{ namespace DFG
{
43 GPRReg
SpeculativeJIT::fillInteger(Edge edge
, DataFormat
& returnFormat
)
45 ASSERT(!needsTypeCheck(edge
, SpecInt32
));
47 VirtualRegister virtualRegister
= edge
->virtualRegister();
48 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
50 if (info
.registerFormat() == DataFormatNone
) {
51 GPRReg gpr
= allocate();
53 if (edge
->hasConstant()) {
54 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
55 if (isInt32Constant(edge
.node()))
56 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
57 else if (isNumberConstant(edge
.node()))
58 RELEASE_ASSERT_NOT_REACHED();
60 ASSERT(isJSConstant(edge
.node()));
61 JSValue jsValue
= valueOfJSConstant(edge
.node());
62 m_jit
.move(MacroAssembler::Imm32(jsValue
.payload()), gpr
);
65 ASSERT(info
.spillFormat() == DataFormatJS
|| info
.spillFormat() == DataFormatJSInteger
|| info
.spillFormat() == DataFormatInteger
);
66 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
67 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
70 info
.fillInteger(*m_stream
, gpr
);
71 returnFormat
= DataFormatInteger
;
75 switch (info
.registerFormat()) {
77 // Should have filled, above.
78 case DataFormatJSDouble
:
79 case DataFormatDouble
:
82 case DataFormatJSCell
:
83 case DataFormatBoolean
:
84 case DataFormatJSBoolean
:
85 case DataFormatStorage
:
86 // Should only be calling this function if we know this operand to be integer.
87 RELEASE_ASSERT_NOT_REACHED();
89 case DataFormatJSInteger
: {
90 GPRReg tagGPR
= info
.tagGPR();
91 GPRReg payloadGPR
= info
.payloadGPR();
93 m_jit
.jitAssertIsJSInt32(tagGPR
);
94 m_gprs
.unlock(tagGPR
);
95 m_gprs
.lock(payloadGPR
);
96 m_gprs
.release(tagGPR
);
97 m_gprs
.release(payloadGPR
);
98 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderInteger
);
99 info
.fillInteger(*m_stream
, payloadGPR
);
100 returnFormat
= DataFormatInteger
;
104 case DataFormatInteger
: {
105 GPRReg gpr
= info
.gpr();
107 m_jit
.jitAssertIsInt32(gpr
);
108 returnFormat
= DataFormatInteger
;
113 RELEASE_ASSERT_NOT_REACHED();
114 return InvalidGPRReg
;
118 bool SpeculativeJIT::fillJSValue(Edge edge
, GPRReg
& tagGPR
, GPRReg
& payloadGPR
, FPRReg
& fpr
)
120 // FIXME: For double we could fill with a FPR.
123 VirtualRegister virtualRegister
= edge
->virtualRegister();
124 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
126 switch (info
.registerFormat()) {
127 case DataFormatNone
: {
129 if (edge
->hasConstant()) {
131 payloadGPR
= allocate();
132 m_jit
.move(Imm32(valueOfJSConstant(edge
.node()).tag()), tagGPR
);
133 m_jit
.move(Imm32(valueOfJSConstant(edge
.node()).payload()), payloadGPR
);
134 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderConstant
);
135 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderConstant
);
136 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, isInt32Constant(edge
.node()) ? DataFormatJSInteger
: DataFormatJS
);
138 DataFormat spillFormat
= info
.spillFormat();
139 ASSERT(spillFormat
!= DataFormatNone
&& spillFormat
!= DataFormatStorage
);
141 payloadGPR
= allocate();
142 switch (spillFormat
) {
143 case DataFormatInteger
:
144 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), tagGPR
);
145 spillFormat
= DataFormatJSInteger
; // This will be used as the new register format.
148 m_jit
.move(TrustedImm32(JSValue::CellTag
), tagGPR
);
149 spillFormat
= DataFormatJSCell
; // This will be used as the new register format.
151 case DataFormatBoolean
:
152 m_jit
.move(TrustedImm32(JSValue::BooleanTag
), tagGPR
);
153 spillFormat
= DataFormatJSBoolean
; // This will be used as the new register format.
156 m_jit
.load32(JITCompiler::tagFor(virtualRegister
), tagGPR
);
159 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), payloadGPR
);
160 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderSpilled
);
161 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderSpilled
);
162 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, spillFormat
== DataFormatJSDouble
? DataFormatJS
: spillFormat
);
168 case DataFormatInteger
:
170 case DataFormatBoolean
: {
171 GPRReg gpr
= info
.gpr();
172 // If the register has already been locked we need to take a copy.
173 if (m_gprs
.isLocked(gpr
)) {
174 payloadGPR
= allocate();
175 m_jit
.move(gpr
, payloadGPR
);
181 uint32_t tag
= JSValue::EmptyValueTag
;
182 DataFormat fillFormat
= DataFormatJS
;
183 switch (info
.registerFormat()) {
184 case DataFormatInteger
:
185 tag
= JSValue::Int32Tag
;
186 fillFormat
= DataFormatJSInteger
;
189 tag
= JSValue::CellTag
;
190 fillFormat
= DataFormatJSCell
;
192 case DataFormatBoolean
:
193 tag
= JSValue::BooleanTag
;
194 fillFormat
= DataFormatJSBoolean
;
197 RELEASE_ASSERT_NOT_REACHED();
200 m_jit
.move(TrustedImm32(tag
), tagGPR
);
202 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderJS
);
203 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderJS
);
204 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, fillFormat
);
208 case DataFormatJSDouble
:
209 case DataFormatDouble
: {
210 FPRReg oldFPR
= info
.fpr();
213 payloadGPR
= allocate();
214 boxDouble(oldFPR
, tagGPR
, payloadGPR
);
215 m_fprs
.unlock(oldFPR
);
216 m_fprs
.release(oldFPR
);
217 m_gprs
.retain(tagGPR
, virtualRegister
, SpillOrderJS
);
218 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderJS
);
219 info
.fillJSValue(*m_stream
, tagGPR
, payloadGPR
, DataFormatJS
);
224 case DataFormatJSInteger
:
225 case DataFormatJSCell
:
226 case DataFormatJSBoolean
: {
227 tagGPR
= info
.tagGPR();
228 payloadGPR
= info
.payloadGPR();
230 m_gprs
.lock(payloadGPR
);
234 case DataFormatStorage
:
235 // this type currently never occurs
236 RELEASE_ASSERT_NOT_REACHED();
239 RELEASE_ASSERT_NOT_REACHED();
244 void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node
* node
)
246 IntegerOperand
op1(this, node
->child1());
247 FPRTemporary
boxer(this);
248 GPRTemporary
resultTag(this, op1
);
249 GPRTemporary
resultPayload(this);
251 JITCompiler::Jump positive
= m_jit
.branch32(MacroAssembler::GreaterThanOrEqual
, op1
.gpr(), TrustedImm32(0));
253 m_jit
.convertInt32ToDouble(op1
.gpr(), boxer
.fpr());
254 m_jit
.move(JITCompiler::TrustedImmPtr(&AssemblyHelpers::twoToThe32
), resultPayload
.gpr()); // reuse resultPayload register here.
255 m_jit
.addDouble(JITCompiler::Address(resultPayload
.gpr(), 0), boxer
.fpr());
257 boxDouble(boxer
.fpr(), resultTag
.gpr(), resultPayload
.gpr());
259 JITCompiler::Jump done
= m_jit
.jump();
261 positive
.link(&m_jit
);
263 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), resultTag
.gpr());
264 m_jit
.move(op1
.gpr(), resultPayload
.gpr());
268 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
271 void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin
, GPRReg baseTagGPROrNone
, GPRReg basePayloadGPR
, GPRReg resultTagGPR
, GPRReg resultPayloadGPR
, unsigned identifierNumber
, JITCompiler::Jump slowPathTarget
, SpillRegistersMode spillMode
)
273 JITCompiler::DataLabelPtr structureToCompare
;
274 JITCompiler::PatchableJump structureCheck
= m_jit
.patchableBranchPtrWithPatch(JITCompiler::NotEqual
, JITCompiler::Address(basePayloadGPR
, JSCell::structureOffset()), structureToCompare
, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer
)));
276 JITCompiler::ConvertibleLoadLabel propertyStorageLoad
= m_jit
.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR
, JSObject::butterflyOffset()), resultPayloadGPR
);
277 JITCompiler::DataLabelCompact tagLoadWithPatch
= m_jit
.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR
, OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTagGPR
);
278 JITCompiler::DataLabelCompact payloadLoadWithPatch
= m_jit
.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR
, OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayloadGPR
);
280 JITCompiler::Label doneLabel
= m_jit
.label();
282 OwnPtr
<SlowPathGenerator
> slowPath
;
283 if (baseTagGPROrNone
== InvalidGPRReg
) {
284 if (!slowPathTarget
.isSet()) {
285 slowPath
= slowPathCall(
286 structureCheck
.m_jump
, this, operationGetByIdOptimize
,
287 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
288 static_cast<int32_t>(JSValue::CellTag
), basePayloadGPR
,
289 identifier(identifierNumber
));
291 JITCompiler::JumpList slowCases
;
292 slowCases
.append(structureCheck
.m_jump
);
293 slowCases
.append(slowPathTarget
);
294 slowPath
= slowPathCall(
295 slowCases
, this, operationGetByIdOptimize
,
296 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
297 static_cast<int32_t>(JSValue::CellTag
), basePayloadGPR
,
298 identifier(identifierNumber
));
301 if (!slowPathTarget
.isSet()) {
302 slowPath
= slowPathCall(
303 structureCheck
.m_jump
, this, operationGetByIdOptimize
,
304 JSValueRegs(resultTagGPR
, resultPayloadGPR
), baseTagGPROrNone
, basePayloadGPR
,
305 identifier(identifierNumber
));
307 JITCompiler::JumpList slowCases
;
308 slowCases
.append(structureCheck
.m_jump
);
309 slowCases
.append(slowPathTarget
);
310 slowPath
= slowPathCall(
311 slowCases
, this, operationGetByIdOptimize
,
312 JSValueRegs(resultTagGPR
, resultPayloadGPR
), baseTagGPROrNone
, basePayloadGPR
,
313 identifier(identifierNumber
));
316 m_jit
.addPropertyAccess(
317 PropertyAccessRecord(
318 codeOrigin
, structureToCompare
, structureCheck
, propertyStorageLoad
,
319 tagLoadWithPatch
, payloadLoadWithPatch
, slowPath
.get(), doneLabel
,
320 safeCast
<int8_t>(basePayloadGPR
), safeCast
<int8_t>(resultTagGPR
),
321 safeCast
<int8_t>(resultPayloadGPR
), usedRegisters(),
322 spillMode
== NeedToSpill
? PropertyAccessRecord::RegistersInUse
: PropertyAccessRecord::RegistersFlushed
));
323 addSlowPathGenerator(slowPath
.release());
326 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin
, GPRReg basePayloadGPR
, GPRReg valueTagGPR
, GPRReg valuePayloadGPR
, Edge valueUse
, GPRReg scratchGPR
, unsigned identifierNumber
, PutKind putKind
, JITCompiler::Jump slowPathTarget
)
328 JITCompiler::DataLabelPtr structureToCompare
;
329 JITCompiler::PatchableJump structureCheck
= m_jit
.patchableBranchPtrWithPatch(JITCompiler::NotEqual
, JITCompiler::Address(basePayloadGPR
, JSCell::structureOffset()), structureToCompare
, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer
)));
331 writeBarrier(basePayloadGPR
, valueTagGPR
, valueUse
, WriteBarrierForPropertyAccess
, scratchGPR
);
333 JITCompiler::ConvertibleLoadLabel propertyStorageLoad
= m_jit
.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR
, JSObject::butterflyOffset()), scratchGPR
);
334 JITCompiler::DataLabel32 tagStoreWithPatch
= m_jit
.store32WithAddressOffsetPatch(valueTagGPR
, JITCompiler::Address(scratchGPR
, OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
335 JITCompiler::DataLabel32 payloadStoreWithPatch
= m_jit
.store32WithAddressOffsetPatch(valuePayloadGPR
, JITCompiler::Address(scratchGPR
, OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
337 JITCompiler::Label doneLabel
= m_jit
.label();
338 V_DFGOperation_EJCI optimizedCall
;
339 if (m_jit
.strictModeFor(m_currentNode
->codeOrigin
)) {
340 if (putKind
== Direct
)
341 optimizedCall
= operationPutByIdDirectStrictOptimize
;
343 optimizedCall
= operationPutByIdStrictOptimize
;
345 if (putKind
== Direct
)
346 optimizedCall
= operationPutByIdDirectNonStrictOptimize
;
348 optimizedCall
= operationPutByIdNonStrictOptimize
;
350 OwnPtr
<SlowPathGenerator
> slowPath
;
351 if (!slowPathTarget
.isSet()) {
352 slowPath
= slowPathCall(
353 structureCheck
.m_jump
, this, optimizedCall
, NoResult
, valueTagGPR
, valuePayloadGPR
,
354 basePayloadGPR
, identifier(identifierNumber
));
356 JITCompiler::JumpList slowCases
;
357 slowCases
.append(structureCheck
.m_jump
);
358 slowCases
.append(slowPathTarget
);
359 slowPath
= slowPathCall(
360 slowCases
, this, optimizedCall
, NoResult
, valueTagGPR
, valuePayloadGPR
,
361 basePayloadGPR
, identifier(identifierNumber
));
363 RegisterSet currentlyUsedRegisters
= usedRegisters();
364 currentlyUsedRegisters
.clear(scratchGPR
);
365 ASSERT(currentlyUsedRegisters
.get(basePayloadGPR
));
366 ASSERT(currentlyUsedRegisters
.get(valueTagGPR
));
367 ASSERT(currentlyUsedRegisters
.get(valuePayloadGPR
));
368 m_jit
.addPropertyAccess(
369 PropertyAccessRecord(
370 codeOrigin
, structureToCompare
, structureCheck
, propertyStorageLoad
,
371 JITCompiler::DataLabelCompact(tagStoreWithPatch
.label()),
372 JITCompiler::DataLabelCompact(payloadStoreWithPatch
.label()),
373 slowPath
.get(), doneLabel
, safeCast
<int8_t>(basePayloadGPR
),
374 safeCast
<int8_t>(valueTagGPR
), safeCast
<int8_t>(valuePayloadGPR
),
376 addSlowPathGenerator(slowPath
.release());
379 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand
, bool invert
)
381 JSValueOperand
arg(this, operand
);
382 GPRReg argTagGPR
= arg
.tagGPR();
383 GPRReg argPayloadGPR
= arg
.payloadGPR();
385 GPRTemporary
resultPayload(this, arg
, false);
386 GPRReg resultPayloadGPR
= resultPayload
.gpr();
388 JITCompiler::Jump notCell
;
389 JITCompiler::Jump notMasqueradesAsUndefined
;
390 if (m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
391 if (!isKnownCell(operand
.node()))
392 notCell
= m_jit
.branch32(MacroAssembler::NotEqual
, argTagGPR
, TrustedImm32(JSValue::CellTag
));
394 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
395 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR
);
396 notMasqueradesAsUndefined
= m_jit
.jump();
398 GPRTemporary
localGlobalObject(this);
399 GPRTemporary
remoteGlobalObject(this);
401 if (!isKnownCell(operand
.node()))
402 notCell
= m_jit
.branch32(MacroAssembler::NotEqual
, argTagGPR
, TrustedImm32(JSValue::CellTag
));
404 m_jit
.loadPtr(JITCompiler::Address(argPayloadGPR
, JSCell::structureOffset()), resultPayloadGPR
);
405 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(JITCompiler::NonZero
, JITCompiler::Address(resultPayloadGPR
, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined
));
407 m_jit
.move(invert
? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR
);
408 notMasqueradesAsUndefined
= m_jit
.jump();
410 isMasqueradesAsUndefined
.link(&m_jit
);
411 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
412 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
413 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)), localGlobalObjectGPR
);
414 m_jit
.loadPtr(JITCompiler::Address(resultPayloadGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
415 m_jit
.compare32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, resultPayloadGPR
);
418 if (!isKnownCell(operand
.node())) {
419 JITCompiler::Jump done
= m_jit
.jump();
421 notCell
.link(&m_jit
);
422 // null or undefined?
423 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
424 m_jit
.move(argTagGPR
, resultPayloadGPR
);
425 m_jit
.or32(TrustedImm32(1), resultPayloadGPR
);
426 m_jit
.compare32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultPayloadGPR
, TrustedImm32(JSValue::NullTag
), resultPayloadGPR
);
431 notMasqueradesAsUndefined
.link(&m_jit
);
433 booleanResult(resultPayloadGPR
, m_currentNode
);
436 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand
, Node
* branchNode
, bool invert
)
438 BlockIndex taken
= branchNode
->takenBlockIndex();
439 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
441 if (taken
== nextBlock()) {
443 BlockIndex tmp
= taken
;
448 JSValueOperand
arg(this, operand
);
449 GPRReg argTagGPR
= arg
.tagGPR();
450 GPRReg argPayloadGPR
= arg
.payloadGPR();
452 GPRTemporary
result(this, arg
);
453 GPRReg resultGPR
= result
.gpr();
455 JITCompiler::Jump notCell
;
457 if (m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
458 if (!isKnownCell(operand
.node()))
459 notCell
= m_jit
.branch32(MacroAssembler::NotEqual
, argTagGPR
, TrustedImm32(JSValue::CellTag
));
461 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
462 jump(invert
? taken
: notTaken
, ForceJump
);
464 GPRTemporary
localGlobalObject(this);
465 GPRTemporary
remoteGlobalObject(this);
467 if (!isKnownCell(operand
.node()))
468 notCell
= m_jit
.branch32(MacroAssembler::NotEqual
, argTagGPR
, TrustedImm32(JSValue::CellTag
));
470 m_jit
.loadPtr(JITCompiler::Address(argPayloadGPR
, JSCell::structureOffset()), resultGPR
);
471 branchTest8(JITCompiler::Zero
, JITCompiler::Address(resultGPR
, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined
), invert
? taken
: notTaken
);
473 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
474 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
475 m_jit
.move(TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)), localGlobalObjectGPR
);
476 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
477 branchPtr(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, invert
? notTaken
: taken
);
480 if (!isKnownCell(operand
.node())) {
481 jump(notTaken
, ForceJump
);
483 notCell
.link(&m_jit
);
484 // null or undefined?
485 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
486 m_jit
.move(argTagGPR
, resultGPR
);
487 m_jit
.or32(TrustedImm32(1), resultGPR
);
488 branch32(invert
? JITCompiler::NotEqual
: JITCompiler::Equal
, resultGPR
, JITCompiler::TrustedImm32(JSValue::NullTag
), taken
);
494 bool SpeculativeJIT::nonSpeculativeCompareNull(Node
* node
, Edge operand
, bool invert
)
496 unsigned branchIndexInBlock
= detectPeepHoleBranch();
497 if (branchIndexInBlock
!= UINT_MAX
) {
498 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
500 ASSERT(node
->adjustedRefCount() == 1);
502 nonSpeculativePeepholeBranchNull(operand
, branchNode
, invert
);
506 m_indexInBlock
= branchIndexInBlock
;
507 m_currentNode
= branchNode
;
512 nonSpeculativeNonPeepholeCompareNull(operand
, invert
);
517 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node
* node
, Node
* branchNode
, MacroAssembler::RelationalCondition cond
, S_DFGOperation_EJJ helperFunction
)
519 BlockIndex taken
= branchNode
->takenBlockIndex();
520 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
522 JITCompiler::ResultCondition callResultCondition
= JITCompiler::NonZero
;
524 // The branch instruction will branch to the taken block.
525 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
526 if (taken
== nextBlock()) {
527 cond
= JITCompiler::invert(cond
);
528 callResultCondition
= JITCompiler::Zero
;
529 BlockIndex tmp
= taken
;
534 JSValueOperand
arg1(this, node
->child1());
535 JSValueOperand
arg2(this, node
->child2());
536 GPRReg arg1TagGPR
= arg1
.tagGPR();
537 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
538 GPRReg arg2TagGPR
= arg2
.tagGPR();
539 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
541 JITCompiler::JumpList slowPath
;
543 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
544 GPRResult
result(this);
545 GPRReg resultGPR
= result
.gpr();
551 callOperation(helperFunction
, resultGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
553 branchTest32(callResultCondition
, resultGPR
, taken
);
555 GPRTemporary
result(this);
556 GPRReg resultGPR
= result
.gpr();
561 if (!isKnownInteger(node
->child1().node()))
562 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg1TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
563 if (!isKnownInteger(node
->child2().node()))
564 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg2TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
566 branch32(cond
, arg1PayloadGPR
, arg2PayloadGPR
, taken
);
568 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
569 jump(notTaken
, ForceJump
);
571 slowPath
.link(&m_jit
);
573 silentSpillAllRegisters(resultGPR
);
574 callOperation(helperFunction
, resultGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
575 silentFillAllRegisters(resultGPR
);
577 branchTest32(callResultCondition
, resultGPR
, taken
);
583 m_indexInBlock
= m_jit
.graph().m_blocks
[m_block
]->size() - 1;
584 m_currentNode
= branchNode
;
587 template<typename JumpType
>
588 class CompareAndBoxBooleanSlowPathGenerator
589 : public CallSlowPathGenerator
<JumpType
, S_DFGOperation_EJJ
, GPRReg
> {
591 CompareAndBoxBooleanSlowPathGenerator(
592 JumpType from
, SpeculativeJIT
* jit
,
593 S_DFGOperation_EJJ function
, GPRReg result
, GPRReg arg1Tag
, GPRReg arg1Payload
,
594 GPRReg arg2Tag
, GPRReg arg2Payload
)
595 : CallSlowPathGenerator
<JumpType
, S_DFGOperation_EJJ
, GPRReg
>(
596 from
, jit
, function
, NeedToSpill
, result
)
598 , m_arg1Payload(arg1Payload
)
600 , m_arg2Payload(arg2Payload
)
605 virtual void generateInternal(SpeculativeJIT
* jit
)
610 this->m_function
, this->m_result
, m_arg1Tag
, m_arg1Payload
, m_arg2Tag
,
612 jit
->m_jit
.and32(JITCompiler::TrustedImm32(1), this->m_result
);
618 GPRReg m_arg1Payload
;
620 GPRReg m_arg2Payload
;
623 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node
* node
, MacroAssembler::RelationalCondition cond
, S_DFGOperation_EJJ helperFunction
)
625 JSValueOperand
arg1(this, node
->child1());
626 JSValueOperand
arg2(this, node
->child2());
627 GPRReg arg1TagGPR
= arg1
.tagGPR();
628 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
629 GPRReg arg2TagGPR
= arg2
.tagGPR();
630 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
632 JITCompiler::JumpList slowPath
;
634 if (isKnownNotInteger(node
->child1().node()) || isKnownNotInteger(node
->child2().node())) {
635 GPRResult
result(this);
636 GPRReg resultPayloadGPR
= result
.gpr();
642 callOperation(helperFunction
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
644 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
646 GPRTemporary
resultPayload(this, arg1
, false);
647 GPRReg resultPayloadGPR
= resultPayload
.gpr();
652 if (!isKnownInteger(node
->child1().node()))
653 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg1TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
654 if (!isKnownInteger(node
->child2().node()))
655 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, arg2TagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
)));
657 m_jit
.compare32(cond
, arg1PayloadGPR
, arg2PayloadGPR
, resultPayloadGPR
);
659 if (!isKnownInteger(node
->child1().node()) || !isKnownInteger(node
->child2().node())) {
660 addSlowPathGenerator(adoptPtr(
661 new CompareAndBoxBooleanSlowPathGenerator
<JITCompiler::JumpList
>(
662 slowPath
, this, helperFunction
, resultPayloadGPR
, arg1TagGPR
,
663 arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
)));
666 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
670 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node
* node
, Node
* branchNode
, bool invert
)
672 BlockIndex taken
= branchNode
->takenBlockIndex();
673 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
675 // The branch instruction will branch to the taken block.
676 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
677 if (taken
== nextBlock()) {
679 BlockIndex tmp
= taken
;
684 JSValueOperand
arg1(this, node
->child1());
685 JSValueOperand
arg2(this, node
->child2());
686 GPRReg arg1TagGPR
= arg1
.tagGPR();
687 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
688 GPRReg arg2TagGPR
= arg2
.tagGPR();
689 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
691 GPRTemporary
resultPayload(this, arg1
, false);
692 GPRReg resultPayloadGPR
= resultPayload
.gpr();
697 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
698 // see if we get lucky: if the arguments are cells and they reference the same
699 // cell, then they must be strictly equal.
700 branchPtr(JITCompiler::Equal
, arg1PayloadGPR
, arg2PayloadGPR
, invert
? notTaken
: taken
);
702 silentSpillAllRegisters(resultPayloadGPR
);
703 callOperation(operationCompareStrictEqCell
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
704 silentFillAllRegisters(resultPayloadGPR
);
706 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultPayloadGPR
, taken
);
708 // FIXME: Add fast paths for twoCells, number etc.
710 silentSpillAllRegisters(resultPayloadGPR
);
711 callOperation(operationCompareStrictEq
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
712 silentFillAllRegisters(resultPayloadGPR
);
714 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, resultPayloadGPR
, taken
);
720 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node
* node
, bool invert
)
722 JSValueOperand
arg1(this, node
->child1());
723 JSValueOperand
arg2(this, node
->child2());
724 GPRReg arg1TagGPR
= arg1
.tagGPR();
725 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
726 GPRReg arg2TagGPR
= arg2
.tagGPR();
727 GPRReg arg2PayloadGPR
= arg2
.payloadGPR();
729 GPRTemporary
resultPayload(this, arg1
, false);
730 GPRReg resultPayloadGPR
= resultPayload
.gpr();
735 if (isKnownCell(node
->child1().node()) && isKnownCell(node
->child2().node())) {
736 // see if we get lucky: if the arguments are cells and they reference the same
737 // cell, then they must be strictly equal.
738 // FIXME: this should flush registers instead of silent spill/fill.
739 JITCompiler::Jump notEqualCase
= m_jit
.branchPtr(JITCompiler::NotEqual
, arg1PayloadGPR
, arg2PayloadGPR
);
741 m_jit
.move(JITCompiler::TrustedImm32(!invert
), resultPayloadGPR
);
742 JITCompiler::Jump done
= m_jit
.jump();
744 notEqualCase
.link(&m_jit
);
746 silentSpillAllRegisters(resultPayloadGPR
);
747 callOperation(operationCompareStrictEqCell
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
748 silentFillAllRegisters(resultPayloadGPR
);
750 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR
);
754 // FIXME: Add fast paths.
756 silentSpillAllRegisters(resultPayloadGPR
);
757 callOperation(operationCompareStrictEq
, resultPayloadGPR
, arg1TagGPR
, arg1PayloadGPR
, arg2TagGPR
, arg2PayloadGPR
);
758 silentFillAllRegisters(resultPayloadGPR
);
760 m_jit
.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR
);
763 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
766 void SpeculativeJIT::emitCall(Node
* node
)
768 if (node
->op() != Call
)
769 ASSERT(node
->op() == Construct
);
771 // For constructors, the this argument is not passed but we have to make space
773 int dummyThisArgument
= node
->op() == Call
? 0 : 1;
775 CallLinkInfo::CallType callType
= node
->op() == Call
? CallLinkInfo::Call
: CallLinkInfo::Construct
;
777 Edge calleeEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild()];
778 JSValueOperand
callee(this, calleeEdge
);
779 GPRReg calleeTagGPR
= callee
.tagGPR();
780 GPRReg calleePayloadGPR
= callee
.payloadGPR();
783 // The call instruction's first child is either the function (normal call) or the
784 // receiver (method call). subsequent children are the arguments.
785 int numPassedArgs
= node
->numChildren() - 1;
787 m_jit
.store32(MacroAssembler::TrustedImm32(numPassedArgs
+ dummyThisArgument
), callFramePayloadSlot(JSStack::ArgumentCount
));
788 m_jit
.storePtr(GPRInfo::callFrameRegister
, callFramePayloadSlot(JSStack::CallerFrame
));
789 m_jit
.store32(calleePayloadGPR
, callFramePayloadSlot(JSStack::Callee
));
790 m_jit
.store32(calleeTagGPR
, callFrameTagSlot(JSStack::Callee
));
792 for (int i
= 0; i
< numPassedArgs
; i
++) {
793 Edge argEdge
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + 1 + i
];
794 JSValueOperand
arg(this, argEdge
);
795 GPRReg argTagGPR
= arg
.tagGPR();
796 GPRReg argPayloadGPR
= arg
.payloadGPR();
799 m_jit
.store32(argTagGPR
, argumentTagSlot(i
+ dummyThisArgument
));
800 m_jit
.store32(argPayloadGPR
, argumentPayloadSlot(i
+ dummyThisArgument
));
805 GPRResult
resultPayload(this);
806 GPRResult2
resultTag(this);
807 GPRReg resultPayloadGPR
= resultPayload
.gpr();
808 GPRReg resultTagGPR
= resultTag
.gpr();
810 JITCompiler::DataLabelPtr targetToCheck
;
811 JITCompiler::JumpList slowPath
;
813 CallBeginToken token
;
814 m_jit
.beginCall(node
->codeOrigin
, token
);
816 m_jit
.addPtr(TrustedImm32(m_jit
.codeBlock()->m_numCalleeRegisters
* sizeof(Register
)), GPRInfo::callFrameRegister
);
818 slowPath
.append(m_jit
.branch32(MacroAssembler::NotEqual
, calleeTagGPR
, TrustedImm32(JSValue::CellTag
)));
819 slowPath
.append(m_jit
.branchPtrWithPatch(MacroAssembler::NotEqual
, calleePayloadGPR
, targetToCheck
));
820 m_jit
.loadPtr(MacroAssembler::Address(calleePayloadGPR
, OBJECT_OFFSETOF(JSFunction
, m_scope
)), resultPayloadGPR
);
821 m_jit
.storePtr(resultPayloadGPR
, MacroAssembler::Address(GPRInfo::callFrameRegister
, static_cast<ptrdiff_t>(sizeof(Register
)) * JSStack::ScopeChain
+ OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
822 m_jit
.store32(MacroAssembler::TrustedImm32(JSValue::CellTag
), MacroAssembler::Address(GPRInfo::callFrameRegister
, static_cast<ptrdiff_t>(sizeof(Register
)) * JSStack::ScopeChain
+ OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
824 CodeOrigin codeOrigin
= node
->codeOrigin
;
825 JITCompiler::Call fastCall
= m_jit
.nearCall();
826 m_jit
.notifyCall(fastCall
, codeOrigin
, token
);
828 JITCompiler::Jump done
= m_jit
.jump();
830 slowPath
.link(&m_jit
);
832 if (calleeTagGPR
== GPRInfo::nonArgGPR0
) {
833 if (calleePayloadGPR
== GPRInfo::nonArgGPR1
)
834 m_jit
.swap(GPRInfo::nonArgGPR1
, GPRInfo::nonArgGPR0
);
836 m_jit
.move(calleeTagGPR
, GPRInfo::nonArgGPR1
);
837 m_jit
.move(calleePayloadGPR
, GPRInfo::nonArgGPR0
);
840 m_jit
.move(calleePayloadGPR
, GPRInfo::nonArgGPR0
);
841 m_jit
.move(calleeTagGPR
, GPRInfo::nonArgGPR1
);
843 m_jit
.prepareForExceptionCheck();
844 JITCompiler::Call slowCall
= m_jit
.nearCall();
845 m_jit
.notifyCall(slowCall
, codeOrigin
, token
);
849 m_jit
.setupResults(resultPayloadGPR
, resultTagGPR
);
851 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, DataFormatJS
, UseChildrenCalledExplicitly
);
853 m_jit
.addJSCall(fastCall
, slowCall
, targetToCheck
, callType
, calleePayloadGPR
, node
->codeOrigin
);
// Fills the JSValue for `edge` into a GPR as an unboxed int32, speculating that it
// is an integer. If the speculation is statically known to fail, execution is
// terminated (terminateSpeculativeExecution); dynamic mismatches OSR-exit via
// speculationCheck. `returnFormat` reports the format produced (always
// DataFormatInteger on the paths visible here).
// NOTE(review): this extraction is lossy -- braces, `return gpr;` statements and
// some `case` labels between the numbered statements are missing; the text below
// is kept byte-identical to the extracted source.
856 template<bool strict
>
857 GPRReg
SpeculativeJIT::fillSpeculateIntInternal(Edge edge
, DataFormat
& returnFormat
)
859 #if DFG_ENABLE(DEBUG_VERBOSE)
860 dataLogF("SpecInt@%d ", edge
->index());
// Narrow the abstract value for this edge to SpecInt32 and look up where the
// value currently lives (register/spill slot) in the generation info.
862 AbstractValue
& value
= m_state
.forNode(edge
);
863 SpeculatedType type
= value
.m_type
;
864 ASSERT(edge
.useKind() != KnownInt32Use
|| !(value
.m_type
& ~SpecInt32
));
865 value
.filter(SpecInt32
);
866 VirtualRegister virtualRegister
= edge
->virtualRegister();
867 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
869 switch (info
.registerFormat()) {
// Value is not currently in a register: materialize it from a constant or
// reload it from its spill slot.
870 case DataFormatNone
: {
871 if ((edge
->hasConstant() && !isInt32Constant(edge
.node())) || info
.spillFormat() == DataFormatDouble
) {
// Statically known not to be an int32 -- speculation cannot succeed.
872 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
873 returnFormat
= DataFormatInteger
;
877 if (edge
->hasConstant()) {
878 ASSERT(isInt32Constant(edge
.node()));
879 GPRReg gpr
= allocate();
880 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
881 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
882 info
.fillInteger(*m_stream
, gpr
);
883 returnFormat
= DataFormatInteger
;
887 DataFormat spillFormat
= info
.spillFormat();
888 ASSERT_UNUSED(spillFormat
, (spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInteger
);
890 // If we know this was spilled as an integer we can fill without checking.
891 if (type
& ~SpecInt32
)
// Dynamic check: the spilled value's tag word must be Int32Tag, else OSR-exit.
892 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::Int32Tag
)));
894 GPRReg gpr
= allocate();
895 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
896 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
897 info
.fillInteger(*m_stream
, gpr
);
898 returnFormat
= DataFormatInteger
;
// Boxed JS value in a tag/payload register pair: check the tag, then drop the
// tag register and keep only the payload as an unboxed integer.
902 case DataFormatJSInteger
:
904 // Check the value is an integer.
905 GPRReg tagGPR
= info
.tagGPR();
906 GPRReg payloadGPR
= info
.payloadGPR();
908 m_gprs
.lock(payloadGPR
);
909 if (type
& ~SpecInt32
)
910 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::Int32Tag
)));
911 m_gprs
.unlock(tagGPR
);
912 m_gprs
.release(tagGPR
);
913 m_gprs
.release(payloadGPR
);
914 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderInteger
);
915 info
.fillInteger(*m_stream
, payloadGPR
);
916 // If !strict we're done, return.
917 returnFormat
= DataFormatInteger
;
// Already an unboxed integer in a register -- nothing to do.
921 case DataFormatInteger
: {
922 GPRReg gpr
= info
.gpr();
924 returnFormat
= DataFormatInteger
;
// Formats that cannot contain an int32: speculation fails unconditionally.
928 case DataFormatDouble
:
930 case DataFormatBoolean
:
931 case DataFormatJSDouble
:
932 case DataFormatJSCell
:
933 case DataFormatJSBoolean
:
934 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
935 returnFormat
= DataFormatInteger
;
938 case DataFormatStorage
:
940 RELEASE_ASSERT_NOT_REACHED();
941 return InvalidGPRReg
;
945 GPRReg
SpeculativeJIT::fillSpeculateInt(Edge edge
, DataFormat
& returnFormat
)
947 return fillSpeculateIntInternal
<false>(edge
, returnFormat
);
950 GPRReg
SpeculativeJIT::fillSpeculateIntStrict(Edge edge
)
952 DataFormat mustBeDataFormatInteger
;
953 GPRReg result
= fillSpeculateIntInternal
<true>(edge
, mustBeDataFormatInteger
);
954 ASSERT(mustBeDataFormatInteger
== DataFormatInteger
);
// Fills the JSValue for `edge` into an FPR as an unboxed double, speculating that
// it is a number (int32 or double). Integers are converted with
// convertInt32ToDouble; dynamic non-number tags OSR-exit via speculationCheck.
// NOTE(review): this extraction is lossy -- braces, `return fpr;` statements and
// some `case` labels are missing between the numbered statements; text kept
// byte-identical to the extracted source.
958 FPRReg
SpeculativeJIT::fillSpeculateDouble(Edge edge
)
960 #if DFG_ENABLE(DEBUG_VERBOSE)
961 dataLogF("SpecDouble@%d ", edge
->index());
// Narrow the abstract value to SpecNumber and locate the value.
963 AbstractValue
& value
= m_state
.forNode(edge
);
964 SpeculatedType type
= value
.m_type
;
965 ASSERT(edge
.useKind() != KnownNumberUse
|| !(value
.m_type
& ~SpecNumber
));
966 value
.filter(SpecNumber
);
967 VirtualRegister virtualRegister
= edge
->virtualRegister();
968 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
// Not in a register: materialize from a constant or reload from the spill slot.
970 if (info
.registerFormat() == DataFormatNone
) {
972 if (edge
->hasConstant()) {
973 if (isInt32Constant(edge
.node())) {
974 GPRReg gpr
= allocate();
975 m_jit
.move(MacroAssembler::Imm32(valueOfInt32Constant(edge
.node())), gpr
);
976 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
977 info
.fillInteger(*m_stream
, gpr
);
979 } else if (isNumberConstant(edge
.node())) {
980 FPRReg fpr
= fprAllocate();
981 m_jit
.loadDouble(addressOfDoubleConstant(edge
.node()), fpr
);
982 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderConstant
);
983 info
.fillDouble(*m_stream
, fpr
);
// Constant that is not a number: speculation cannot succeed; the returned
// (freshly allocated) FPR is a dummy so callers still get a valid register.
986 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
987 return fprAllocate();
990 DataFormat spillFormat
= info
.spillFormat();
991 ASSERT((spillFormat
& DataFormatJS
) || spillFormat
== DataFormatInteger
);
// Spilled as a double: straight reload.
992 if (spillFormat
== DataFormatJSDouble
|| spillFormat
== DataFormatDouble
) {
993 FPRReg fpr
= fprAllocate();
994 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
995 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderSpilled
);
996 info
.fillDouble(*m_stream
, fpr
);
// Spilled as int32 or unknown JS value: branch on the tag, loading the boxed
// double directly or converting the int32 payload.
1000 FPRReg fpr
= fprAllocate();
1001 JITCompiler::Jump hasUnboxedDouble
;
1003 if (spillFormat
!= DataFormatJSInteger
&& spillFormat
!= DataFormatInteger
) {
1004 JITCompiler::Jump isInteger
= m_jit
.branch32(MacroAssembler::Equal
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::Int32Tag
));
1005 if (type
& ~SpecNumber
)
// In the 32_64 encoding, tags >= LowestTag are non-double JSValues; a tag at or
// above LowestTag that is not Int32Tag means "not a number" -> OSR-exit.
1006 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::AboveOrEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::LowestTag
)));
1007 m_jit
.loadDouble(JITCompiler::addressFor(virtualRegister
), fpr
);
1008 hasUnboxedDouble
= m_jit
.jump();
1010 isInteger
.link(&m_jit
);
1013 m_jit
.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister
), fpr
);
1015 if (hasUnboxedDouble
.isSet())
1016 hasUnboxedDouble
.link(&m_jit
);
1018 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderSpilled
);
1019 info
.fillDouble(*m_stream
, fpr
);
// Value already lives in registers: dispatch on its current format.
1025 switch (info
.registerFormat()) {
// Boxed JS value (possibly known-integer) in a tag/payload pair: unbox or
// convert into a fresh FPR, then release the GPR pair.
1027 case DataFormatJSInteger
: {
1028 GPRReg tagGPR
= info
.tagGPR();
1029 GPRReg payloadGPR
= info
.payloadGPR();
1030 FPRReg fpr
= fprAllocate();
1032 m_gprs
.lock(tagGPR
);
1033 m_gprs
.lock(payloadGPR
);
1035 JITCompiler::Jump hasUnboxedDouble
;
1037 if (info
.registerFormat() != DataFormatJSInteger
) {
1038 FPRTemporary
scratch(this);
1039 JITCompiler::Jump isInteger
= m_jit
.branch32(MacroAssembler::Equal
, tagGPR
, TrustedImm32(JSValue::Int32Tag
));
1040 if (type
& ~SpecNumber
)
1041 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::AboveOrEqual
, tagGPR
, TrustedImm32(JSValue::LowestTag
)));
1042 unboxDouble(tagGPR
, payloadGPR
, fpr
, scratch
.fpr());
1043 hasUnboxedDouble
= m_jit
.jump();
1044 isInteger
.link(&m_jit
);
1047 m_jit
.convertInt32ToDouble(payloadGPR
, fpr
);
1049 if (hasUnboxedDouble
.isSet())
1050 hasUnboxedDouble
.link(&m_jit
);
1052 m_gprs
.release(tagGPR
);
1053 m_gprs
.release(payloadGPR
);
1054 m_gprs
.unlock(tagGPR
);
1055 m_gprs
.unlock(payloadGPR
);
1056 m_fprs
.retain(fpr
, virtualRegister
, SpillOrderDouble
);
1057 info
.fillDouble(*m_stream
, fpr
);
// Unboxed int32 in a GPR: convert into a fresh FPR.
1062 case DataFormatInteger
: {
1063 FPRReg fpr
= fprAllocate();
1064 GPRReg gpr
= info
.gpr();
1066 m_jit
.convertInt32ToDouble(gpr
, fpr
);
// Already a double in an FPR.
1071 case DataFormatJSDouble
:
1072 case DataFormatDouble
: {
1073 FPRReg fpr
= info
.fpr();
1078 case DataFormatNone
:
1079 case DataFormatStorage
:
1080 RELEASE_ASSERT_NOT_REACHED();
// Formats that cannot contain a number: speculation fails unconditionally.
1082 case DataFormatCell
:
1083 case DataFormatJSCell
:
1084 case DataFormatBoolean
:
1085 case DataFormatJSBoolean
:
1086 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1087 return fprAllocate();
1090 RELEASE_ASSERT_NOT_REACHED();
1091 return InvalidFPRReg
;
// Fills the JSValue for `edge` into a GPR as an unboxed cell pointer, speculating
// that it is a cell (tag == CellTag). Non-cell formats terminate speculation;
// dynamic tag mismatches OSR-exit via speculationCheck.
// NOTE(review): lossy extraction -- braces, `return gpr;` statements and some
// `case` labels are missing; text kept byte-identical to the extracted source.
1095 GPRReg
SpeculativeJIT::fillSpeculateCell(Edge edge
)
1097 #if DFG_ENABLE(DEBUG_VERBOSE)
1098 dataLogF("SpecCell@%d ", edge
->index());
// Narrow the abstract value to SpecCell and locate the value.
1100 AbstractValue
& value
= m_state
.forNode(edge
);
1101 SpeculatedType type
= value
.m_type
;
1102 ASSERT((edge
.useKind() != KnownCellUse
&& edge
.useKind() != KnownStringUse
) || !(value
.m_type
& ~SpecCell
));
1103 value
.filter(SpecCell
);
1104 VirtualRegister virtualRegister
= edge
->virtualRegister();
1105 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1107 switch (info
.registerFormat()) {
// Not in a register: materialize a cell constant or reload from the spill slot.
1108 case DataFormatNone
: {
1109 if (info
.spillFormat() == DataFormatInteger
|| info
.spillFormat() == DataFormatDouble
) {
1110 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1114 if (edge
->hasConstant()) {
1115 JSValue jsValue
= valueOfJSConstant(edge
.node());
1116 GPRReg gpr
= allocate();
1117 if (jsValue
.isCell()) {
1118 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1119 m_jit
.move(MacroAssembler::TrustedImmPtr(jsValue
.asCell()), gpr
);
1120 info
.fillCell(*m_stream
, gpr
);
// Constant that is not a cell: speculation cannot succeed.
1123 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1127 ASSERT((info
.spillFormat() & DataFormatJS
) || info
.spillFormat() == DataFormatCell
);
1128 if (type
& ~SpecCell
)
// Dynamic check: the spilled value's tag word must be CellTag, else OSR-exit.
1129 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::CellTag
)));
1130 GPRReg gpr
= allocate();
1131 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
1132 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1133 info
.fillCell(*m_stream
, gpr
);
// Already an unboxed cell pointer in a register.
1137 case DataFormatCell
: {
1138 GPRReg gpr
= info
.gpr();
// Boxed JS value in a tag/payload pair: check the tag, then keep only the
// payload register as the unboxed cell pointer.
1143 case DataFormatJSCell
:
1144 case DataFormatJS
: {
1145 GPRReg tagGPR
= info
.tagGPR();
1146 GPRReg payloadGPR
= info
.payloadGPR();
1147 m_gprs
.lock(tagGPR
);
1148 m_gprs
.lock(payloadGPR
);
1149 if (type
& ~SpecCell
)
1150 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::CellTag
)));
1151 m_gprs
.unlock(tagGPR
);
1152 m_gprs
.release(tagGPR
);
1153 m_gprs
.release(payloadGPR
);
1154 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderCell
);
1155 info
.fillCell(*m_stream
, payloadGPR
);
// Formats that cannot contain a cell: speculation fails unconditionally.
1159 case DataFormatJSInteger
:
1160 case DataFormatInteger
:
1161 case DataFormatJSDouble
:
1162 case DataFormatDouble
:
1163 case DataFormatJSBoolean
:
1164 case DataFormatBoolean
:
1165 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1168 case DataFormatStorage
:
1169 RELEASE_ASSERT_NOT_REACHED();
1172 RELEASE_ASSERT_NOT_REACHED();
1173 return InvalidGPRReg
;
// Fills the JSValue for `edge` into a GPR as an unboxed boolean payload,
// speculating that it is a boolean (tag == BooleanTag). Mirrors
// fillSpeculateCell's structure.
// NOTE(review): lossy extraction -- braces, `return gpr;` statements and some
// `case` labels are missing; text kept byte-identical to the extracted source.
1177 GPRReg
SpeculativeJIT::fillSpeculateBoolean(Edge edge
)
1179 #if DFG_ENABLE(DEBUG_VERBOSE)
1180 dataLogF("SpecBool@%d ", edge
.node()->index());
// Narrow the abstract value to SpecBoolean and locate the value.
1182 AbstractValue
& value
= m_state
.forNode(edge
);
1183 SpeculatedType type
= value
.m_type
;
1184 value
.filter(SpecBoolean
);
1185 VirtualRegister virtualRegister
= edge
->virtualRegister();
1186 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
1188 switch (info
.registerFormat()) {
// Not in a register: materialize a boolean constant or reload from the spill slot.
1189 case DataFormatNone
: {
1190 if (info
.spillFormat() == DataFormatInteger
|| info
.spillFormat() == DataFormatDouble
) {
1191 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1195 if (edge
->hasConstant()) {
1196 JSValue jsValue
= valueOfJSConstant(edge
.node());
1197 GPRReg gpr
= allocate();
1198 if (jsValue
.isBoolean()) {
1199 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderConstant
);
1200 m_jit
.move(MacroAssembler::TrustedImm32(jsValue
.asBoolean()), gpr
);
1201 info
.fillBoolean(*m_stream
, gpr
);
// Constant that is not a boolean: speculation cannot succeed.
1204 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1208 ASSERT((info
.spillFormat() & DataFormatJS
) || info
.spillFormat() == DataFormatBoolean
);
1210 if (type
& ~SpecBoolean
)
// Dynamic check: the spilled value's tag word must be BooleanTag, else OSR-exit.
1211 speculationCheck(BadType
, JSValueSource(JITCompiler::addressFor(virtualRegister
)), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::BooleanTag
)));
1213 GPRReg gpr
= allocate();
1214 m_jit
.load32(JITCompiler::payloadFor(virtualRegister
), gpr
);
1215 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
1216 info
.fillBoolean(*m_stream
, gpr
);
// Already an unboxed boolean in a register.
1220 case DataFormatBoolean
: {
1221 GPRReg gpr
= info
.gpr();
// Boxed JS value in a tag/payload pair: check the tag, then keep only the
// payload register as the unboxed boolean.
1226 case DataFormatJSBoolean
:
1227 case DataFormatJS
: {
1228 GPRReg tagGPR
= info
.tagGPR();
1229 GPRReg payloadGPR
= info
.payloadGPR();
1230 m_gprs
.lock(tagGPR
);
1231 m_gprs
.lock(payloadGPR
);
1232 if (type
& ~SpecBoolean
)
1233 speculationCheck(BadType
, JSValueRegs(tagGPR
, payloadGPR
), edge
, m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::BooleanTag
)));
1235 m_gprs
.unlock(tagGPR
);
1236 m_gprs
.release(tagGPR
);
1237 m_gprs
.release(payloadGPR
);
1238 m_gprs
.retain(payloadGPR
, virtualRegister
, SpillOrderBoolean
);
1239 info
.fillBoolean(*m_stream
, payloadGPR
);
// Formats that cannot contain a boolean: speculation fails unconditionally.
1243 case DataFormatJSInteger
:
1244 case DataFormatInteger
:
1245 case DataFormatJSDouble
:
1246 case DataFormatDouble
:
1247 case DataFormatJSCell
:
1248 case DataFormatCell
:
1249 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1252 case DataFormatStorage
:
1253 RELEASE_ASSERT_NOT_REACHED();
1256 RELEASE_ASSERT_NOT_REACHED();
1257 return InvalidGPRReg
;
1261 JITCompiler::Jump
SpeculativeJIT::convertToDouble(JSValueOperand
& op
, FPRReg result
)
1263 FPRTemporary
scratch(this);
1265 GPRReg opPayloadGPR
= op
.payloadGPR();
1266 GPRReg opTagGPR
= op
.tagGPR();
1267 FPRReg scratchFPR
= scratch
.fpr();
1269 JITCompiler::Jump isInteger
= m_jit
.branch32(MacroAssembler::Equal
, opTagGPR
, TrustedImm32(JSValue::Int32Tag
));
1270 JITCompiler::Jump notNumber
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, opPayloadGPR
, TrustedImm32(JSValue::LowestTag
));
1272 unboxDouble(opTagGPR
, opPayloadGPR
, result
, scratchFPR
);
1273 JITCompiler::Jump done
= m_jit
.jump();
1275 isInteger
.link(&m_jit
);
1276 m_jit
.convertInt32ToDouble(opPayloadGPR
, result
);
// Compiles Object == Object: speculates both children are objects (not strings,
// and -- unless the masquerades-as-undefined watchpoint is valid -- not
// MasqueradesAsUndefined), then compares the two cell pointers and produces a
// 0/1 boolean result.
// NOTE(review): lossy extraction -- the DFG_TYPE_CHECK/branchTest8 call openers
// and closing braces between the numbered statements are missing; text kept
// byte-identical to the extracted source.
1283 void SpeculativeJIT::compileObjectEquality(Node
* node
)
1285 SpeculateCellOperand
op1(this, node
->child1());
1286 SpeculateCellOperand
op2(this, node
->child2());
1287 GPRReg op1GPR
= op1
.gpr();
1288 GPRReg op2GPR
= op2
.gpr();
// Fast path: watchpoint still valid, so no object can masquerade as undefined;
// only the "is not a string" structure checks are emitted.
1290 if (m_jit
.graph().globalObjectFor(node
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
1291 m_jit
.graph().globalObjectFor(node
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
1293 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchPtr(
1294 MacroAssembler::Equal
,
1295 MacroAssembler::Address(op1GPR
, JSCell::structureOffset()),
1296 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1298 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchPtr(
1299 MacroAssembler::Equal
,
1300 MacroAssembler::Address(op2GPR
, JSCell::structureOffset()),
1301 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
// Slow path: load each operand's Structure and additionally OSR-exit if the
// MasqueradesAsUndefined type-info flag is set.
1303 GPRTemporary
structure(this);
1304 GPRReg structureGPR
= structure
.gpr();
1306 m_jit
.loadPtr(MacroAssembler::Address(op1GPR
, JSCell::structureOffset()), structureGPR
);
1308 JSValueSource::unboxedCell(op1GPR
), node
->child1(), SpecObject
, m_jit
.branchPtr(
1309 MacroAssembler::Equal
,
1311 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1312 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1314 MacroAssembler::NonZero
,
1315 MacroAssembler::Address(structureGPR
, Structure::typeInfoFlagsOffset()),
1316 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1318 m_jit
.loadPtr(MacroAssembler::Address(op2GPR
, JSCell::structureOffset()), structureGPR
);
1320 JSValueSource::unboxedCell(op2GPR
), node
->child2(), SpecObject
, m_jit
.branchPtr(
1321 MacroAssembler::Equal
,
1323 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1324 speculationCheck(BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1326 MacroAssembler::NonZero
,
1327 MacroAssembler::Address(structureGPR
, Structure::typeInfoFlagsOffset()),
1328 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
// Both operands are plain objects: pointer equality decides the result.
1331 GPRTemporary
resultPayload(this, op2
);
1332 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1334 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2GPR
);
1335 m_jit
.move(TrustedImm32(1), resultPayloadGPR
);
1336 MacroAssembler::Jump done
= m_jit
.jump();
1337 falseCase
.link(&m_jit
);
1338 m_jit
.move(TrustedImm32(0), resultPayloadGPR
);
1341 booleanResult(resultPayloadGPR
, node
);
// Compiles Object == (Object | null | undefined): left is speculated to be an
// object; right is either an object (pointer-compare against left) or
// null/undefined (result is false). Produces a 0/1 boolean result.
// NOTE(review): lossy extraction -- DFG_TYPE_CHECK call openers, `else` keywords
// and closing braces between the numbered statements are missing; text kept
// byte-identical to the extracted source.
1344 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
)
1346 SpeculateCellOperand
op1(this, leftChild
);
1347 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1348 GPRTemporary
result(this);
1350 GPRReg op1GPR
= op1
.gpr();
1351 GPRReg op2TagGPR
= op2
.tagGPR();
1352 GPRReg op2PayloadGPR
= op2
.payloadGPR();
1353 GPRReg resultGPR
= result
.gpr();
1354 GPRTemporary structure
;
1355 GPRReg structureGPR
= InvalidGPRReg
;
1357 bool masqueradesAsUndefinedWatchpointValid
= m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid();
1359 if (!masqueradesAsUndefinedWatchpointValid
) {
1360 // The masquerades as undefined case will use the structure register, so allocate it here.
1361 // Do this at the top of the function to avoid branching around a register allocation.
1362 GPRTemporary
realStructure(this);
1363 structure
.adopt(realStructure
);
1364 structureGPR
= structure
.gpr();
// Speculate that the left child is an object (not a string; not
// MasqueradesAsUndefined unless the watchpoint covers that).
1367 if (masqueradesAsUndefinedWatchpointValid
) {
1368 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
1370 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1371 MacroAssembler::Equal
,
1372 MacroAssembler::Address(op1GPR
, JSCell::structureOffset()),
1373 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1375 m_jit
.loadPtr(MacroAssembler::Address(op1GPR
, JSCell::structureOffset()), structureGPR
);
1377 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1378 MacroAssembler::Equal
,
1380 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1381 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1383 MacroAssembler::NonZero
,
1384 MacroAssembler::Address(structureGPR
, Structure::typeInfoFlagsOffset()),
1385 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1389 // It seems that most of the time when programs do a == b where b may be either null/undefined
1390 // or an object, b is usually an object. Balance the branches to make that case fast.
1391 MacroAssembler::Jump rightNotCell
=
1392 m_jit
.branch32(MacroAssembler::NotEqual
, op2TagGPR
, TrustedImm32(JSValue::CellTag
));
1394 // We know that within this branch, rightChild must be a cell.
1395 if (masqueradesAsUndefinedWatchpointValid
) {
1396 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
1398 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1400 MacroAssembler::Equal
,
1401 MacroAssembler::Address(op2PayloadGPR
, JSCell::structureOffset()),
1402 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1404 m_jit
.loadPtr(MacroAssembler::Address(op2PayloadGPR
, JSCell::structureOffset()), structureGPR
);
1406 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1408 MacroAssembler::Equal
,
1410 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1411 speculationCheck(BadType
, JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
,
1413 MacroAssembler::NonZero
,
1414 MacroAssembler::Address(structureGPR
, Structure::typeInfoFlagsOffset()),
1415 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1418 // At this point we know that we can perform a straight-forward equality comparison on pointer
1419 // values because both left and right are pointers to objects that have no special equality
1421 MacroAssembler::Jump falseCase
= m_jit
.branchPtr(MacroAssembler::NotEqual
, op1GPR
, op2PayloadGPR
);
1422 MacroAssembler::Jump trueCase
= m_jit
.jump();
1424 rightNotCell
.link(&m_jit
);
1426 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1427 // prove that it is either null or undefined.
1428 if (needsTypeCheck(rightChild
, SpecCell
| SpecOther
)) {
// UndefinedTag | 1 == NullTag in the 32_64 encoding, so OR-ing 1 into the tag
// folds the undefined and null cases into a single comparison.
1429 m_jit
.move(op2TagGPR
, resultGPR
);
1430 m_jit
.or32(TrustedImm32(1), resultGPR
);
1433 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, SpecCell
| SpecOther
,
1435 MacroAssembler::NotEqual
, resultGPR
,
1436 MacroAssembler::TrustedImm32(JSValue::NullTag
)));
// null/undefined on the right: an object never equals them, so result is false.
1439 falseCase
.link(&m_jit
);
1440 m_jit
.move(TrustedImm32(0), resultGPR
);
1441 MacroAssembler::Jump done
= m_jit
.jump();
1442 trueCase
.link(&m_jit
);
1443 m_jit
.move(TrustedImm32(1), resultGPR
);
1446 booleanResult(resultGPR
, m_currentNode
);
// Peephole (fused compare+branch) variant of compileObjectToObjectOrOtherEquality:
// instead of materializing a 0/1 result it branches directly to the taken /
// notTaken successor blocks of `branchNode`.
// NOTE(review): lossy extraction -- DFG_TYPE_CHECK call openers, `else` keywords
// and closing braces between the numbered statements are missing; text kept
// byte-identical to the extracted source.
1449 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild
, Edge rightChild
, Node
* branchNode
)
1451 BlockIndex taken
= branchNode
->takenBlockIndex();
1452 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
1454 SpeculateCellOperand
op1(this, leftChild
);
1455 JSValueOperand
op2(this, rightChild
, ManualOperandSpeculation
);
1456 GPRTemporary
result(this);
1458 GPRReg op1GPR
= op1
.gpr();
1459 GPRReg op2TagGPR
= op2
.tagGPR();
1460 GPRReg op2PayloadGPR
= op2
.payloadGPR();
1461 GPRReg resultGPR
= result
.gpr();
1462 GPRTemporary structure
;
1463 GPRReg structureGPR
= InvalidGPRReg
;
1465 bool masqueradesAsUndefinedWatchpointValid
= m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid();
1467 if (!masqueradesAsUndefinedWatchpointValid
) {
1468 // The masquerades as undefined case will use the structure register, so allocate it here.
1469 // Do this at the top of the function to avoid branching around a register allocation.
1470 GPRTemporary
realStructure(this);
1471 structure
.adopt(realStructure
);
1472 structureGPR
= structure
.gpr();
// Speculate that the left child is an object, as in the non-peephole variant.
1475 if (masqueradesAsUndefinedWatchpointValid
) {
1476 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
1478 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1479 MacroAssembler::Equal
,
1480 MacroAssembler::Address(op1GPR
, JSCell::structureOffset()),
1481 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1483 m_jit
.loadPtr(MacroAssembler::Address(op1GPR
, JSCell::structureOffset()), structureGPR
);
1485 JSValueSource::unboxedCell(op1GPR
), leftChild
, SpecObject
, m_jit
.branchPtr(
1486 MacroAssembler::Equal
,
1488 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1489 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), leftChild
,
1491 MacroAssembler::NonZero
,
1492 MacroAssembler::Address(structureGPR
, Structure::typeInfoFlagsOffset()),
1493 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1496 // It seems that most of the time when programs do a == b where b may be either null/undefined
1497 // or an object, b is usually an object. Balance the branches to make that case fast.
1498 MacroAssembler::Jump rightNotCell
=
1499 m_jit
.branch32(MacroAssembler::NotEqual
, op2TagGPR
, TrustedImm32(JSValue::CellTag
));
1501 // We know that within this branch, rightChild must be a cell.
1502 if (masqueradesAsUndefinedWatchpointValid
) {
1503 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
1505 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1507 MacroAssembler::Equal
,
1508 MacroAssembler::Address(op2PayloadGPR
, JSCell::structureOffset()),
1509 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1511 m_jit
.loadPtr(MacroAssembler::Address(op2PayloadGPR
, JSCell::structureOffset()), structureGPR
);
1513 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, (~SpecCell
) | SpecObject
,
1515 MacroAssembler::Equal
,
1517 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1518 speculationCheck(BadType
, JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
,
1520 MacroAssembler::NonZero
,
1521 MacroAssembler::Address(structureGPR
, Structure::typeInfoFlagsOffset()),
1522 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1525 // At this point we know that we can perform a straight-forward equality comparison on pointer
1526 // values because both left and right are pointers to objects that have no special equality
// Fused branch: equal pointers jump straight to the taken block.
1528 branch32(MacroAssembler::Equal
, op1GPR
, op2PayloadGPR
, taken
);
1530 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1531 // prove that it is either null or undefined.
1532 if (!needsTypeCheck(rightChild
, SpecCell
| SpecOther
))
1533 rightNotCell
.link(&m_jit
);
1535 jump(notTaken
, ForceJump
);
1537 rightNotCell
.link(&m_jit
);
// UndefinedTag | 1 == NullTag: OR-ing 1 folds undefined/null into one compare.
1538 m_jit
.move(op2TagGPR
, resultGPR
);
1539 m_jit
.or32(TrustedImm32(1), resultGPR
);
1542 JSValueRegs(op2TagGPR
, op2PayloadGPR
), rightChild
, SpecCell
| SpecOther
,
1544 MacroAssembler::NotEqual
, resultGPR
,
1545 MacroAssembler::TrustedImm32(JSValue::NullTag
)));
1551 void SpeculativeJIT::compileIntegerCompare(Node
* node
, MacroAssembler::RelationalCondition condition
)
1553 SpeculateIntegerOperand
op1(this, node
->child1());
1554 SpeculateIntegerOperand
op2(this, node
->child2());
1555 GPRTemporary
resultPayload(this);
1557 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), resultPayload
.gpr());
1559 // If we add a DataFormatBool, we should use it here.
1560 booleanResult(resultPayload
.gpr(), node
);
1563 void SpeculativeJIT::compileDoubleCompare(Node
* node
, MacroAssembler::DoubleCondition condition
)
1565 SpeculateDoubleOperand
op1(this, node
->child1());
1566 SpeculateDoubleOperand
op2(this, node
->child2());
1567 GPRTemporary
resultPayload(this);
1569 m_jit
.move(TrustedImm32(1), resultPayload
.gpr());
1570 MacroAssembler::Jump trueCase
= m_jit
.branchDouble(condition
, op1
.fpr(), op2
.fpr());
1571 m_jit
.move(TrustedImm32(0), resultPayload
.gpr());
1572 trueCase
.link(&m_jit
);
1574 booleanResult(resultPayload
.gpr(), node
);
// Compiles a generic ValueAdd: both operands are boxed JSValues, and the work is
// delegated to a C++ operation call (operationValueAddNotNumber when either side
// is statically known not to be a number, otherwise operationValueAdd).
// NOTE(review): lossy extraction -- the flushRegisters() call and the `else`
// between the two callOperation lines are missing from the extracted text; kept
// byte-identical to the extracted source.
1577 void SpeculativeJIT::compileValueAdd(Node
* node
)
1579 JSValueOperand
op1(this, node
->child1());
1580 JSValueOperand
op2(this, node
->child2());
1582 GPRReg op1TagGPR
= op1
.tagGPR();
1583 GPRReg op1PayloadGPR
= op1
.payloadGPR();
1584 GPRReg op2TagGPR
= op2
.tagGPR();
1585 GPRReg op2PayloadGPR
= op2
.payloadGPR();
// Result comes back in the standard call-result register pair (tag + payload).
1589 GPRResult2
resultTag(this);
1590 GPRResult
resultPayload(this);
1591 if (isKnownNotNumber(node
->child1().node()) || isKnownNotNumber(node
->child2().node()))
1592 callOperation(operationValueAddNotNumber
, resultTag
.gpr(), resultPayload
.gpr(), op1TagGPR
, op1PayloadGPR
, op2TagGPR
, op2PayloadGPR
);
1594 callOperation(operationValueAdd
, resultTag
.gpr(), resultPayload
.gpr(), op1TagGPR
, op1PayloadGPR
, op2TagGPR
, op2PayloadGPR
);
1596 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
// Compiles LogicalNot for a value speculated to be (Object | null | undefined):
// objects produce false (with MasqueradesAsUndefined handled via a global-object
// comparison when the watchpoint is invalid); null/undefined produce true.
// NOTE(review): lossy extraction -- DFG_TYPE_CHECK/branchTest8 call openers,
// `else` keywords and closing braces are missing between the numbered
// statements; text kept byte-identical to the extracted source.
1599 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse
)
1601 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
);
1602 GPRTemporary
resultPayload(this);
1603 GPRReg valueTagGPR
= value
.tagGPR();
1604 GPRReg valuePayloadGPR
= value
.payloadGPR();
1605 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1606 GPRTemporary structure
;
1607 GPRReg structureGPR
= InvalidGPRReg
;
1609 bool masqueradesAsUndefinedWatchpointValid
= m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid();
1611 if (!masqueradesAsUndefinedWatchpointValid
) {
1612 // The masquerades as undefined case will use the structure register, so allocate it here.
1613 // Do this at the top of the function to avoid branching around a register allocation.
1614 GPRTemporary
realStructure(this);
1615 structure
.adopt(realStructure
);
1616 structureGPR
= structure
.gpr();
// Cell path: speculate object-ness; result is 0 (logical-not of a truthy object).
1619 MacroAssembler::Jump notCell
= m_jit
.branch32(MacroAssembler::NotEqual
, valueTagGPR
, TrustedImm32(JSValue::CellTag
));
1620 if (masqueradesAsUndefinedWatchpointValid
) {
1621 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
1624 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1626 MacroAssembler::Equal
,
1627 MacroAssembler::Address(valuePayloadGPR
, JSCell::structureOffset()),
1628 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1630 m_jit
.loadPtr(MacroAssembler::Address(valuePayloadGPR
, JSCell::structureOffset()), structureGPR
);
1633 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1635 MacroAssembler::Equal
,
1637 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
// A MasqueradesAsUndefined object is only "undefined-like" within its own
// global object, hence the globalObjectOffset comparison below.
1639 MacroAssembler::Jump isNotMasqueradesAsUndefined
=
1641 MacroAssembler::Zero
,
1642 MacroAssembler::Address(structureGPR
, Structure::typeInfoFlagsOffset()),
1643 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
));
1645 speculationCheck(BadType
, JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
,
1647 MacroAssembler::Equal
,
1648 MacroAssembler::Address(structureGPR
, Structure::globalObjectOffset()),
1649 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
))));
1651 isNotMasqueradesAsUndefined
.link(&m_jit
);
1653 m_jit
.move(TrustedImm32(0), resultPayloadGPR
);
1654 MacroAssembler::Jump done
= m_jit
.jump();
// Non-cell path: must be null/undefined; result is 1.
1656 notCell
.link(&m_jit
);
1658 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
1659 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1660 m_jit
.move(valueTagGPR
, resultPayloadGPR
);
1661 m_jit
.or32(TrustedImm32(1), resultPayloadGPR
);
1663 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, SpecCell
| SpecOther
,
1665 MacroAssembler::NotEqual
,
1667 TrustedImm32(JSValue::NullTag
)));
1669 m_jit
.move(TrustedImm32(1), resultPayloadGPR
);
1673 booleanResult(resultPayloadGPR
, m_currentNode
);
// Compiles a LogicalNot node, dispatching on the child's use kind: boolean
// (xor with 1), object-or-other (delegated), int32 (compare against 0), double
// (branchDoubleNonZero), and the generic boxed-value path with a slow-path call
// to dfgConvertJSValueToBoolean.
// NOTE(review): lossy extraction -- several `case` labels, braces and `break`/
// `return` statements between the numbered statements are missing; text kept
// byte-identical to the extracted source.
1676 void SpeculativeJIT::compileLogicalNot(Node
* node
)
1678 switch (node
->child1().useKind()) {
// Boolean child: !x == x ^ 1 on the unboxed payload.
1680 SpeculateBooleanOperand
value(this, node
->child1());
1681 GPRTemporary
result(this, value
);
1682 m_jit
.xor32(TrustedImm32(1), value
.gpr(), result
.gpr());
1683 booleanResult(result
.gpr(), node
);
1687 case ObjectOrOtherUse
: {
1688 compileObjectOrOtherLogicalNot(node
->child1());
// Int32 child: !x is (x == 0).
1693 SpeculateIntegerOperand
value(this, node
->child1());
1694 GPRTemporary
resultPayload(this, value
);
1695 m_jit
.compare32(MacroAssembler::Equal
, value
.gpr(), MacroAssembler::TrustedImm32(0), resultPayload
.gpr());
1696 booleanResult(resultPayload
.gpr(), node
);
// Double child: result defaults to 0, overwritten with 1 when the value is
// zero/NaN (branchDoubleNonZero skips the overwrite for non-zero values).
1701 SpeculateDoubleOperand
value(this, node
->child1());
1702 FPRTemporary
scratch(this);
1703 GPRTemporary
resultPayload(this);
1704 m_jit
.move(TrustedImm32(0), resultPayload
.gpr());
1705 MacroAssembler::Jump nonZero
= m_jit
.branchDoubleNonZero(value
.fpr(), scratch
.fpr());
1706 m_jit
.move(TrustedImm32(1), resultPayload
.gpr());
1707 nonZero
.link(&m_jit
);
1708 booleanResult(resultPayload
.gpr(), node
);
// Generic boxed-value child: fast path assumes a boolean tag; anything else
// falls back to the dfgConvertJSValueToBoolean slow path generator.
1713 JSValueOperand
arg1(this, node
->child1());
1714 GPRTemporary
resultPayload(this, arg1
, false);
1715 GPRReg arg1TagGPR
= arg1
.tagGPR();
1716 GPRReg arg1PayloadGPR
= arg1
.payloadGPR();
1717 GPRReg resultPayloadGPR
= resultPayload
.gpr();
1721 JITCompiler::Jump slowCase
= m_jit
.branch32(JITCompiler::NotEqual
, arg1TagGPR
, TrustedImm32(JSValue::BooleanTag
));
1723 m_jit
.move(arg1PayloadGPR
, resultPayloadGPR
);
1725 addSlowPathGenerator(
1727 slowCase
, this, dfgConvertJSValueToBoolean
, resultPayloadGPR
, arg1TagGPR
,
// The xor below inverts the (fast- or slow-path) boolean payload in place.
1730 m_jit
.xor32(TrustedImm32(1), resultPayloadGPR
);
1731 booleanResult(resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
1736 RELEASE_ASSERT_NOT_REACHED();
// emitObjectOrOtherBranch: emits a conditional branch for a value speculated
// to be an object or "other" (null/undefined). Objects branch to `taken`;
// non-cells are checked against the null/undefined tags.
// NOTE(review): extraction-garbled text — fused numbers are original line
// numbers, and the heads of several speculationCheck/typeCheck calls (e.g.
// before line 1754 and 1788) were dropped; only their argument lists remain.
1741 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse
, BlockIndex taken
, BlockIndex notTaken
)
// ManualOperandSpeculation: type checks are emitted explicitly below rather
// than by the operand's constructor.
1743 JSValueOperand
value(this, nodeUse
, ManualOperandSpeculation
)
1744 GPRTemporary
scratch(this);
1745 GPRReg valueTagGPR
= value
.tagGPR();
1746 GPRReg valuePayloadGPR
= value
.payloadGPR();
1747 GPRReg scratchGPR
= scratch
.gpr();
// Split on cell vs. non-cell by comparing the 32-bit tag word to CellTag.
1749 MacroAssembler::Jump notCell
= m_jit
.branch32(MacroAssembler::NotEqual
, valueTagGPR
, TrustedImm32(JSValue::CellTag
));
// Fast path: if the global object's masquerades-as-undefined watchpoint is
// still valid, register this compilation on it and only check that the cell
// is not a string (its Structure != the VM's string structure).
1750 if (m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
1751 m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
1754 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1756 MacroAssembler::Equal
,
1757 MacroAssembler::Address(valuePayloadGPR
, JSCell::structureOffset()),
1758 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
// Slow path: load the cell's Structure so both the string check and the
// MasqueradesAsUndefined flag can be inspected.
1760 m_jit
.loadPtr(MacroAssembler::Address(valuePayloadGPR
, JSCell::structureOffset()), scratchGPR
);
1763 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, (~SpecCell
) | SpecObject
,
1765 MacroAssembler::Equal
,
1767 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
// If the MasqueradesAsUndefined type-info bit is clear, skip the global
// object comparison below.
1769 JITCompiler::Jump isNotMasqueradesAsUndefined
= m_jit
.branchTest8(JITCompiler::Zero
, MacroAssembler::Address(scratchGPR
, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined
));
// A masquerading object only acts undefined in its own global object; fail
// speculation (BadType) if the structure's global object matches this one.
1771 speculationCheck(BadType
, JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
,
1773 MacroAssembler::Equal
,
1774 MacroAssembler::Address(scratchGPR
, Structure::globalObjectOffset()),
1775 MacroAssembler::TrustedImmPtr(m_jit
.graph().globalObjectFor(m_currentNode
->codeOrigin
))));
1777 isNotMasqueradesAsUndefined
.link(&m_jit
);
// Object case is truthy: unconditionally branch to `taken`.
1779 jump(taken
, ForceJump
);
1781 notCell
.link(&m_jit
);
// Non-cell case relies on UndefinedTag|1 == NullTag so that OR-ing 1 into
// the tag folds both null and undefined into a single compare; the
// COMPILE_ASSERT pins that encoding invariant.
1783 COMPILE_ASSERT((JSValue::UndefinedTag
| 1) == JSValue::NullTag
, UndefinedTag_OR_1_EQUALS_NullTag
);
1784 if (needsTypeCheck(nodeUse
, SpecCell
| SpecOther
)) {
1785 m_jit
.move(valueTagGPR
, scratchGPR
);
1786 m_jit
.or32(TrustedImm32(1), scratchGPR
);
1788 JSValueRegs(valueTagGPR
, valuePayloadGPR
), nodeUse
, SpecCell
| SpecOther
,
1789 m_jit
.branch32(MacroAssembler::NotEqual
, scratchGPR
, TrustedImm32(JSValue::NullTag
)));
// Branch nodes produce no value.
1794 noResult(m_currentNode
);
// emitBranch: emits the conditional branch for a Branch node, dispatching on
// child1's use kind (boolean, object-or-other, int32, double, untyped).
// NOTE(review): extraction-garbled text — fused numbers are original line
// numbers; case labels, break statements, the bodies of the taken/notTaken
// swaps (only `BlockIndex tmp = taken;` survives), and trailing jump(...)
// fall-through calls were dropped by the extraction.
1797 void SpeculativeJIT::emitBranch(Node
* node
)
1799 BlockIndex taken
= node
->takenBlockIndex();
1800 BlockIndex notTaken
= node
->notTakenBlockIndex();
1802 switch (node
->child1().useKind()) {
// Boolean case: test the 0/1 payload. If `taken` is the fall-through block,
// the condition is inverted (NonZero -> Zero) and the targets are swapped so
// only one branch instruction is needed.
1804 SpeculateBooleanOperand
value(this, node
->child1());
1805 MacroAssembler::ResultCondition condition
= MacroAssembler::NonZero
;
1807 if (taken
== nextBlock()) {
1808 condition
= MacroAssembler::Zero
;
1809 BlockIndex tmp
= taken
;
1814 branchTest32(condition
, value
.gpr(), TrustedImm32(1), taken
);
// Object-or-other case: delegated to emitObjectOrOtherBranch above.
1821 case ObjectOrOtherUse
: {
1822 emitObjectOrOtherBranch(node
->child1(), taken
, notTaken
);
// Int32 case: same fall-through inversion trick via the `invert` flag, then
// a single test of the integer payload.
1828 if (node
->child1().useKind() == Int32Use
) {
1829 bool invert
= false;
1831 if (taken
== nextBlock()) {
1833 BlockIndex tmp
= taken
;
1838 SpeculateIntegerOperand
value(this, node
->child1());
1839 branchTest32(invert
? MacroAssembler::Zero
: MacroAssembler::NonZero
, value
.gpr(), taken
);
// Double case: branch on the double being non-zero.
1841 SpeculateDoubleOperand
value(this, node
->child1());
1842 FPRTemporary
scratch(this);
1843 branchDoubleNonZero(value
.fpr(), scratch
.fpr(), taken
);
// Untyped case: fast paths for Int32Tag (branch on payload != 0) and
// BooleanTag (handled by falling through fastPath/slowPath links); any other
// tag spills registers and calls dfgConvertJSValueToBoolean, then branches
// on its result.
1853 JSValueOperand
value(this, node
->child1());
1855 GPRReg valueTagGPR
= value
.tagGPR();
1856 GPRReg valuePayloadGPR
= value
.payloadGPR();
1858 GPRTemporary
result(this);
1859 GPRReg resultGPR
= result
.gpr();
// Children are consumed manually here, hence UseChildrenCalledExplicitly in
// noResult below.
1861 use(node
->child1());
1863 JITCompiler::Jump fastPath
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::Int32Tag
));
1864 JITCompiler::Jump slowPath
= m_jit
.branch32(JITCompiler::NotEqual
, valueTagGPR
, JITCompiler::TrustedImm32(JSValue::BooleanTag
));
1866 fastPath
.link(&m_jit
);
1867 branchTest32(JITCompiler::Zero
, valuePayloadGPR
, notTaken
);
1868 jump(taken
, ForceJump
);
1870 slowPath
.link(&m_jit
);
1871 silentSpillAllRegisters(resultGPR
);
1872 callOperation(dfgConvertJSValueToBoolean
, resultGPR
, valueTagGPR
, valuePayloadGPR
);
1873 silentFillAllRegisters(resultGPR
);
1875 branchTest32(JITCompiler::NonZero
, resultGPR
, taken
);
1878 noResult(node
, UseChildrenCalledExplicitly
);
// Default case: any other use kind is a compiler bug.
1883 RELEASE_ASSERT_NOT_REACHED();
// compileContiguousPutByVal: emits the store for PutByVal/PutByValAlias into
// contiguous (or int32) butterfly storage, handling the in-bounds, hole, and
// out-of-bounds cases. Templated so the same code serves Int32 stores (where
// valueTag is a TrustedImm32(Int32Tag)) and Contiguous stores (where it is a
// tag register) — TagType abstracts over both.
// NOTE(review): extraction-garbled text — fused numbers are original line
// numbers, and the heads of some calls (e.g. the speculationCheck before
// line 1915 and the slow-path-generator construction around 1941-1944) were
// dropped; only their argument lists remain.
1888 template<typename BaseOperandType
, typename PropertyOperandType
, typename ValueOperandType
, typename TagType
>
1889 void SpeculativeJIT::compileContiguousPutByVal(Node
* node
, BaseOperandType
& base
, PropertyOperandType
& property
, ValueOperandType
& value
, GPRReg valuePayloadReg
, TagType valueTag
)
// varArg child 3 is the storage edge for this node.
1891 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
1893 ArrayMode arrayMode
= node
->arrayMode();
1895 GPRReg baseReg
= base
.gpr();
1896 GPRReg propertyReg
= property
.gpr();
1898 StorageOperand
storage(this, child4
);
1899 GPRReg storageReg
= storage
.gpr();
// PutByValAlias: bounds were already checked by a prior access, so store the
// tag and payload words directly (32_64 boxing: two 32-bit stores at
// TimesEight scale into the JSValue's tag/payload halves).
1901 if (node
->op() == PutByValAlias
) {
1902 // Store the value to the array.
1903 GPRReg propertyReg
= property
.gpr();
1904 m_jit
.store32(valueTag
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
1905 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
1911 MacroAssembler::Jump slowCase
;
// In-bounds mode: index >= publicLength fails speculation with
// StoreToHoleOrOutOfBounds (the call head preceding line 1915 was lost in
// extraction).
1913 if (arrayMode
.isInBounds()) {
1915 StoreToHoleOrOutOfBounds
, JSValueRegs(), 0,
1916 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
// Otherwise: index < publicLength goes straight to the store; index >=
// vectorLength is the slow case (speculation failure unless the mode allows
// out-of-bounds stores); in between is an append, which bumps publicLength
// to index + 1 (add 1, store, subtract 1 restores propertyReg for the
// element store below).
1918 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1920 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
1922 if (!arrayMode
.isOutOfBounds())
1923 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
1925 m_jit
.add32(TrustedImm32(1), propertyReg
);
1926 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1927 m_jit
.sub32(TrustedImm32(1), propertyReg
);
1929 inBounds
.link(&m_jit
);
// Common store of the value's tag and payload words.
1932 m_jit
.store32(valueTag
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
1933 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
// Out-of-bounds mode: the slow case calls the strict/non-strict beyond-
// array-bounds operation via a slow path generator.
1940 if (arrayMode
.isOutOfBounds()) {
1941 addSlowPathGenerator(
1944 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
1945 NoResult
, baseReg
, propertyReg
, valueTag
, valuePayloadReg
));
// Children were consumed by the operands above, so suppress useChildren.
1948 noResult(node
, UseChildrenCalledExplicitly
);
1951 void SpeculativeJIT::compile(Node
* node
)
1953 NodeType op
= node
->op();
1955 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1956 m_jit
.clearRegisterAllocationOffsets();
1961 initConstantInfo(node
);
1964 case PhantomArguments
:
1965 initConstantInfo(node
);
1968 case WeakJSConstant
:
1969 m_jit
.addWeakReference(node
->weakConstant());
1970 initConstantInfo(node
);
1974 RELEASE_ASSERT_NOT_REACHED();
1979 SpeculatedType prediction
= node
->variableAccessData()->prediction();
1980 AbstractValue
& value
= m_state
.variables().operand(node
->local());
1982 // If we have no prediction for this local, then don't attempt to compile.
1983 if (prediction
== SpecNone
) {
1984 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
1988 // If the CFA is tracking this variable and it found that the variable
1989 // cannot have been assigned, then don't attempt to proceed.
1990 if (value
.isClear()) {
1991 // FIXME: We should trap instead.
1992 // https://bugs.webkit.org/show_bug.cgi?id=110383
1993 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
1997 if (node
->variableAccessData()->shouldUseDoubleFormat()) {
1998 FPRTemporary
result(this);
1999 m_jit
.loadDouble(JITCompiler::addressFor(node
->local()), result
.fpr());
2000 VirtualRegister virtualRegister
= node
->virtualRegister();
2001 m_fprs
.retain(result
.fpr(), virtualRegister
, SpillOrderDouble
);
2002 m_generationInfo
[virtualRegister
].initDouble(node
, node
->refCount(), result
.fpr());
2006 if (isInt32Speculation(value
.m_type
)) {
2007 GPRTemporary
result(this);
2008 m_jit
.load32(JITCompiler::payloadFor(node
->local()), result
.gpr());
2010 // Like integerResult, but don't useChildren - our children are phi nodes,
2011 // and don't represent values within this dataflow with virtual registers.
2012 VirtualRegister virtualRegister
= node
->virtualRegister();
2013 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderInteger
);
2014 m_generationInfo
[virtualRegister
].initInteger(node
, node
->refCount(), result
.gpr());
2018 if (isCellSpeculation(value
.m_type
)) {
2019 GPRTemporary
result(this);
2020 m_jit
.load32(JITCompiler::payloadFor(node
->local()), result
.gpr());
2022 // Like cellResult, but don't useChildren - our children are phi nodes,
2023 // and don't represent values within this dataflow with virtual registers.
2024 VirtualRegister virtualRegister
= node
->virtualRegister();
2025 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderCell
);
2026 m_generationInfo
[virtualRegister
].initCell(node
, node
->refCount(), result
.gpr());
2030 if (isBooleanSpeculation(value
.m_type
)) {
2031 GPRTemporary
result(this);
2032 m_jit
.load32(JITCompiler::payloadFor(node
->local()), result
.gpr());
2034 // Like booleanResult, but don't useChildren - our children are phi nodes,
2035 // and don't represent values within this dataflow with virtual registers.
2036 VirtualRegister virtualRegister
= node
->virtualRegister();
2037 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderBoolean
);
2038 m_generationInfo
[virtualRegister
].initBoolean(node
, node
->refCount(), result
.gpr());
2042 GPRTemporary
result(this);
2043 GPRTemporary
tag(this);
2044 m_jit
.load32(JITCompiler::payloadFor(node
->local()), result
.gpr());
2045 m_jit
.load32(JITCompiler::tagFor(node
->local()), tag
.gpr());
2047 // Like jsValueResult, but don't useChildren - our children are phi nodes,
2048 // and don't represent values within this dataflow with virtual registers.
2049 VirtualRegister virtualRegister
= node
->virtualRegister();
2050 m_gprs
.retain(result
.gpr(), virtualRegister
, SpillOrderJS
);
2051 m_gprs
.retain(tag
.gpr(), virtualRegister
, SpillOrderJS
);
2053 m_generationInfo
[virtualRegister
].initJSValue(node
, node
->refCount(), tag
.gpr(), result
.gpr(), DataFormatJS
);
2057 case GetLocalUnlinked
: {
2058 GPRTemporary
payload(this);
2059 GPRTemporary
tag(this);
2060 m_jit
.load32(JITCompiler::payloadFor(node
->unlinkedLocal()), payload
.gpr());
2061 m_jit
.load32(JITCompiler::tagFor(node
->unlinkedLocal()), tag
.gpr());
2062 jsValueResult(tag
.gpr(), payload
.gpr(), node
);
2066 case MovHintAndCheck
: {
2067 compileMovHintAndCheck(node
);
2072 compileInlineStart(node
);
2078 RELEASE_ASSERT_NOT_REACHED();
2083 // SetLocal doubles as a hint as to where a node will be stored and
2084 // as a speculation point. So before we speculate make sure that we
2085 // know where the child of this node needs to go in the virtual
2087 compileMovHint(node
);
2089 if (node
->variableAccessData()->shouldUnboxIfPossible()) {
2090 if (node
->variableAccessData()->shouldUseDoubleFormat()) {
2091 SpeculateDoubleOperand
value(this, node
->child1());
2092 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
->local()));
2094 // Indicate that it's no longer necessary to retrieve the value of
2095 // this bytecode variable from registers or other locations in the stack,
2096 // but that it is stored as a double.
2097 recordSetLocal(node
->local(), ValueSource(DoubleInJSStack
));
2100 SpeculatedType predictedType
= node
->variableAccessData()->argumentAwarePrediction();
2101 if (m_generationInfo
[node
->child1()->virtualRegister()].registerFormat() == DataFormatDouble
) {
2102 SpeculateDoubleOperand
value(this, node
->child1(), ManualOperandSpeculation
);
2103 m_jit
.storeDouble(value
.fpr(), JITCompiler::addressFor(node
->local()));
2105 recordSetLocal(node
->local(), ValueSource(DoubleInJSStack
));
2108 if (isInt32Speculation(predictedType
)) {
2109 SpeculateIntegerOperand
value(this, node
->child1());
2110 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->local()));
2112 recordSetLocal(node
->local(), ValueSource(Int32InJSStack
));
2115 if (isCellSpeculation(predictedType
)) {
2116 SpeculateCellOperand
cell(this, node
->child1());
2117 GPRReg cellGPR
= cell
.gpr();
2118 m_jit
.storePtr(cellGPR
, JITCompiler::payloadFor(node
->local()));
2120 recordSetLocal(node
->local(), ValueSource(CellInJSStack
));
2123 if (isBooleanSpeculation(predictedType
)) {
2124 SpeculateBooleanOperand
value(this, node
->child1());
2125 m_jit
.store32(value
.gpr(), JITCompiler::payloadFor(node
->local()));
2127 recordSetLocal(node
->local(), ValueSource(BooleanInJSStack
));
2131 JSValueOperand
value(this, node
->child1());
2132 m_jit
.store32(value
.payloadGPR(), JITCompiler::payloadFor(node
->local()));
2133 m_jit
.store32(value
.tagGPR(), JITCompiler::tagFor(node
->local()));
2135 recordSetLocal(node
->local(), ValueSource(ValueInJSStack
));
2137 // If we're storing an arguments object that has been optimized away,
2138 // our variable event stream for OSR exit now reflects the optimized
2139 // value (JSValue()). On the slow path, we want an arguments object
2140 // instead. We add an additional move hint to show OSR exit that it
2141 // needs to reconstruct the arguments object.
2142 if (node
->child1()->op() == PhantomArguments
)
2143 compileMovHint(node
);
2149 // This is a no-op; it just marks the fact that the argument is being used.
2150 // But it may be profitable to use this as a hook to run speculation checks
2151 // on arguments, thereby allowing us to trivially eliminate such checks if
2152 // the argument is not used.
2158 if (isInt32Constant(node
->child1().node())) {
2159 SpeculateIntegerOperand
op2(this, node
->child2());
2160 GPRTemporary
result(this, op2
);
2162 bitOp(op
, valueOfInt32Constant(node
->child1().node()), op2
.gpr(), result
.gpr());
2164 integerResult(result
.gpr(), node
);
2165 } else if (isInt32Constant(node
->child2().node())) {
2166 SpeculateIntegerOperand
op1(this, node
->child1());
2167 GPRTemporary
result(this, op1
);
2169 bitOp(op
, valueOfInt32Constant(node
->child2().node()), op1
.gpr(), result
.gpr());
2171 integerResult(result
.gpr(), node
);
2173 SpeculateIntegerOperand
op1(this, node
->child1());
2174 SpeculateIntegerOperand
op2(this, node
->child2());
2175 GPRTemporary
result(this, op1
, op2
);
2177 GPRReg reg1
= op1
.gpr();
2178 GPRReg reg2
= op2
.gpr();
2179 bitOp(op
, reg1
, reg2
, result
.gpr());
2181 integerResult(result
.gpr(), node
);
2188 if (isInt32Constant(node
->child2().node())) {
2189 SpeculateIntegerOperand
op1(this, node
->child1());
2190 GPRTemporary
result(this, op1
);
2192 shiftOp(op
, op1
.gpr(), valueOfInt32Constant(node
->child2().node()) & 0x1f, result
.gpr());
2194 integerResult(result
.gpr(), node
);
2196 // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
2197 SpeculateIntegerOperand
op1(this, node
->child1());
2198 SpeculateIntegerOperand
op2(this, node
->child2());
2199 GPRTemporary
result(this, op1
);
2201 GPRReg reg1
= op1
.gpr();
2202 GPRReg reg2
= op2
.gpr();
2203 shiftOp(op
, reg1
, reg2
, result
.gpr());
2205 integerResult(result
.gpr(), node
);
2209 case UInt32ToNumber
: {
2210 compileUInt32ToNumber(node
);
2214 case DoubleAsInt32
: {
2215 compileDoubleAsInt32(node
);
2219 case ValueToInt32
: {
2220 compileValueToInt32(node
);
2225 case ForwardInt32ToDouble
: {
2226 compileInt32ToDouble(node
);
2236 compileMakeRope(node
);
2240 compileArithSub(node
);
2244 compileArithNegate(node
);
2248 compileArithMul(node
);
2252 compileArithIMul(node
);
2256 switch (node
->binaryUseKind()) {
2259 compileIntegerArithDivForX86(node
);
2261 compileIntegerArithDivForARM64(node
);
2262 #elif CPU(APPLE_ARMV7S)
2263 compileIntegerArithDivForARMv7s(node
);
2264 #else // CPU type without integer divide
2265 RELEASE_ASSERT_NOT_REACHED(); // should have been coverted into a double divide.
2271 SpeculateDoubleOperand
op1(this, node
->child1());
2272 SpeculateDoubleOperand
op2(this, node
->child2());
2273 FPRTemporary
result(this, op1
);
2275 FPRReg reg1
= op1
.fpr();
2276 FPRReg reg2
= op2
.fpr();
2277 m_jit
.divDouble(reg1
, reg2
, result
.fpr());
2279 doubleResult(result
.fpr(), node
);
2284 RELEASE_ASSERT_NOT_REACHED();
2291 compileArithMod(node
);
2296 switch (node
->child1().useKind()) {
2298 SpeculateIntegerOperand
op1(this, node
->child1());
2299 GPRTemporary
result(this, op1
);
2300 GPRTemporary
scratch(this);
2302 m_jit
.zeroExtend32ToPtr(op1
.gpr(), result
.gpr());
2303 m_jit
.rshift32(result
.gpr(), MacroAssembler::TrustedImm32(31), scratch
.gpr());
2304 m_jit
.add32(scratch
.gpr(), result
.gpr());
2305 m_jit
.xor32(scratch
.gpr(), result
.gpr());
2306 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, result
.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2307 integerResult(result
.gpr(), node
);
2313 SpeculateDoubleOperand
op1(this, node
->child1());
2314 FPRTemporary
result(this);
2316 m_jit
.absDouble(op1
.fpr(), result
.fpr());
2317 doubleResult(result
.fpr(), node
);
2322 RELEASE_ASSERT_NOT_REACHED();
2330 switch (node
->binaryUseKind()) {
2332 SpeculateStrictInt32Operand
op1(this, node
->child1());
2333 SpeculateStrictInt32Operand
op2(this, node
->child2());
2334 GPRTemporary
result(this, op1
);
2336 GPRReg op1GPR
= op1
.gpr();
2337 GPRReg op2GPR
= op2
.gpr();
2338 GPRReg resultGPR
= result
.gpr();
2340 MacroAssembler::Jump op1Less
= m_jit
.branch32(op
== ArithMin
? MacroAssembler::LessThan
: MacroAssembler::GreaterThan
, op1GPR
, op2GPR
);
2341 m_jit
.move(op2GPR
, resultGPR
);
2342 if (op1GPR
!= resultGPR
) {
2343 MacroAssembler::Jump done
= m_jit
.jump();
2344 op1Less
.link(&m_jit
);
2345 m_jit
.move(op1GPR
, resultGPR
);
2348 op1Less
.link(&m_jit
);
2350 integerResult(resultGPR
, node
);
2355 SpeculateDoubleOperand
op1(this, node
->child1());
2356 SpeculateDoubleOperand
op2(this, node
->child2());
2357 FPRTemporary
result(this, op1
);
2359 FPRReg op1FPR
= op1
.fpr();
2360 FPRReg op2FPR
= op2
.fpr();
2361 FPRReg resultFPR
= result
.fpr();
2363 MacroAssembler::JumpList done
;
2365 MacroAssembler::Jump op1Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleLessThan
: MacroAssembler::DoubleGreaterThan
, op1FPR
, op2FPR
);
2367 // op2 is eather the lesser one or one of then is NaN
2368 MacroAssembler::Jump op2Less
= m_jit
.branchDouble(op
== ArithMin
? MacroAssembler::DoubleGreaterThanOrEqual
: MacroAssembler::DoubleLessThanOrEqual
, op1FPR
, op2FPR
);
2370 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2371 // op1 + op2 and putting it into result.
2372 m_jit
.addDouble(op1FPR
, op2FPR
, resultFPR
);
2373 done
.append(m_jit
.jump());
2375 op2Less
.link(&m_jit
);
2376 m_jit
.moveDouble(op2FPR
, resultFPR
);
2378 if (op1FPR
!= resultFPR
) {
2379 done
.append(m_jit
.jump());
2381 op1Less
.link(&m_jit
);
2382 m_jit
.moveDouble(op1FPR
, resultFPR
);
2384 op1Less
.link(&m_jit
);
2388 doubleResult(resultFPR
, node
);
2393 RELEASE_ASSERT_NOT_REACHED();
2400 SpeculateDoubleOperand
op1(this, node
->child1());
2401 FPRTemporary
result(this, op1
);
2403 m_jit
.sqrtDouble(op1
.fpr(), result
.fpr());
2405 doubleResult(result
.fpr(), node
);
2410 compileLogicalNot(node
);
2414 if (compare(node
, JITCompiler::LessThan
, JITCompiler::DoubleLessThan
, operationCompareLess
))
2419 if (compare(node
, JITCompiler::LessThanOrEqual
, JITCompiler::DoubleLessThanOrEqual
, operationCompareLessEq
))
2423 case CompareGreater
:
2424 if (compare(node
, JITCompiler::GreaterThan
, JITCompiler::DoubleGreaterThan
, operationCompareGreater
))
2428 case CompareGreaterEq
:
2429 if (compare(node
, JITCompiler::GreaterThanOrEqual
, JITCompiler::DoubleGreaterThanOrEqual
, operationCompareGreaterEq
))
2433 case CompareEqConstant
:
2434 ASSERT(isNullConstant(node
->child2().node()));
2435 if (nonSpeculativeCompareNull(node
, node
->child1()))
2440 if (compare(node
, JITCompiler::Equal
, JITCompiler::DoubleEqual
, operationCompareEq
))
2444 case CompareStrictEqConstant
:
2445 if (compileStrictEqForConstant(node
, node
->child1(), valueOfJSConstant(node
->child2().node())))
2449 case CompareStrictEq
:
2450 if (compileStrictEq(node
))
2454 case StringCharCodeAt
: {
2455 compileGetCharCodeAt(node
);
2459 case StringCharAt
: {
2460 // Relies on StringCharAt node having same basic layout as GetByVal
2461 compileGetByValOnString(node
);
2465 case StringFromCharCode
: {
2466 compileFromCharCode(node
);
2476 case ArrayifyToStructure
: {
2482 switch (node
->arrayMode().type()) {
2483 case Array::SelectUsingPredictions
:
2484 case Array::ForceExit
:
2485 RELEASE_ASSERT_NOT_REACHED();
2486 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2488 case Array::Generic
: {
2489 SpeculateCellOperand
base(this, node
->child1()); // Save a register, speculate cell. We'll probably be right.
2490 JSValueOperand
property(this, node
->child2());
2491 GPRReg baseGPR
= base
.gpr();
2492 GPRReg propertyTagGPR
= property
.tagGPR();
2493 GPRReg propertyPayloadGPR
= property
.payloadGPR();
2496 GPRResult2
resultTag(this);
2497 GPRResult
resultPayload(this);
2498 callOperation(operationGetByValCell
, resultTag
.gpr(), resultPayload
.gpr(), baseGPR
, propertyTagGPR
, propertyPayloadGPR
);
2500 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2504 case Array::Contiguous
: {
2505 if (node
->arrayMode().isInBounds()) {
2506 SpeculateStrictInt32Operand
property(this, node
->child2());
2507 StorageOperand
storage(this, node
->child3());
2509 GPRReg propertyReg
= property
.gpr();
2510 GPRReg storageReg
= storage
.gpr();
2515 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2517 GPRTemporary
resultPayload(this);
2518 if (node
->arrayMode().type() == Array::Int32
) {
2520 OutOfBounds
, JSValueRegs(), 0,
2522 MacroAssembler::Equal
,
2523 MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)),
2524 TrustedImm32(JSValue::EmptyValueTag
)));
2525 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
.gpr());
2526 integerResult(resultPayload
.gpr(), node
);
2530 GPRTemporary
resultTag(this);
2531 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
.gpr());
2532 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
)));
2533 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
.gpr());
2534 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2538 SpeculateCellOperand
base(this, node
->child1());
2539 SpeculateStrictInt32Operand
property(this, node
->child2());
2540 StorageOperand
storage(this, node
->child3());
2542 GPRReg baseReg
= base
.gpr();
2543 GPRReg propertyReg
= property
.gpr();
2544 GPRReg storageReg
= storage
.gpr();
2549 GPRTemporary
resultTag(this);
2550 GPRTemporary
resultPayload(this);
2551 GPRReg resultTagReg
= resultTag
.gpr();
2552 GPRReg resultPayloadReg
= resultPayload
.gpr();
2554 MacroAssembler::JumpList slowCases
;
2556 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2558 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagReg
);
2559 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadReg
);
2560 slowCases
.append(m_jit
.branch32(MacroAssembler::Equal
, resultTagReg
, TrustedImm32(JSValue::EmptyValueTag
)));
2562 addSlowPathGenerator(
2564 slowCases
, this, operationGetByValArrayInt
,
2565 JSValueRegs(resultTagReg
, resultPayloadReg
), baseReg
, propertyReg
));
2567 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2570 case Array::Double
: {
2571 if (node
->arrayMode().isInBounds()) {
2572 if (node
->arrayMode().isSaneChain()) {
2573 JSGlobalObject
* globalObject
= m_jit
.globalObjectFor(node
->codeOrigin
);
2574 ASSERT(globalObject
->arrayPrototypeChainIsSane());
2575 globalObject
->arrayPrototype()->structure()->addTransitionWatchpoint(speculationWatchpoint());
2576 globalObject
->objectPrototype()->structure()->addTransitionWatchpoint(speculationWatchpoint());
2579 SpeculateStrictInt32Operand
property(this, node
->child2());
2580 StorageOperand
storage(this, node
->child3());
2582 GPRReg propertyReg
= property
.gpr();
2583 GPRReg storageReg
= storage
.gpr();
2588 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2590 FPRTemporary
result(this);
2591 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), result
.fpr());
2592 if (!node
->arrayMode().isSaneChain())
2593 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, result
.fpr(), result
.fpr()));
2594 doubleResult(result
.fpr(), node
);
2598 SpeculateCellOperand
base(this, node
->child1());
2599 SpeculateStrictInt32Operand
property(this, node
->child2());
2600 StorageOperand
storage(this, node
->child3());
2602 GPRReg baseReg
= base
.gpr();
2603 GPRReg propertyReg
= property
.gpr();
2604 GPRReg storageReg
= storage
.gpr();
2609 GPRTemporary
resultTag(this);
2610 GPRTemporary
resultPayload(this);
2611 FPRTemporary
temp(this);
2612 GPRReg resultTagReg
= resultTag
.gpr();
2613 GPRReg resultPayloadReg
= resultPayload
.gpr();
2614 FPRReg tempReg
= temp
.fpr();
2616 MacroAssembler::JumpList slowCases
;
2618 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2620 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), tempReg
);
2621 slowCases
.append(m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempReg
, tempReg
));
2622 boxDouble(tempReg
, resultTagReg
, resultPayloadReg
);
2624 addSlowPathGenerator(
2626 slowCases
, this, operationGetByValArrayInt
,
2627 JSValueRegs(resultTagReg
, resultPayloadReg
), baseReg
, propertyReg
));
2629 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2632 case Array::ArrayStorage
:
2633 case Array::SlowPutArrayStorage
: {
2634 if (node
->arrayMode().isInBounds()) {
2635 SpeculateStrictInt32Operand
property(this, node
->child2());
2636 StorageOperand
storage(this, node
->child3());
2637 GPRReg propertyReg
= property
.gpr();
2638 GPRReg storageReg
= storage
.gpr();
2643 speculationCheck(OutOfBounds
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset())));
2645 GPRTemporary
resultTag(this);
2646 GPRTemporary
resultPayload(this);
2648 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
.gpr());
2649 speculationCheck(LoadFromHole
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
)));
2650 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
.gpr());
2652 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2656 SpeculateCellOperand
base(this, node
->child1());
2657 SpeculateStrictInt32Operand
property(this, node
->child2());
2658 StorageOperand
storage(this, node
->child3());
2659 GPRReg propertyReg
= property
.gpr();
2660 GPRReg storageReg
= storage
.gpr();
2661 GPRReg baseReg
= base
.gpr();
2666 GPRTemporary
resultTag(this);
2667 GPRTemporary
resultPayload(this);
2668 GPRReg resultTagReg
= resultTag
.gpr();
2669 GPRReg resultPayloadReg
= resultPayload
.gpr();
2671 JITCompiler::Jump outOfBounds
= m_jit
.branch32(
2672 MacroAssembler::AboveOrEqual
, propertyReg
,
2673 MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2675 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTagReg
);
2676 JITCompiler::Jump hole
= m_jit
.branch32(
2677 MacroAssembler::Equal
, resultTag
.gpr(), TrustedImm32(JSValue::EmptyValueTag
));
2678 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayloadReg
);
2680 JITCompiler::JumpList slowCases
;
2681 slowCases
.append(outOfBounds
);
2682 slowCases
.append(hole
);
2683 addSlowPathGenerator(
2685 slowCases
, this, operationGetByValArrayInt
,
2686 JSValueRegs(resultTagReg
, resultPayloadReg
),
2687 baseReg
, propertyReg
));
2689 jsValueResult(resultTagReg
, resultPayloadReg
, node
);
2693 compileGetByValOnString(node
);
2695 case Array::Arguments
:
2696 compileGetByValOnArguments(node
);
2698 case Array::Int8Array
:
2699 compileGetByValOnIntTypedArray(m_jit
.vm()->int8ArrayDescriptor(), node
, sizeof(int8_t), SignedTypedArray
);
2701 case Array::Int16Array
:
2702 compileGetByValOnIntTypedArray(m_jit
.vm()->int16ArrayDescriptor(), node
, sizeof(int16_t), SignedTypedArray
);
2704 case Array::Int32Array
:
2705 compileGetByValOnIntTypedArray(m_jit
.vm()->int32ArrayDescriptor(), node
, sizeof(int32_t), SignedTypedArray
);
2707 case Array::Uint8Array
:
2708 compileGetByValOnIntTypedArray(m_jit
.vm()->uint8ArrayDescriptor(), node
, sizeof(uint8_t), UnsignedTypedArray
);
2710 case Array::Uint8ClampedArray
:
2711 compileGetByValOnIntTypedArray(m_jit
.vm()->uint8ClampedArrayDescriptor(), node
, sizeof(uint8_t), UnsignedTypedArray
);
2713 case Array::Uint16Array
:
2714 compileGetByValOnIntTypedArray(m_jit
.vm()->uint16ArrayDescriptor(), node
, sizeof(uint16_t), UnsignedTypedArray
);
2716 case Array::Uint32Array
:
2717 compileGetByValOnIntTypedArray(m_jit
.vm()->uint32ArrayDescriptor(), node
, sizeof(uint32_t), UnsignedTypedArray
);
2719 case Array::Float32Array
:
2720 compileGetByValOnFloatTypedArray(m_jit
.vm()->float32ArrayDescriptor(), node
, sizeof(float));
2722 case Array::Float64Array
:
2723 compileGetByValOnFloatTypedArray(m_jit
.vm()->float64ArrayDescriptor(), node
, sizeof(double));
2726 RELEASE_ASSERT_NOT_REACHED();
2733 case PutByValAlias
: {
2734 Edge child1
= m_jit
.graph().varArgChild(node
, 0);
2735 Edge child2
= m_jit
.graph().varArgChild(node
, 1);
2736 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
2737 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
2739 ArrayMode arrayMode
= node
->arrayMode().modeForPut();
2740 bool alreadyHandled
= false;
2742 switch (arrayMode
.type()) {
2743 case Array::SelectUsingPredictions
:
2744 case Array::ForceExit
:
2745 RELEASE_ASSERT_NOT_REACHED();
2746 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
2747 alreadyHandled
= true;
2749 case Array::Generic
: {
2750 ASSERT(node
->op() == PutByVal
);
2752 SpeculateCellOperand
base(this, child1
); // Save a register, speculate cell. We'll probably be right.
2753 JSValueOperand
property(this, child2
);
2754 JSValueOperand
value(this, child3
);
2755 GPRReg baseGPR
= base
.gpr();
2756 GPRReg propertyTagGPR
= property
.tagGPR();
2757 GPRReg propertyPayloadGPR
= property
.payloadGPR();
2758 GPRReg valueTagGPR
= value
.tagGPR();
2759 GPRReg valuePayloadGPR
= value
.payloadGPR();
2762 callOperation(m_jit
.codeBlock()->isStrictMode() ? operationPutByValCellStrict
: operationPutByValCellNonStrict
, baseGPR
, propertyTagGPR
, propertyPayloadGPR
, valueTagGPR
, valuePayloadGPR
);
2765 alreadyHandled
= true;
2775 SpeculateCellOperand
base(this, child1
);
2776 SpeculateStrictInt32Operand
property(this, child2
);
2778 GPRReg baseReg
= base
.gpr();
2779 GPRReg propertyReg
= property
.gpr();
2781 switch (arrayMode
.type()) {
2782 case Array::Int32
: {
2783 SpeculateIntegerOperand
value(this, child3
);
2785 GPRReg valuePayloadReg
= value
.gpr();
2790 compileContiguousPutByVal(node
, base
, property
, value
, valuePayloadReg
, TrustedImm32(JSValue::Int32Tag
));
2793 case Array::Contiguous
: {
2794 JSValueOperand
value(this, child3
);
2796 GPRReg valueTagReg
= value
.tagGPR();
2797 GPRReg valuePayloadReg
= value
.payloadGPR();
2802 if (Heap::isWriteBarrierEnabled()) {
2803 GPRTemporary
scratch(this);
2804 writeBarrier(baseReg
, valueTagReg
, child3
, WriteBarrierForPropertyAccess
, scratch
.gpr());
2807 compileContiguousPutByVal(node
, base
, property
, value
, valuePayloadReg
, valueTagReg
);
2810 case Array::Double
: {
2811 compileDoublePutByVal(node
, base
, property
);
2814 case Array::ArrayStorage
:
2815 case Array::SlowPutArrayStorage
: {
2816 JSValueOperand
value(this, child3
);
2818 GPRReg valueTagReg
= value
.tagGPR();
2819 GPRReg valuePayloadReg
= value
.payloadGPR();
2825 GPRTemporary
scratch(this);
2826 GPRReg scratchReg
= scratch
.gpr();
2827 writeBarrier(baseReg
, valueTagReg
, child3
, WriteBarrierForPropertyAccess
, scratchReg
);
2830 StorageOperand
storage(this, child4
);
2831 GPRReg storageReg
= storage
.gpr();
2833 if (node
->op() == PutByValAlias
) {
2834 // Store the value to the array.
2835 GPRReg propertyReg
= property
.gpr();
2836 m_jit
.store32(value
.tagGPR(), MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2837 m_jit
.store32(value
.payloadGPR(), MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2843 MacroAssembler::JumpList slowCases
;
2845 MacroAssembler::Jump beyondArrayBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::vectorLengthOffset()));
2846 if (!arrayMode
.isOutOfBounds())
2847 speculationCheck(OutOfBounds
, JSValueRegs(), 0, beyondArrayBounds
);
2849 slowCases
.append(beyondArrayBounds
);
2851 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2852 if (arrayMode
.isInBounds()) {
2854 StoreToHole
, JSValueRegs(), 0,
2855 m_jit
.branch32(MacroAssembler::Equal
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
)));
2857 MacroAssembler::Jump notHoleValue
= m_jit
.branch32(MacroAssembler::NotEqual
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
));
2858 if (arrayMode
.isSlowPut()) {
2859 // This is sort of strange. If we wanted to optimize this code path, we would invert
2860 // the above branch. But it's simply not worth it since this only happens if we're
2861 // already having a bad time.
2862 slowCases
.append(m_jit
.jump());
2864 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageReg
, ArrayStorage::numValuesInVectorOffset()));
2866 // If we're writing to a hole we might be growing the array;
2867 MacroAssembler::Jump lengthDoesNotNeedUpdate
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2868 m_jit
.add32(TrustedImm32(1), propertyReg
);
2869 m_jit
.store32(propertyReg
, MacroAssembler::Address(storageReg
, ArrayStorage::lengthOffset()));
2870 m_jit
.sub32(TrustedImm32(1), propertyReg
);
2872 lengthDoesNotNeedUpdate
.link(&m_jit
);
2874 notHoleValue
.link(&m_jit
);
2877 // Store the value to the array.
2878 m_jit
.store32(valueTagReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
2879 m_jit
.store32(valuePayloadReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
2886 if (!slowCases
.empty()) {
2887 addSlowPathGenerator(
2890 m_jit
.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict
: operationPutByValBeyondArrayBoundsNonStrict
,
2891 NoResult
, baseReg
, propertyReg
, valueTagReg
, valuePayloadReg
));
2894 noResult(node
, UseChildrenCalledExplicitly
);
2898 case Array::Arguments
:
2899 // FIXME: we could at some point make this work. Right now we're assuming that the register
2900 // pressure would be too great.
2901 RELEASE_ASSERT_NOT_REACHED();
2904 case Array::Int8Array
:
2905 compilePutByValForIntTypedArray(m_jit
.vm()->int8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int8_t), SignedTypedArray
);
2908 case Array::Int16Array
:
2909 compilePutByValForIntTypedArray(m_jit
.vm()->int16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int16_t), SignedTypedArray
);
2912 case Array::Int32Array
:
2913 compilePutByValForIntTypedArray(m_jit
.vm()->int32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(int32_t), SignedTypedArray
);
2916 case Array::Uint8Array
:
2917 compilePutByValForIntTypedArray(m_jit
.vm()->uint8ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), UnsignedTypedArray
);
2920 case Array::Uint8ClampedArray
:
2921 compilePutByValForIntTypedArray(m_jit
.vm()->uint8ClampedArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint8_t), UnsignedTypedArray
, ClampRounding
);
2924 case Array::Uint16Array
:
2925 compilePutByValForIntTypedArray(m_jit
.vm()->uint16ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint16_t), UnsignedTypedArray
);
2928 case Array::Uint32Array
:
2929 compilePutByValForIntTypedArray(m_jit
.vm()->uint32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(uint32_t), UnsignedTypedArray
);
2932 case Array::Float32Array
:
2933 compilePutByValForFloatTypedArray(m_jit
.vm()->float32ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(float));
2936 case Array::Float64Array
:
2937 compilePutByValForFloatTypedArray(m_jit
.vm()->float64ArrayDescriptor(), base
.gpr(), property
.gpr(), node
, sizeof(double));
2941 RELEASE_ASSERT_NOT_REACHED();
2948 if (compileRegExpExec(node
))
2951 if (!node
->adjustedRefCount()) {
2952 SpeculateCellOperand
base(this, node
->child1());
2953 SpeculateCellOperand
argument(this, node
->child2());
2954 GPRReg baseGPR
= base
.gpr();
2955 GPRReg argumentGPR
= argument
.gpr();
2958 GPRResult
result(this);
2959 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2961 // Must use jsValueResult because otherwise we screw up register
2962 // allocation, which thinks that this node has a result.
2963 booleanResult(result
.gpr(), node
);
2967 SpeculateCellOperand
base(this, node
->child1());
2968 SpeculateCellOperand
argument(this, node
->child2());
2969 GPRReg baseGPR
= base
.gpr();
2970 GPRReg argumentGPR
= argument
.gpr();
2973 GPRResult2
resultTag(this);
2974 GPRResult
resultPayload(this);
2975 callOperation(operationRegExpExec
, resultTag
.gpr(), resultPayload
.gpr(), baseGPR
, argumentGPR
);
2977 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
2982 SpeculateCellOperand
base(this, node
->child1());
2983 SpeculateCellOperand
argument(this, node
->child2());
2984 GPRReg baseGPR
= base
.gpr();
2985 GPRReg argumentGPR
= argument
.gpr();
2988 GPRResult
result(this);
2989 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
2991 // If we add a DataFormatBool, we should use it here.
2992 booleanResult(result
.gpr(), node
);
2997 ASSERT(node
->arrayMode().isJSArray());
2999 SpeculateCellOperand
base(this, node
->child1());
3000 GPRTemporary
storageLength(this);
3002 GPRReg baseGPR
= base
.gpr();
3003 GPRReg storageLengthGPR
= storageLength
.gpr();
3005 StorageOperand
storage(this, node
->child3());
3006 GPRReg storageGPR
= storage
.gpr();
3008 switch (node
->arrayMode().type()) {
3009 case Array::Int32
: {
3010 SpeculateIntegerOperand
value(this, node
->child2());
3011 GPRReg valuePayloadGPR
= value
.gpr();
3013 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
3014 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3015 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3016 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3017 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3018 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3019 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
3021 addSlowPathGenerator(
3023 slowPath
, this, operationArrayPush
,
3024 JSValueRegs(storageGPR
, storageLengthGPR
),
3025 TrustedImm32(JSValue::Int32Tag
), valuePayloadGPR
, baseGPR
));
3027 jsValueResult(storageGPR
, storageLengthGPR
, node
);
3031 case Array::Contiguous
: {
3032 JSValueOperand
value(this, node
->child2());
3033 GPRReg valueTagGPR
= value
.tagGPR();
3034 GPRReg valuePayloadGPR
= value
.payloadGPR();
3036 if (Heap::isWriteBarrierEnabled()) {
3037 GPRTemporary
scratch(this);
3038 writeBarrier(baseGPR
, valueTagGPR
, node
->child2(), WriteBarrierForPropertyAccess
, scratch
.gpr(), storageLengthGPR
);
3041 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
3042 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3043 m_jit
.store32(valueTagGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3044 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3045 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3046 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3047 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
3049 addSlowPathGenerator(
3051 slowPath
, this, operationArrayPush
,
3052 JSValueRegs(storageGPR
, storageLengthGPR
),
3053 valueTagGPR
, valuePayloadGPR
, baseGPR
));
3055 jsValueResult(storageGPR
, storageLengthGPR
, node
);
3059 case Array::Double
: {
3060 SpeculateDoubleOperand
value(this, node
->child2());
3061 FPRReg valueFPR
= value
.fpr();
3064 JSValueRegs(), node
->child2(), SpecRealNumber
,
3065 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, valueFPR
, valueFPR
));
3067 m_jit
.load32(MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), storageLengthGPR
);
3068 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3069 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
));
3070 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3071 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3072 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
3074 addSlowPathGenerator(
3076 slowPath
, this, operationArrayPushDouble
,
3077 JSValueRegs(storageGPR
, storageLengthGPR
),
3078 valueFPR
, baseGPR
));
3080 jsValueResult(storageGPR
, storageLengthGPR
, node
);
3084 case Array::ArrayStorage
: {
3085 JSValueOperand
value(this, node
->child2());
3086 GPRReg valueTagGPR
= value
.tagGPR();
3087 GPRReg valuePayloadGPR
= value
.payloadGPR();
3089 if (Heap::isWriteBarrierEnabled()) {
3090 GPRTemporary
scratch(this);
3091 writeBarrier(baseGPR
, valueTagGPR
, node
->child2(), WriteBarrierForPropertyAccess
, scratch
.gpr(), storageLengthGPR
);
3094 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3096 // Refuse to handle bizarre lengths.
3097 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::Above
, storageLengthGPR
, TrustedImm32(0x7ffffffe)));
3099 MacroAssembler::Jump slowPath
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
3101 m_jit
.store32(valueTagGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3102 m_jit
.store32(valuePayloadGPR
, MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3104 m_jit
.add32(TrustedImm32(1), storageLengthGPR
);
3105 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3106 m_jit
.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3107 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), storageGPR
);
3109 addSlowPathGenerator(slowPathCall(slowPath
, this, operationArrayPush
, JSValueRegs(storageGPR
, storageLengthGPR
), valueTagGPR
, valuePayloadGPR
, baseGPR
));
3111 jsValueResult(storageGPR
, storageLengthGPR
, node
);
3123 ASSERT(node
->arrayMode().isJSArray());
3125 SpeculateCellOperand
base(this, node
->child1());
3126 StorageOperand
storage(this, node
->child2());
3127 GPRTemporary
valueTag(this);
3128 GPRTemporary
valuePayload(this);
3130 GPRReg baseGPR
= base
.gpr();
3131 GPRReg valueTagGPR
= valueTag
.gpr();
3132 GPRReg valuePayloadGPR
= valuePayload
.gpr();
3133 GPRReg storageGPR
= storage
.gpr();
3135 switch (node
->arrayMode().type()) {
3137 case Array::Contiguous
: {
3139 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), valuePayloadGPR
);
3140 MacroAssembler::Jump undefinedCase
=
3141 m_jit
.branchTest32(MacroAssembler::Zero
, valuePayloadGPR
);
3142 m_jit
.sub32(TrustedImm32(1), valuePayloadGPR
);
3144 valuePayloadGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3146 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)),
3148 MacroAssembler::Jump slowCase
= m_jit
.branch32(MacroAssembler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
3150 MacroAssembler::TrustedImm32(JSValue::EmptyValueTag
),
3151 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3153 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)),
3156 addSlowPathGenerator(
3158 undefinedCase
, this,
3159 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
3160 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
3161 addSlowPathGenerator(
3163 slowCase
, this, operationArrayPopAndRecoverLength
,
3164 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
3166 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
3170 case Array::Double
: {
3171 FPRTemporary
temp(this);
3172 FPRReg tempFPR
= temp
.fpr();
3175 MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()), valuePayloadGPR
);
3176 MacroAssembler::Jump undefinedCase
=
3177 m_jit
.branchTest32(MacroAssembler::Zero
, valuePayloadGPR
);
3178 m_jit
.sub32(TrustedImm32(1), valuePayloadGPR
);
3180 valuePayloadGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3182 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
),
3184 MacroAssembler::Jump slowCase
= m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, tempFPR
, tempFPR
);
3185 JSValue nan
= JSValue(JSValue::EncodeAsDouble
, QNaN
);
3187 MacroAssembler::TrustedImm32(nan
.u
.asBits
.tag
),
3188 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3190 MacroAssembler::TrustedImm32(nan
.u
.asBits
.payload
),
3191 MacroAssembler::BaseIndex(storageGPR
, valuePayloadGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3192 boxDouble(tempFPR
, valueTagGPR
, valuePayloadGPR
);
3194 addSlowPathGenerator(
3196 undefinedCase
, this,
3197 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
3198 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
3199 addSlowPathGenerator(
3201 slowCase
, this, operationArrayPopAndRecoverLength
,
3202 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
3204 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
3208 case Array::ArrayStorage
: {
3209 GPRTemporary
storageLength(this);
3210 GPRReg storageLengthGPR
= storageLength
.gpr();
3212 m_jit
.load32(MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()), storageLengthGPR
);
3214 JITCompiler::JumpList setUndefinedCases
;
3215 setUndefinedCases
.append(m_jit
.branchTest32(MacroAssembler::Zero
, storageLengthGPR
));
3217 m_jit
.sub32(TrustedImm32(1), storageLengthGPR
);
3219 MacroAssembler::Jump slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::vectorLengthOffset()));
3221 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), valueTagGPR
);
3222 m_jit
.load32(MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), valuePayloadGPR
);
3224 m_jit
.store32(storageLengthGPR
, MacroAssembler::Address(storageGPR
, ArrayStorage::lengthOffset()));
3226 setUndefinedCases
.append(m_jit
.branch32(MacroAssembler::Equal
, TrustedImm32(JSValue::EmptyValueTag
), valueTagGPR
));
3228 m_jit
.store32(TrustedImm32(JSValue::EmptyValueTag
), MacroAssembler::BaseIndex(storageGPR
, storageLengthGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3230 m_jit
.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
3232 addSlowPathGenerator(
3234 setUndefinedCases
, this,
3235 MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR
,
3236 MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR
));
3238 addSlowPathGenerator(
3240 slowCase
, this, operationArrayPop
,
3241 JSValueRegs(valueTagGPR
, valuePayloadGPR
), baseGPR
));
3243 jsValueResult(valueTagGPR
, valuePayloadGPR
, node
);
3255 BlockIndex taken
= node
->takenBlockIndex();
3266 ASSERT(GPRInfo::callFrameRegister
!= GPRInfo::regT2
);
3267 ASSERT(GPRInfo::regT1
!= GPRInfo::returnValueGPR
);
3268 ASSERT(GPRInfo::returnValueGPR
!= GPRInfo::callFrameRegister
);
3270 #if DFG_ENABLE(SUCCESS_STATS)
3271 static SamplingCounter
counter("SpeculativeJIT");
3272 m_jit
.emitCount(counter
);
3275 // Return the result in returnValueGPR.
3276 JSValueOperand
op1(this, node
->child1());
3279 boxDouble(op1
.fpr(), GPRInfo::returnValueGPR2
, GPRInfo::returnValueGPR
);
3281 if (op1
.payloadGPR() == GPRInfo::returnValueGPR2
&& op1
.tagGPR() == GPRInfo::returnValueGPR
)
3282 m_jit
.swap(GPRInfo::returnValueGPR
, GPRInfo::returnValueGPR2
);
3283 else if (op1
.payloadGPR() == GPRInfo::returnValueGPR2
) {
3284 m_jit
.move(op1
.payloadGPR(), GPRInfo::returnValueGPR
);
3285 m_jit
.move(op1
.tagGPR(), GPRInfo::returnValueGPR2
);
3287 m_jit
.move(op1
.tagGPR(), GPRInfo::returnValueGPR2
);
3288 m_jit
.move(op1
.payloadGPR(), GPRInfo::returnValueGPR
);
3292 // Grab the return address.
3293 m_jit
.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC
, GPRInfo::regT2
);
3294 // Restore our caller's "r".
3295 m_jit
.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame
, GPRInfo::callFrameRegister
);
3297 m_jit
.restoreReturnAddressBeforeReturn(GPRInfo::regT2
);
3305 case ThrowReferenceError
: {
3306 // We expect that throw statements are rare and are intended to exit the code block
3307 // anyway, so we just OSR back to the old JIT for now.
3308 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
3313 RELEASE_ASSERT(node
->child1().useKind() == UntypedUse
);
3314 JSValueOperand
op1(this, node
->child1());
3315 GPRTemporary
resultTag(this, op1
);
3316 GPRTemporary
resultPayload(this, op1
, false);
3318 GPRReg op1TagGPR
= op1
.tagGPR();
3319 GPRReg op1PayloadGPR
= op1
.payloadGPR();
3320 GPRReg resultTagGPR
= resultTag
.gpr();
3321 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3325 if (!(m_state
.forNode(node
->child1()).m_type
& ~(SpecNumber
| SpecBoolean
))) {
3326 m_jit
.move(op1TagGPR
, resultTagGPR
);
3327 m_jit
.move(op1PayloadGPR
, resultPayloadGPR
);
3329 MacroAssembler::Jump alreadyPrimitive
= m_jit
.branch32(MacroAssembler::NotEqual
, op1TagGPR
, TrustedImm32(JSValue::CellTag
));
3330 MacroAssembler::Jump notPrimitive
= m_jit
.branchPtr(MacroAssembler::NotEqual
, MacroAssembler::Address(op1PayloadGPR
, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
3332 alreadyPrimitive
.link(&m_jit
);
3333 m_jit
.move(op1TagGPR
, resultTagGPR
);
3334 m_jit
.move(op1PayloadGPR
, resultPayloadGPR
);
3336 addSlowPathGenerator(
3338 notPrimitive
, this, operationToPrimitive
,
3339 JSValueRegs(resultTagGPR
, resultPayloadGPR
), op1TagGPR
, op1PayloadGPR
));
3342 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3347 if (node
->child1().useKind() == UntypedUse
) {
3348 JSValueOperand
op1(this, node
->child1());
3349 GPRReg op1PayloadGPR
= op1
.payloadGPR();
3350 GPRReg op1TagGPR
= op1
.tagGPR();
3352 GPRResult
result(this);
3353 GPRReg resultGPR
= result
.gpr();
3357 JITCompiler::Jump done
;
3358 if (node
->child1()->prediction() & SpecString
) {
3359 JITCompiler::Jump slowPath1
= m_jit
.branch32(
3360 JITCompiler::NotEqual
, op1TagGPR
, TrustedImm32(JSValue::CellTag
));
3361 JITCompiler::Jump slowPath2
= m_jit
.branchPtr(
3362 JITCompiler::NotEqual
,
3363 JITCompiler::Address(op1PayloadGPR
, JSCell::structureOffset()),
3364 TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
3365 m_jit
.move(op1PayloadGPR
, resultGPR
);
3366 done
= m_jit
.jump();
3367 slowPath1
.link(&m_jit
);
3368 slowPath2
.link(&m_jit
);
3370 callOperation(operationToString
, resultGPR
, op1TagGPR
, op1PayloadGPR
);
3373 cellResult(resultGPR
, node
);
3377 compileToStringOnCell(node
);
3381 case NewStringObject
: {
3382 compileNewStringObject(node
);
3387 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->codeOrigin
);
3388 if (!globalObject
->isHavingABadTime() && !hasArrayStorage(node
->indexingType())) {
3389 globalObject
->havingABadTimeWatchpoint()->add(speculationWatchpoint());
3391 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3392 ASSERT(structure
->indexingType() == node
->indexingType());
3394 hasUndecided(structure
->indexingType())
3395 || hasInt32(structure
->indexingType())
3396 || hasDouble(structure
->indexingType())
3397 || hasContiguous(structure
->indexingType()));
3399 unsigned numElements
= node
->numChildren();
3401 GPRTemporary
result(this);
3402 GPRTemporary
storage(this);
3404 GPRReg resultGPR
= result
.gpr();
3405 GPRReg storageGPR
= storage
.gpr();
3407 emitAllocateJSArray(resultGPR
, structure
, storageGPR
, numElements
);
3409 // At this point, one way or another, resultGPR and storageGPR have pointers to
3410 // the JSArray and the Butterfly, respectively.
3412 ASSERT(!hasUndecided(structure
->indexingType()) || !node
->numChildren());
3414 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3415 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3416 switch (node
->indexingType()) {
3417 case ALL_BLANK_INDEXING_TYPES
:
3418 case ALL_UNDECIDED_INDEXING_TYPES
:
3421 case ALL_DOUBLE_INDEXING_TYPES
: {
3422 SpeculateDoubleOperand
operand(this, use
);
3423 FPRReg opFPR
= operand
.fpr();
3425 JSValueRegs(), use
, SpecRealNumber
,
3426 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3428 m_jit
.storeDouble(opFPR
, MacroAssembler::Address(storageGPR
, sizeof(double) * operandIdx
));
3431 case ALL_INT32_INDEXING_TYPES
: {
3432 SpeculateIntegerOperand
operand(this, use
);
3433 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3434 m_jit
.store32(operand
.gpr(), MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3437 case ALL_CONTIGUOUS_INDEXING_TYPES
: {
3438 JSValueOperand
operand(this, m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
]);
3439 GPRReg opTagGPR
= operand
.tagGPR();
3440 GPRReg opPayloadGPR
= operand
.payloadGPR();
3441 m_jit
.store32(opTagGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3442 m_jit
.store32(opPayloadGPR
, MacroAssembler::Address(storageGPR
, sizeof(JSValue
) * operandIdx
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3451 // Yuck, we should *really* have a way of also returning the storageGPR. But
3452 // that's the least of what's wrong with this code. We really shouldn't be
3453 // allocating the array after having computed - and probably spilled to the
3454 // stack - all of the things that will go into the array. The solution to that
3455 // bigger problem will also likely fix the redundancy in reloading the storage
3456 // pointer that we currently have.
3458 cellResult(resultGPR
, node
);
3462 if (!node
->numChildren()) {
3464 GPRResult
result(this);
3466 operationNewEmptyArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()));
3467 cellResult(result
.gpr(), node
);
3471 size_t scratchSize
= sizeof(EncodedJSValue
) * node
->numChildren();
3472 ScratchBuffer
* scratchBuffer
= m_jit
.vm()->scratchBufferForSize(scratchSize
);
3473 EncodedJSValue
* buffer
= scratchBuffer
? static_cast<EncodedJSValue
*>(scratchBuffer
->dataBuffer()) : 0;
3475 for (unsigned operandIdx
= 0; operandIdx
< node
->numChildren(); ++operandIdx
) {
3476 // Need to perform the speculations that this node promises to perform. If we're
3477 // emitting code here and the indexing type is not array storage then there is
3478 // probably something hilarious going on and we're already failing at all the
3479 // things, but at least we're going to be sound.
3480 Edge use
= m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
];
3481 switch (node
->indexingType()) {
3482 case ALL_BLANK_INDEXING_TYPES
:
3483 case ALL_UNDECIDED_INDEXING_TYPES
:
3486 case ALL_DOUBLE_INDEXING_TYPES
: {
3487 SpeculateDoubleOperand
operand(this, use
);
3488 FPRReg opFPR
= operand
.fpr();
3490 JSValueRegs(), use
, SpecRealNumber
,
3491 m_jit
.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered
, opFPR
, opFPR
));
3493 m_jit
.storeDouble(opFPR
, reinterpret_cast<char*>(buffer
+ operandIdx
));
3496 case ALL_INT32_INDEXING_TYPES
: {
3497 SpeculateIntegerOperand
operand(this, use
);
3498 GPRReg opGPR
= operand
.gpr();
3499 m_jit
.store32(TrustedImm32(JSValue::Int32Tag
), reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
3500 m_jit
.store32(opGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
3503 case ALL_CONTIGUOUS_INDEXING_TYPES
:
3504 case ALL_ARRAY_STORAGE_INDEXING_TYPES
: {
3505 JSValueOperand
operand(this, m_jit
.graph().m_varArgChildren
[node
->firstChild() + operandIdx
]);
3506 GPRReg opTagGPR
= operand
.tagGPR();
3507 GPRReg opPayloadGPR
= operand
.payloadGPR();
3509 m_jit
.store32(opTagGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
3510 m_jit
.store32(opPayloadGPR
, reinterpret_cast<char*>(buffer
+ operandIdx
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
3520 switch (node
->indexingType()) {
3521 case ALL_DOUBLE_INDEXING_TYPES
:
3522 case ALL_INT32_INDEXING_TYPES
:
3532 GPRTemporary
scratch(this);
3534 // Tell GC mark phase how much of the scratch buffer is active during call.
3535 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3536 m_jit
.storePtr(TrustedImmPtr(scratchSize
), scratch
.gpr());
3539 GPRResult
result(this);
3542 operationNewArray
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3543 static_cast<void*>(buffer
), node
->numChildren());
3546 GPRTemporary
scratch(this);
3548 m_jit
.move(TrustedImmPtr(scratchBuffer
->activeLengthPtr()), scratch
.gpr());
3549 m_jit
.storePtr(TrustedImmPtr(0), scratch
.gpr());
3552 cellResult(result
.gpr(), node
, UseChildrenCalledExplicitly
);
3556 case NewArrayWithSize
: {
3557 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->codeOrigin
);
3558 if (!globalObject
->isHavingABadTime() && !hasArrayStorage(node
->indexingType())) {
3559 globalObject
->havingABadTimeWatchpoint()->add(speculationWatchpoint());
3561 SpeculateStrictInt32Operand
size(this, node
->child1());
3562 GPRTemporary
result(this);
3563 GPRTemporary
storage(this);
3564 GPRTemporary
scratch(this);
3565 GPRTemporary
scratch2(this);
3567 GPRReg sizeGPR
= size
.gpr();
3568 GPRReg resultGPR
= result
.gpr();
3569 GPRReg storageGPR
= storage
.gpr();
3570 GPRReg scratchGPR
= scratch
.gpr();
3571 GPRReg scratch2GPR
= scratch2
.gpr();
3573 MacroAssembler::JumpList slowCases
;
3574 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
)));
3576 ASSERT((1 << 3) == sizeof(JSValue
));
3577 m_jit
.move(sizeGPR
, scratchGPR
);
3578 m_jit
.lshift32(TrustedImm32(3), scratchGPR
);
3579 m_jit
.add32(TrustedImm32(sizeof(IndexingHeader
)), scratchGPR
, resultGPR
);
3581 emitAllocateBasicStorage(resultGPR
, storageGPR
));
3582 m_jit
.subPtr(scratchGPR
, storageGPR
);
3583 Structure
* structure
= globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType());
3584 emitAllocateJSObject
<JSArray
>(resultGPR
, TrustedImmPtr(structure
), storageGPR
, scratchGPR
, scratch2GPR
, slowCases
);
3586 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
3587 m_jit
.store32(sizeGPR
, MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
3589 if (hasDouble(node
->indexingType())) {
3590 JSValue nan
= JSValue(JSValue::EncodeAsDouble
, QNaN
);
3592 m_jit
.move(sizeGPR
, scratchGPR
);
3593 MacroAssembler::Jump done
= m_jit
.branchTest32(MacroAssembler::Zero
, scratchGPR
);
3594 MacroAssembler::Label loop
= m_jit
.label();
3595 m_jit
.sub32(TrustedImm32(1), scratchGPR
);
3596 m_jit
.store32(TrustedImm32(nan
.u
.asBits
.tag
), MacroAssembler::BaseIndex(storageGPR
, scratchGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
3597 m_jit
.store32(TrustedImm32(nan
.u
.asBits
.payload
), MacroAssembler::BaseIndex(storageGPR
, scratchGPR
, MacroAssembler::TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
3598 m_jit
.branchTest32(MacroAssembler::NonZero
, scratchGPR
).linkTo(loop
, &m_jit
);
3602 addSlowPathGenerator(adoptPtr(
3603 new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
3604 slowCases
, this, operationNewArrayWithSize
, resultGPR
,
3605 globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()),
3606 globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
),
3609 cellResult(resultGPR
, node
);
3613 SpeculateStrictInt32Operand
size(this, node
->child1());
3614 GPRReg sizeGPR
= size
.gpr();
3616 GPRResult
result(this);
3617 GPRReg resultGPR
= result
.gpr();
3618 GPRReg structureGPR
= selectScratchGPR(sizeGPR
);
3619 MacroAssembler::Jump bigLength
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, sizeGPR
, TrustedImm32(MIN_SPARSE_ARRAY_INDEX
));
3620 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType())), structureGPR
);
3621 MacroAssembler::Jump done
= m_jit
.jump();
3622 bigLength
.link(&m_jit
);
3623 m_jit
.move(TrustedImmPtr(globalObject
->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage
)), structureGPR
);
3626 operationNewArrayWithSize
, resultGPR
, structureGPR
, sizeGPR
);
3627 cellResult(resultGPR
, node
);
3631 case NewArrayBuffer
: {
3632 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->codeOrigin
);
3633 IndexingType indexingType
= node
->indexingType();
3634 if (!globalObject
->isHavingABadTime() && !hasArrayStorage(indexingType
)) {
3635 globalObject
->havingABadTimeWatchpoint()->add(speculationWatchpoint());
3637 unsigned numElements
= node
->numConstants();
3639 GPRTemporary
result(this);
3640 GPRTemporary
storage(this);
3642 GPRReg resultGPR
= result
.gpr();
3643 GPRReg storageGPR
= storage
.gpr();
3645 emitAllocateJSArray(resultGPR
, globalObject
->arrayStructureForIndexingTypeDuringAllocation(indexingType
), storageGPR
, numElements
);
3647 if (node
->indexingType() == ArrayWithDouble
) {
3648 JSValue
* data
= m_jit
.codeBlock()->constantBuffer(node
->startConstant());
3649 for (unsigned index
= 0; index
< node
->numConstants(); ++index
) {
3654 u
.value
= data
[index
].asNumber();
3655 m_jit
.store32(Imm32(u
.halves
[0]), MacroAssembler::Address(storageGPR
, sizeof(double) * index
));
3656 m_jit
.store32(Imm32(u
.halves
[1]), MacroAssembler::Address(storageGPR
, sizeof(double) * index
+ sizeof(int32_t)));
3659 int32_t* data
= bitwise_cast
<int32_t*>(m_jit
.codeBlock()->constantBuffer(node
->startConstant()));
3660 for (unsigned index
= 0; index
< node
->numConstants() * 2; ++index
) {
3662 Imm32(data
[index
]), MacroAssembler::Address(storageGPR
, sizeof(int32_t) * index
));
3666 cellResult(resultGPR
, node
);
3671 GPRResult
result(this);
3673 callOperation(operationNewArrayBuffer
, result
.gpr(), globalObject
->arrayStructureForIndexingTypeDuringAllocation(node
->indexingType()), node
->startConstant(), node
->numConstants());
3675 cellResult(result
.gpr(), node
);
3681 GPRResult
resultPayload(this);
3682 GPRResult2
resultTag(this);
3684 callOperation(operationNewRegexp
, resultTag
.gpr(), resultPayload
.gpr(), m_jit
.codeBlock()->regexp(node
->regexpIndex()));
3686 // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
3687 cellResult(resultPayload
.gpr(), node
);
3692 ASSERT(node
->child1().useKind() == UntypedUse
);
3694 JSValueOperand
thisValue(this, node
->child1());
3695 GPRReg thisValueTagGPR
= thisValue
.tagGPR();
3696 GPRReg thisValuePayloadGPR
= thisValue
.payloadGPR();
3700 GPRResult2
resultTag(this);
3701 GPRResult
resultPayload(this);
3702 callOperation(operationConvertThis
, resultTag
.gpr(), resultPayload
.gpr(), thisValueTagGPR
, thisValuePayloadGPR
);
3704 cellResult(resultPayload
.gpr(), node
);
3709 // Note that there is not so much profit to speculate here. The only things we
3710 // speculate on are (1) that it's a cell, since that eliminates cell checks
3711 // later if the proto is reused, and (2) if we have a FinalObject prediction
3712 // then we speculate because we want to get recompiled if it isn't (since
3713 // otherwise we'd start taking slow path a lot).
3715 SpeculateCellOperand
callee(this, node
->child1());
3716 GPRTemporary
result(this);
3717 GPRTemporary
allocator(this);
3718 GPRTemporary
structure(this);
3719 GPRTemporary
scratch(this);
3721 GPRReg calleeGPR
= callee
.gpr();
3722 GPRReg resultGPR
= result
.gpr();
3723 GPRReg allocatorGPR
= allocator
.gpr();
3724 GPRReg structureGPR
= structure
.gpr();
3725 GPRReg scratchGPR
= scratch
.gpr();
3727 MacroAssembler::JumpList slowPath
;
3729 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR
);
3730 m_jit
.loadPtr(JITCompiler::Address(calleeGPR
, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR
);
3731 slowPath
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, allocatorGPR
));
3732 emitAllocateJSObject(resultGPR
, allocatorGPR
, structureGPR
, TrustedImmPtr(0), scratchGPR
, slowPath
);
3734 addSlowPathGenerator(slowPathCall(slowPath
, this, operationCreateThis
, resultGPR
, calleeGPR
, node
->inlineCapacity()));
3736 cellResult(resultGPR
, node
);
3740 case AllocationProfileWatchpoint
: {
3741 jsCast
<JSFunction
*>(node
->function())->addAllocationProfileWatchpoint(speculationWatchpoint());
3747 GPRTemporary
result(this);
3748 GPRTemporary
allocator(this);
3749 GPRTemporary
scratch(this);
3751 GPRReg resultGPR
= result
.gpr();
3752 GPRReg allocatorGPR
= allocator
.gpr();
3753 GPRReg scratchGPR
= scratch
.gpr();
3755 MacroAssembler::JumpList slowPath
;
3757 Structure
* structure
= node
->structure();
3758 size_t allocationSize
= JSObject::allocationSize(structure
->inlineCapacity());
3759 MarkedAllocator
* allocatorPtr
= &m_jit
.vm()->heap
.allocatorForObjectWithoutDestructor(allocationSize
);
3761 m_jit
.move(TrustedImmPtr(allocatorPtr
), allocatorGPR
);
3762 emitAllocateJSObject(resultGPR
, allocatorGPR
, TrustedImmPtr(structure
), TrustedImmPtr(0), scratchGPR
, slowPath
);
3764 addSlowPathGenerator(slowPathCall(slowPath
, this, operationNewObject
, resultGPR
, structure
));
3766 cellResult(resultGPR
, node
);
3771 GPRTemporary
result(this);
3772 m_jit
.loadPtr(JITCompiler::payloadFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::Callee
))), result
.gpr());
3773 cellResult(result
.gpr(), node
);
3778 SpeculateCellOperand
callee(this, node
->child1());
3779 m_jit
.storePtr(callee
.gpr(), JITCompiler::payloadFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::Callee
))));
3780 m_jit
.store32(MacroAssembler::TrustedImm32(JSValue::CellTag
), JITCompiler::tagFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::Callee
))));
3786 SpeculateCellOperand
function(this, node
->child1());
3787 GPRTemporary
result(this, function
);
3788 m_jit
.loadPtr(JITCompiler::Address(function
.gpr(), JSFunction::offsetOfScopeChain()), result
.gpr());
3789 cellResult(result
.gpr(), node
);
3794 GPRTemporary
result(this);
3795 GPRReg resultGPR
= result
.gpr();
3797 m_jit
.loadPtr(JITCompiler::payloadFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::ScopeChain
))), resultGPR
);
3798 cellResult(resultGPR
, node
);
3803 SpeculateCellOperand
callee(this, node
->child1());
3804 m_jit
.storePtr(callee
.gpr(), JITCompiler::payloadFor(static_cast<VirtualRegister
>(node
->codeOrigin
.stackOffset() + static_cast<int>(JSStack::ScopeChain
))));
3809 case SkipTopScope
: {
3810 SpeculateCellOperand
scope(this, node
->child1());
3811 GPRTemporary
result(this, scope
);
3812 GPRReg resultGPR
= result
.gpr();
3813 m_jit
.move(scope
.gpr(), resultGPR
);
3814 JITCompiler::Jump activationNotCreated
=
3815 m_jit
.branchTestPtr(
3817 JITCompiler::payloadFor(
3818 static_cast<VirtualRegister
>(m_jit
.codeBlock()->activationRegister())));
3819 m_jit
.loadPtr(JITCompiler::Address(resultGPR
, JSScope::offsetOfNext()), resultGPR
);
3820 activationNotCreated
.link(&m_jit
);
3821 cellResult(resultGPR
, node
);
3826 SpeculateCellOperand
scope(this, node
->child1());
3827 GPRTemporary
result(this, scope
);
3828 m_jit
.loadPtr(JITCompiler::Address(scope
.gpr(), JSScope::offsetOfNext()), result
.gpr());
3829 cellResult(result
.gpr(), node
);
3833 case GetScopeRegisters
: {
3834 SpeculateCellOperand
scope(this, node
->child1());
3835 GPRTemporary
result(this);
3836 GPRReg scopeGPR
= scope
.gpr();
3837 GPRReg resultGPR
= result
.gpr();
3839 m_jit
.loadPtr(JITCompiler::Address(scopeGPR
, JSVariableObject::offsetOfRegisters()), resultGPR
);
3840 storageResult(resultGPR
, node
);
3843 case GetScopedVar
: {
3844 StorageOperand
registers(this, node
->child1());
3845 GPRTemporary
resultTag(this);
3846 GPRTemporary
resultPayload(this);
3847 GPRReg registersGPR
= registers
.gpr();
3848 GPRReg resultTagGPR
= resultTag
.gpr();
3849 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3850 m_jit
.load32(JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTagGPR
);
3851 m_jit
.load32(JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayloadGPR
);
3852 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
3855 case PutScopedVar
: {
3856 SpeculateCellOperand
scope(this, node
->child1());
3857 StorageOperand
registers(this, node
->child2());
3858 JSValueOperand
value(this, node
->child3());
3859 GPRTemporary
scratchRegister(this);
3860 GPRReg scopeGPR
= scope
.gpr();
3861 GPRReg registersGPR
= registers
.gpr();
3862 GPRReg valueTagGPR
= value
.tagGPR();
3863 GPRReg valuePayloadGPR
= value
.payloadGPR();
3864 GPRReg scratchGPR
= scratchRegister
.gpr();
3866 m_jit
.store32(valueTagGPR
, JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
3867 m_jit
.store32(valuePayloadGPR
, JITCompiler::Address(registersGPR
, node
->varNumber() * sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
3868 writeBarrier(scopeGPR
, valueTagGPR
, node
->child2(), WriteBarrierForVariableAccess
, scratchGPR
);
3874 if (!node
->prediction()) {
3875 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3879 if (isCellSpeculation(node
->child1()->prediction())) {
3880 SpeculateCellOperand
base(this, node
->child1());
3881 GPRTemporary
resultTag(this, base
);
3882 GPRTemporary
resultPayload(this);
3884 GPRReg baseGPR
= base
.gpr();
3885 GPRReg resultTagGPR
= resultTag
.gpr();
3886 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3890 cachedGetById(node
->codeOrigin
, InvalidGPRReg
, baseGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber());
3892 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3896 JSValueOperand
base(this, node
->child1());
3897 GPRTemporary
resultTag(this, base
);
3898 GPRTemporary
resultPayload(this);
3900 GPRReg baseTagGPR
= base
.tagGPR();
3901 GPRReg basePayloadGPR
= base
.payloadGPR();
3902 GPRReg resultTagGPR
= resultTag
.gpr();
3903 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3907 JITCompiler::Jump notCell
= m_jit
.branch32(JITCompiler::NotEqual
, baseTagGPR
, TrustedImm32(JSValue::CellTag
));
3909 cachedGetById(node
->codeOrigin
, baseTagGPR
, basePayloadGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), notCell
);
3911 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3915 case GetByIdFlush
: {
3916 if (!node
->prediction()) {
3917 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
3921 switch (node
->child1().useKind()) {
3923 SpeculateCellOperand
base(this, node
->child1());
3925 GPRReg baseGPR
= base
.gpr();
3927 GPRResult
resultTag(this);
3928 GPRResult2
resultPayload(this);
3929 GPRReg resultTagGPR
= resultTag
.gpr();
3930 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3936 cachedGetById(node
->codeOrigin
, InvalidGPRReg
, baseGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), JITCompiler::Jump(), DontSpill
);
3938 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3943 JSValueOperand
base(this, node
->child1());
3944 GPRReg baseTagGPR
= base
.tagGPR();
3945 GPRReg basePayloadGPR
= base
.payloadGPR();
3947 GPRResult
resultTag(this);
3948 GPRResult2
resultPayload(this);
3949 GPRReg resultTagGPR
= resultTag
.gpr();
3950 GPRReg resultPayloadGPR
= resultPayload
.gpr();
3956 JITCompiler::Jump notCell
= m_jit
.branch32(JITCompiler::NotEqual
, baseTagGPR
, TrustedImm32(JSValue::CellTag
));
3958 cachedGetById(node
->codeOrigin
, baseTagGPR
, basePayloadGPR
, resultTagGPR
, resultPayloadGPR
, node
->identifierNumber(), notCell
, DontSpill
);
3960 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
, UseChildrenCalledExplicitly
);
3965 RELEASE_ASSERT_NOT_REACHED();
3971 case GetArrayLength
:
3972 compileGetArrayLength(node
);
3975 case CheckFunction
: {
3976 SpeculateCellOperand
function(this, node
->child1());
3977 speculationCheck(BadFunction
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, function
.gpr(), node
->function()));
3982 case CheckExecutable
: {
3983 SpeculateCellOperand
function(this, node
->child1());
3984 speculationCheck(BadExecutable
, JSValueSource::unboxedCell(function
.gpr()), node
->child1(), m_jit
.branchWeakPtr(JITCompiler::NotEqual
, JITCompiler::Address(function
.gpr(), JSFunction::offsetOfExecutable()), node
->executable()));
3989 case CheckStructure
:
3990 case ForwardCheckStructure
: {
3991 SpeculateCellOperand
base(this, node
->child1());
3993 ASSERT(node
->structureSet().size());
3995 if (node
->structureSet().size() == 1) {
3997 BadCache
, JSValueSource::unboxedCell(base
.gpr()), 0,
3998 m_jit
.branchWeakPtr(
3999 JITCompiler::NotEqual
,
4000 JITCompiler::Address(base
.gpr(), JSCell::structureOffset()),
4001 node
->structureSet()[0]));
4003 GPRTemporary
structure(this);
4005 m_jit
.loadPtr(JITCompiler::Address(base
.gpr(), JSCell::structureOffset()), structure
.gpr());
4007 JITCompiler::JumpList done
;
4009 for (size_t i
= 0; i
< node
->structureSet().size() - 1; ++i
)
4010 done
.append(m_jit
.branchWeakPtr(JITCompiler::Equal
, structure
.gpr(), node
->structureSet()[i
]));
4013 BadCache
, JSValueSource::unboxedCell(base
.gpr()), 0,
4014 m_jit
.branchWeakPtr(
4015 JITCompiler::NotEqual
, structure
.gpr(), node
->structureSet().last()));
4024 case StructureTransitionWatchpoint
:
4025 case ForwardStructureTransitionWatchpoint
: {
4026 // There is a fascinating question here of what to do about array profiling.
4027 // We *could* try to tell the OSR exit about where the base of the access is.
4028 // The DFG will have kept it alive, though it may not be in a register, and
4029 // we shouldn't really load it since that could be a waste. For now though,
4030 // we'll just rely on the fact that when a watchpoint fires then that's
4031 // quite a hint already.
4033 m_jit
.addWeakReference(node
->structure());
4034 node
->structure()->addTransitionWatchpoint(
4035 speculationWatchpoint(
4036 node
->child1()->op() == WeakJSConstant
? BadWeakConstantCache
: BadCache
));
4038 #if !ASSERT_DISABLED
4039 SpeculateCellOperand
op1(this, node
->child1());
4040 JITCompiler::Jump isOK
= m_jit
.branchPtr(JITCompiler::Equal
, JITCompiler::Address(op1
.gpr(), JSCell::structureOffset()), TrustedImmPtr(node
->structure()));
4044 speculateCell(node
->child1());
4051 case PhantomPutStructure
: {
4052 ASSERT(isKnownCell(node
->child1().node()));
4053 ASSERT(node
->structureTransitionData().previousStructure
->transitionWatchpointSetHasBeenInvalidated());
4054 m_jit
.addWeakReferenceTransition(
4055 node
->codeOrigin
.codeOriginOwner(),
4056 node
->structureTransitionData().previousStructure
,
4057 node
->structureTransitionData().newStructure
);
4062 case PutStructure
: {
4063 ASSERT(node
->structureTransitionData().previousStructure
->transitionWatchpointSetHasBeenInvalidated());
4065 SpeculateCellOperand
base(this, node
->child1());
4066 GPRReg baseGPR
= base
.gpr();
4068 m_jit
.addWeakReferenceTransition(
4069 node
->codeOrigin
.codeOriginOwner(),
4070 node
->structureTransitionData().previousStructure
,
4071 node
->structureTransitionData().newStructure
);
4073 #if ENABLE(WRITE_BARRIER_PROFILING)
4074 // Must always emit this write barrier as the structure transition itself requires it
4075 writeBarrier(baseGPR
, node
->structureTransitionData().newStructure
, WriteBarrierForGenericAccess
);
4078 m_jit
.storePtr(MacroAssembler::TrustedImmPtr(node
->structureTransitionData().newStructure
), MacroAssembler::Address(baseGPR
, JSCell::structureOffset()));
4084 case AllocatePropertyStorage
:
4085 compileAllocatePropertyStorage(node
);
4088 case ReallocatePropertyStorage
:
4089 compileReallocatePropertyStorage(node
);
4092 case GetButterfly
: {
4093 SpeculateCellOperand
base(this, node
->child1());
4094 GPRTemporary
result(this, base
);
4096 GPRReg baseGPR
= base
.gpr();
4097 GPRReg resultGPR
= result
.gpr();
4099 m_jit
.loadPtr(JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()), resultGPR
);
4101 storageResult(resultGPR
, node
);
4105 case GetIndexedPropertyStorage
: {
4106 compileGetIndexedPropertyStorage(node
);
4111 StorageOperand
storage(this, node
->child1());
4112 GPRTemporary
resultTag(this, storage
);
4113 GPRTemporary
resultPayload(this);
4115 GPRReg storageGPR
= storage
.gpr();
4116 GPRReg resultTagGPR
= resultTag
.gpr();
4117 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4119 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
4121 m_jit
.load32(JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayloadGPR
);
4122 m_jit
.load32(JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTagGPR
);
4124 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4129 #if ENABLE(WRITE_BARRIER_PROFILING)
4130 SpeculateCellOperand
base(this, node
->child2());
4132 StorageOperand
storage(this, node
->child1());
4133 JSValueOperand
value(this, node
->child3());
4135 GPRReg storageGPR
= storage
.gpr();
4136 GPRReg valueTagGPR
= value
.tagGPR();
4137 GPRReg valuePayloadGPR
= value
.payloadGPR();
4139 #if ENABLE(WRITE_BARRIER_PROFILING)
4140 writeBarrier(base
.gpr(), valueTagGPR
, node
->child3(), WriteBarrierForPropertyAccess
);
4143 StorageAccessData
& storageAccessData
= m_jit
.graph().m_storageAccessData
[node
->storageAccessDataIndex()];
4145 m_jit
.storePtr(valueTagGPR
, JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
4146 m_jit
.storePtr(valuePayloadGPR
, JITCompiler::Address(storageGPR
, storageAccessData
.offset
* sizeof(EncodedJSValue
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
4153 SpeculateCellOperand
base(this, node
->child1());
4154 JSValueOperand
value(this, node
->child2());
4155 GPRTemporary
scratch(this);
4157 GPRReg baseGPR
= base
.gpr();
4158 GPRReg valueTagGPR
= value
.tagGPR();
4159 GPRReg valuePayloadGPR
= value
.payloadGPR();
4160 GPRReg scratchGPR
= scratch
.gpr();
4165 cachedPutById(node
->codeOrigin
, baseGPR
, valueTagGPR
, valuePayloadGPR
, node
->child2(), scratchGPR
, node
->identifierNumber(), NotDirect
);
4167 noResult(node
, UseChildrenCalledExplicitly
);
4171 case PutByIdDirect
: {
4172 SpeculateCellOperand
base(this, node
->child1());
4173 JSValueOperand
value(this, node
->child2());
4174 GPRTemporary
scratch(this);
4176 GPRReg baseGPR
= base
.gpr();
4177 GPRReg valueTagGPR
= value
.tagGPR();
4178 GPRReg valuePayloadGPR
= value
.payloadGPR();
4179 GPRReg scratchGPR
= scratch
.gpr();
4184 cachedPutById(node
->codeOrigin
, baseGPR
, valueTagGPR
, valuePayloadGPR
, node
->child2(), scratchGPR
, node
->identifierNumber(), Direct
);
4186 noResult(node
, UseChildrenCalledExplicitly
);
4190 case GetGlobalVar
: {
4191 GPRTemporary
resultPayload(this);
4192 GPRTemporary
resultTag(this);
4194 m_jit
.move(TrustedImmPtr(node
->registerPointer()), resultPayload
.gpr());
4195 m_jit
.load32(JITCompiler::Address(resultPayload
.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), resultTag
.gpr());
4196 m_jit
.load32(JITCompiler::Address(resultPayload
.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), resultPayload
.gpr());
4198 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
4202 case PutGlobalVar
: {
4203 JSValueOperand
value(this, node
->child1());
4204 if (Heap::isWriteBarrierEnabled()) {
4205 GPRTemporary
scratch(this);
4206 GPRReg scratchReg
= scratch
.gpr();
4208 writeBarrier(m_jit
.globalObjectFor(node
->codeOrigin
), value
.tagGPR(), node
->child1(), WriteBarrierForVariableAccess
, scratchReg
);
4211 // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have
4212 // a spare register - a good optimization would be to put the register pointer into
4213 // a register and then do a zero offset store followed by a four-offset store (or
4214 // vice-versa depending on endianness).
4215 m_jit
.store32(value
.tagGPR(), node
->registerPointer()->tagPointer());
4216 m_jit
.store32(value
.payloadGPR(), node
->registerPointer()->payloadPointer());
4222 case PutGlobalVarCheck
: {
4223 JSValueOperand
value(this, node
->child1());
4225 WatchpointSet
* watchpointSet
=
4226 m_jit
.globalObjectFor(node
->codeOrigin
)->symbolTable()->get(
4227 identifier(node
->identifierNumberForCheck())->impl()).watchpointSet();
4228 addSlowPathGenerator(
4231 JITCompiler::NonZero
,
4232 JITCompiler::AbsoluteAddress(watchpointSet
->addressOfIsWatched())),
4233 this, operationNotifyGlobalVarWrite
, NoResult
, watchpointSet
));
4235 if (Heap::isWriteBarrierEnabled()) {
4236 GPRTemporary
scratch(this);
4237 GPRReg scratchReg
= scratch
.gpr();
4239 writeBarrier(m_jit
.globalObjectFor(node
->codeOrigin
), value
.tagGPR(), node
->child1(), WriteBarrierForVariableAccess
, scratchReg
);
4242 // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have
4243 // a spare register - a good optimization would be to put the register pointer into
4244 // a register and then do a zero offset store followed by a four-offset store (or
4245 // vice-versa depending on endianness).
4246 m_jit
.store32(value
.tagGPR(), node
->registerPointer()->tagPointer());
4247 m_jit
.store32(value
.payloadGPR(), node
->registerPointer()->payloadPointer());
4253 case GlobalVarWatchpoint
: {
4254 m_jit
.globalObjectFor(node
->codeOrigin
)->symbolTable()->get(
4255 identifier(node
->identifierNumberForCheck())->impl()).addWatchpoint(
4256 speculationWatchpoint());
4258 #if DFG_ENABLE(JIT_ASSERT)
4259 GPRTemporary
scratch(this);
4260 GPRReg scratchGPR
= scratch
.gpr();
4261 m_jit
.load32(node
->registerPointer()->tagPointer(), scratchGPR
);
4262 JITCompiler::Jump notOK
= m_jit
.branch32(
4263 JITCompiler::NotEqual
, scratchGPR
,
4264 TrustedImm32(node
->registerPointer()->get().tag()));
4265 m_jit
.load32(node
->registerPointer()->payloadPointer(), scratchGPR
);
4266 JITCompiler::Jump ok
= m_jit
.branch32(
4267 JITCompiler::Equal
, scratchGPR
,
4268 TrustedImm32(node
->registerPointer()->get().payload()));
4278 case CheckHasInstance
: {
4279 SpeculateCellOperand
base(this, node
->child1());
4280 GPRTemporary
structure(this);
4282 // Speculate that base 'ImplementsDefaultHasInstance'.
4283 m_jit
.loadPtr(MacroAssembler::Address(base
.gpr(), JSCell::structureOffset()), structure
.gpr());
4284 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branchTest8(MacroAssembler::Zero
, MacroAssembler::Address(structure
.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance
)));
4291 compileInstanceOf(node
);
4296 JSValueOperand
value(this, node
->child1());
4297 GPRTemporary
result(this);
4298 GPRTemporary
localGlobalObject(this);
4299 GPRTemporary
remoteGlobalObject(this);
4301 JITCompiler::Jump isCell
= m_jit
.branch32(JITCompiler::Equal
, value
.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag
));
4303 m_jit
.compare32(JITCompiler::Equal
, value
.tagGPR(), TrustedImm32(JSValue::UndefinedTag
), result
.gpr());
4304 JITCompiler::Jump done
= m_jit
.jump();
4306 isCell
.link(&m_jit
);
4307 JITCompiler::Jump notMasqueradesAsUndefined
;
4308 if (m_jit
.graph().globalObjectFor(node
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
4309 m_jit
.graph().globalObjectFor(node
->codeOrigin
)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
4310 m_jit
.move(TrustedImm32(0), result
.gpr());
4311 notMasqueradesAsUndefined
= m_jit
.jump();
4313 m_jit
.loadPtr(JITCompiler::Address(value
.payloadGPR(), JSCell::structureOffset()), result
.gpr());
4314 JITCompiler::Jump isMasqueradesAsUndefined
= m_jit
.branchTest8(JITCompiler::NonZero
, JITCompiler::Address(result
.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined
));
4315 m_jit
.move(TrustedImm32(0), result
.gpr());
4316 notMasqueradesAsUndefined
= m_jit
.jump();
4318 isMasqueradesAsUndefined
.link(&m_jit
);
4319 GPRReg localGlobalObjectGPR
= localGlobalObject
.gpr();
4320 GPRReg remoteGlobalObjectGPR
= remoteGlobalObject
.gpr();
4321 m_jit
.move(TrustedImmPtr(m_jit
.globalObjectFor(node
->codeOrigin
)), localGlobalObjectGPR
);
4322 m_jit
.loadPtr(JITCompiler::Address(result
.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR
);
4323 m_jit
.compare32(JITCompiler::Equal
, localGlobalObjectGPR
, remoteGlobalObjectGPR
, result
.gpr());
4326 notMasqueradesAsUndefined
.link(&m_jit
);
4328 booleanResult(result
.gpr(), node
);
4333 JSValueOperand
value(this, node
->child1());
4334 GPRTemporary
result(this, value
);
4336 m_jit
.compare32(JITCompiler::Equal
, value
.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag
), result
.gpr());
4337 booleanResult(result
.gpr(), node
);
4342 JSValueOperand
value(this, node
->child1());
4343 GPRTemporary
result(this, value
);
4345 m_jit
.add32(TrustedImm32(1), value
.tagGPR(), result
.gpr());
4346 m_jit
.compare32(JITCompiler::Below
, result
.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag
+ 1), result
.gpr());
4347 booleanResult(result
.gpr(), node
);
4352 JSValueOperand
value(this, node
->child1());
4353 GPRTemporary
result(this, value
);
4355 JITCompiler::Jump isNotCell
= m_jit
.branch32(JITCompiler::NotEqual
, value
.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag
));
4357 m_jit
.loadPtr(JITCompiler::Address(value
.payloadGPR(), JSCell::structureOffset()), result
.gpr());
4358 m_jit
.compare8(JITCompiler::Equal
, JITCompiler::Address(result
.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType
), result
.gpr());
4359 JITCompiler::Jump done
= m_jit
.jump();
4361 isNotCell
.link(&m_jit
);
4362 m_jit
.move(TrustedImm32(0), result
.gpr());
4365 booleanResult(result
.gpr(), node
);
4370 JSValueOperand
value(this, node
->child1());
4371 GPRReg valueTagGPR
= value
.tagGPR();
4372 GPRReg valuePayloadGPR
= value
.payloadGPR();
4373 GPRResult
result(this);
4374 GPRReg resultGPR
= result
.gpr();
4376 callOperation(operationIsObject
, resultGPR
, valueTagGPR
, valuePayloadGPR
);
4377 booleanResult(result
.gpr(), node
);
4382 JSValueOperand
value(this, node
->child1());
4383 GPRReg valueTagGPR
= value
.tagGPR();
4384 GPRReg valuePayloadGPR
= value
.payloadGPR();
4385 GPRResult
result(this);
4386 GPRReg resultGPR
= result
.gpr();
4388 callOperation(operationIsFunction
, resultGPR
, valueTagGPR
, valuePayloadGPR
);
4389 booleanResult(result
.gpr(), node
);
4393 JSValueOperand
value(this, node
->child1(), ManualOperandSpeculation
);
4394 GPRReg tagGPR
= value
.tagGPR();
4395 GPRReg payloadGPR
= value
.payloadGPR();
4396 GPRTemporary
temp(this);
4397 GPRReg tempGPR
= temp
.gpr();
4398 GPRResult
result(this);
4399 GPRReg resultGPR
= result
.gpr();
4400 JITCompiler::JumpList doneJumps
;
4404 ASSERT(node
->child1().useKind() == UntypedUse
|| node
->child1().useKind() == CellUse
|| node
->child1().useKind() == StringUse
);
4406 JITCompiler::Jump isNotCell
= m_jit
.branch32(JITCompiler::NotEqual
, tagGPR
, JITCompiler::TrustedImm32(JSValue::CellTag
));
4407 if (node
->child1().useKind() != UntypedUse
)
4408 DFG_TYPE_CHECK(JSValueRegs(tagGPR
, payloadGPR
), node
->child1(), SpecCell
, isNotCell
);
4410 if (!node
->child1()->shouldSpeculateObject() || node
->child1().useKind() == StringUse
) {
4411 m_jit
.loadPtr(JITCompiler::Address(payloadGPR
, JSCell::structureOffset()), tempGPR
);
4412 JITCompiler::Jump notString
= m_jit
.branch8(JITCompiler::NotEqual
, JITCompiler::Address(tempGPR
, Structure::typeInfoTypeOffset()), TrustedImm32(StringType
));
4413 if (node
->child1().useKind() == StringUse
)
4414 DFG_TYPE_CHECK(JSValueRegs(tagGPR
, payloadGPR
), node
->child1(), SpecString
, notString
);
4415 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.stringString()), resultGPR
);
4416 doneJumps
.append(m_jit
.jump());
4417 if (node
->child1().useKind() != StringUse
) {
4418 notString
.link(&m_jit
);
4419 callOperation(operationTypeOf
, resultGPR
, payloadGPR
);
4420 doneJumps
.append(m_jit
.jump());
4423 callOperation(operationTypeOf
, resultGPR
, payloadGPR
);
4424 doneJumps
.append(m_jit
.jump());
4427 if (node
->child1().useKind() == UntypedUse
) {
4428 isNotCell
.link(&m_jit
);
4430 m_jit
.add32(TrustedImm32(1), tagGPR
, tempGPR
);
4431 JITCompiler::Jump notNumber
= m_jit
.branch32(JITCompiler::AboveOrEqual
, tempGPR
, JITCompiler::TrustedImm32(JSValue::LowestTag
+ 1));
4432 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.numberString()), resultGPR
);
4433 doneJumps
.append(m_jit
.jump());
4434 notNumber
.link(&m_jit
);
4436 JITCompiler::Jump notUndefined
= m_jit
.branch32(JITCompiler::NotEqual
, tagGPR
, TrustedImm32(JSValue::UndefinedTag
));
4437 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.undefinedString()), resultGPR
);
4438 doneJumps
.append(m_jit
.jump());
4439 notUndefined
.link(&m_jit
);
4441 JITCompiler::Jump notNull
= m_jit
.branch32(JITCompiler::NotEqual
, tagGPR
, TrustedImm32(JSValue::NullTag
));
4442 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.objectString()), resultGPR
);
4443 doneJumps
.append(m_jit
.jump());
4444 notNull
.link(&m_jit
);
4446 // Only boolean left
4447 m_jit
.move(TrustedImmPtr(m_jit
.vm()->smallStrings
.booleanString()), resultGPR
);
4449 doneJumps
.link(&m_jit
);
4450 cellResult(resultGPR
, node
);
4459 #if ENABLE(DEBUG_WITH_BREAKPOINT)
4462 RELEASE_ASSERT_NOT_REACHED();
4473 GPRResult
resultPayload(this);
4474 GPRResult2
resultTag(this);
4475 ResolveOperationData
& data
= m_jit
.graph().m_resolveOperationsData
[node
->resolveOperationsDataIndex()];
4476 callOperation(operationResolve
, resultTag
.gpr(), resultPayload
.gpr(), identifier(data
.identifierNumber
), data
.resolveOperations
);
4477 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
4483 GPRResult
resultPayload(this);
4484 GPRResult2
resultTag(this);
4485 ResolveOperationData
& data
= m_jit
.graph().m_resolveOperationsData
[node
->resolveOperationsDataIndex()];
4486 callOperation(operationResolveBase
, resultTag
.gpr(), resultPayload
.gpr(), identifier(data
.identifierNumber
), data
.resolveOperations
, data
.putToBaseOperation
);
4487 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
4491 case ResolveBaseStrictPut
: {
4493 GPRResult
resultPayload(this);
4494 GPRResult2
resultTag(this);
4495 ResolveOperationData
& data
= m_jit
.graph().m_resolveOperationsData
[node
->resolveOperationsDataIndex()];
4496 callOperation(operationResolveBaseStrictPut
, resultTag
.gpr(), resultPayload
.gpr(), identifier(data
.identifierNumber
), data
.resolveOperations
, data
.putToBaseOperation
);
4497 jsValueResult(resultTag
.gpr(), resultPayload
.gpr(), node
);
4501 case ResolveGlobal
: {
4502 GPRTemporary
globalObject(this);
4503 GPRTemporary
resolveInfo(this);
4504 GPRTemporary
resultTag(this);
4505 GPRTemporary
resultPayload(this);
4507 GPRReg globalObjectGPR
= globalObject
.gpr();
4508 GPRReg resolveInfoGPR
= resolveInfo
.gpr();
4509 GPRReg resultTagGPR
= resultTag
.gpr();
4510 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4512 ResolveGlobalData
& data
= m_jit
.graph().m_resolveGlobalData
[node
->resolveGlobalDataIndex()];
4513 ResolveOperation
* resolveOperationAddress
= &(data
.resolveOperations
->data()[data
.resolvePropertyIndex
]);
4515 // Check Structure of global object
4516 m_jit
.move(JITCompiler::TrustedImmPtr(m_jit
.globalObjectFor(node
->codeOrigin
)), globalObjectGPR
);
4517 m_jit
.move(JITCompiler::TrustedImmPtr(resolveOperationAddress
), resolveInfoGPR
);
4518 m_jit
.loadPtr(JITCompiler::Address(resolveInfoGPR
, OBJECT_OFFSETOF(ResolveOperation
, m_structure
)), resultPayloadGPR
);
4520 JITCompiler::Jump structuresNotMatch
= m_jit
.branchPtr(JITCompiler::NotEqual
, resultPayloadGPR
, JITCompiler::Address(globalObjectGPR
, JSCell::structureOffset()));
4523 m_jit
.loadPtr(JITCompiler::Address(globalObjectGPR
, JSObject::butterflyOffset()), resultPayloadGPR
);
4524 m_jit
.load32(JITCompiler::Address(resolveInfoGPR
, OBJECT_OFFSETOF(ResolveOperation
, m_offset
)), resolveInfoGPR
);
4525 #if DFG_ENABLE(JIT_ASSERT)
4526 JITCompiler::Jump isOutOfLine
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, resolveInfoGPR
, TrustedImm32(firstOutOfLineOffset
));
4528 isOutOfLine
.link(&m_jit
);
4530 m_jit
.neg32(resolveInfoGPR
);
4531 m_jit
.signExtend32ToPtr(resolveInfoGPR
, resolveInfoGPR
);
4532 m_jit
.load32(JITCompiler::BaseIndex(resultPayloadGPR
, resolveInfoGPR
, JITCompiler::TimesEight
, OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
) + (firstOutOfLineOffset
- 2) * static_cast<ptrdiff_t>(sizeof(JSValue
))), resultTagGPR
);
4533 m_jit
.load32(JITCompiler::BaseIndex(resultPayloadGPR
, resolveInfoGPR
, JITCompiler::TimesEight
, OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
) + (firstOutOfLineOffset
- 2) * static_cast<ptrdiff_t>(sizeof(JSValue
))), resultPayloadGPR
);
4535 addSlowPathGenerator(
4537 structuresNotMatch
, this, operationResolveGlobal
,
4538 JSValueRegs(resultTagGPR
, resultPayloadGPR
), resolveInfoGPR
, globalObjectGPR
,
4539 &m_jit
.codeBlock()->identifier(data
.identifierNumber
)));
4541 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4545 case CreateActivation
: {
4546 JSValueOperand
value(this, node
->child1());
4547 GPRTemporary
result(this, value
, false);
4549 GPRReg valueTagGPR
= value
.tagGPR();
4550 GPRReg valuePayloadGPR
= value
.payloadGPR();
4551 GPRReg resultGPR
= result
.gpr();
4553 m_jit
.move(valuePayloadGPR
, resultGPR
);
4555 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4557 addSlowPathGenerator(
4558 slowPathCall(notCreated
, this, operationCreateActivation
, resultGPR
));
4560 cellResult(resultGPR
, node
);
4564 case CreateArguments
: {
4565 JSValueOperand
value(this, node
->child1());
4566 GPRTemporary
result(this, value
, false);
4568 GPRReg valueTagGPR
= value
.tagGPR();
4569 GPRReg valuePayloadGPR
= value
.payloadGPR();
4570 GPRReg resultGPR
= result
.gpr();
4572 m_jit
.move(valuePayloadGPR
, resultGPR
);
4574 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4576 if (node
->codeOrigin
.inlineCallFrame
) {
4577 addSlowPathGenerator(
4579 notCreated
, this, operationCreateInlinedArguments
, resultGPR
,
4580 node
->codeOrigin
.inlineCallFrame
));
4582 addSlowPathGenerator(
4583 slowPathCall(notCreated
, this, operationCreateArguments
, resultGPR
));
4586 cellResult(resultGPR
, node
);
4590 case TearOffActivation
: {
4591 JSValueOperand
activationValue(this, node
->child1());
4592 GPRTemporary
scratch(this);
4594 GPRReg activationValueTagGPR
= activationValue
.tagGPR();
4595 GPRReg activationValuePayloadGPR
= activationValue
.payloadGPR();
4596 GPRReg scratchGPR
= scratch
.gpr();
4598 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, activationValueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4600 SharedSymbolTable
* symbolTable
= m_jit
.symbolTableFor(node
->codeOrigin
);
4601 int registersOffset
= JSActivation::registersOffset(symbolTable
);
4603 int captureEnd
= symbolTable
->captureEnd();
4604 for (int i
= symbolTable
->captureStart(); i
< captureEnd
; ++i
) {
4606 JITCompiler::Address(
4607 GPRInfo::callFrameRegister
, i
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4610 scratchGPR
, JITCompiler::Address(
4611 activationValuePayloadGPR
, registersOffset
+ i
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
4613 JITCompiler::Address(
4614 GPRInfo::callFrameRegister
, i
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4617 scratchGPR
, JITCompiler::Address(
4618 activationValuePayloadGPR
, registersOffset
+ i
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
4620 m_jit
.addPtr(TrustedImm32(registersOffset
), activationValuePayloadGPR
, scratchGPR
);
4621 m_jit
.storePtr(scratchGPR
, JITCompiler::Address(activationValuePayloadGPR
, JSActivation::offsetOfRegisters()));
4623 notCreated
.link(&m_jit
);
4628 case TearOffArguments
: {
4629 JSValueOperand
unmodifiedArgumentsValue(this, node
->child1());
4630 JSValueOperand
activationValue(this, node
->child2());
4631 GPRReg unmodifiedArgumentsValuePayloadGPR
= unmodifiedArgumentsValue
.payloadGPR();
4632 GPRReg activationValuePayloadGPR
= activationValue
.payloadGPR();
4634 JITCompiler::Jump created
= m_jit
.branchTest32(
4635 JITCompiler::NonZero
, unmodifiedArgumentsValuePayloadGPR
);
4637 if (node
->codeOrigin
.inlineCallFrame
) {
4638 addSlowPathGenerator(
4640 created
, this, operationTearOffInlinedArguments
, NoResult
,
4641 unmodifiedArgumentsValuePayloadGPR
, activationValuePayloadGPR
, node
->codeOrigin
.inlineCallFrame
));
4643 addSlowPathGenerator(
4645 created
, this, operationTearOffArguments
, NoResult
,
4646 unmodifiedArgumentsValuePayloadGPR
, activationValuePayloadGPR
));
4653 case CheckArgumentsNotCreated
: {
4654 ASSERT(!isEmptySpeculation(
4655 m_state
.variables().operand(
4656 m_jit
.graph().argumentsRegisterFor(node
->codeOrigin
)).m_type
));
4658 Uncountable
, JSValueRegs(), 0,
4660 JITCompiler::NotEqual
,
4661 JITCompiler::tagFor(m_jit
.argumentsRegisterFor(node
->codeOrigin
)),
4662 TrustedImm32(JSValue::EmptyValueTag
)));
4667 case GetMyArgumentsLength
: {
4668 GPRTemporary
result(this);
4669 GPRReg resultGPR
= result
.gpr();
4671 if (!isEmptySpeculation(
4672 m_state
.variables().operand(
4673 m_jit
.graph().argumentsRegisterFor(node
->codeOrigin
)).m_type
)) {
4675 ArgumentsEscaped
, JSValueRegs(), 0,
4677 JITCompiler::NotEqual
,
4678 JITCompiler::tagFor(m_jit
.argumentsRegisterFor(node
->codeOrigin
)),
4679 TrustedImm32(JSValue::EmptyValueTag
)));
4682 ASSERT(!node
->codeOrigin
.inlineCallFrame
);
4683 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultGPR
);
4684 m_jit
.sub32(TrustedImm32(1), resultGPR
);
4685 integerResult(resultGPR
, node
);
4689 case GetMyArgumentsLengthSafe
: {
4690 GPRTemporary
resultPayload(this);
4691 GPRTemporary
resultTag(this);
4692 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4693 GPRReg resultTagGPR
= resultTag
.gpr();
4695 JITCompiler::Jump created
= m_jit
.branch32(
4696 JITCompiler::NotEqual
,
4697 JITCompiler::tagFor(m_jit
.argumentsRegisterFor(node
->codeOrigin
)),
4698 TrustedImm32(JSValue::EmptyValueTag
));
4700 if (node
->codeOrigin
.inlineCallFrame
) {
4702 Imm32(node
->codeOrigin
.inlineCallFrame
->arguments
.size() - 1),
4705 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), resultPayloadGPR
);
4706 m_jit
.sub32(TrustedImm32(1), resultPayloadGPR
);
4708 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), resultTagGPR
);
4710 // FIXME: the slow path generator should perform a forward speculation that the
4711 // result is an integer. For now we postpone the speculation by having this return
4714 addSlowPathGenerator(
4716 created
, this, operationGetArgumentsLength
,
4717 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4718 m_jit
.argumentsRegisterFor(node
->codeOrigin
)));
4720 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4724 case GetMyArgumentByVal
: {
4725 SpeculateStrictInt32Operand
index(this, node
->child1());
4726 GPRTemporary
resultPayload(this);
4727 GPRTemporary
resultTag(this);
4728 GPRReg indexGPR
= index
.gpr();
4729 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4730 GPRReg resultTagGPR
= resultTag
.gpr();
4732 if (!isEmptySpeculation(
4733 m_state
.variables().operand(
4734 m_jit
.graph().argumentsRegisterFor(node
->codeOrigin
)).m_type
)) {
4736 ArgumentsEscaped
, JSValueRegs(), 0,
4738 JITCompiler::NotEqual
,
4739 JITCompiler::tagFor(m_jit
.argumentsRegisterFor(node
->codeOrigin
)),
4740 TrustedImm32(JSValue::EmptyValueTag
)));
4743 m_jit
.add32(TrustedImm32(1), indexGPR
, resultPayloadGPR
);
4745 if (node
->codeOrigin
.inlineCallFrame
) {
4747 Uncountable
, JSValueRegs(), 0,
4749 JITCompiler::AboveOrEqual
,
4751 Imm32(node
->codeOrigin
.inlineCallFrame
->arguments
.size())));
4754 Uncountable
, JSValueRegs(), 0,
4756 JITCompiler::AboveOrEqual
,
4758 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4761 JITCompiler::JumpList slowArgument
;
4762 JITCompiler::JumpList slowArgumentOutOfBounds
;
4763 if (const SlowArgument
* slowArguments
= m_jit
.symbolTableFor(node
->codeOrigin
)->slowArguments()) {
4764 slowArgumentOutOfBounds
.append(
4766 JITCompiler::AboveOrEqual
, indexGPR
,
4767 Imm32(m_jit
.symbolTableFor(node
->codeOrigin
)->parameterCount())));
4769 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4770 m_jit
.move(ImmPtr(slowArguments
), resultPayloadGPR
);
4772 JITCompiler::BaseIndex(
4773 resultPayloadGPR
, indexGPR
, JITCompiler::TimesEight
,
4774 OBJECT_OFFSETOF(SlowArgument
, index
)),
4778 JITCompiler::BaseIndex(
4779 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4780 m_jit
.offsetOfLocals(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4783 JITCompiler::BaseIndex(
4784 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4785 m_jit
.offsetOfLocals(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4787 slowArgument
.append(m_jit
.jump());
4789 slowArgumentOutOfBounds
.link(&m_jit
);
4791 m_jit
.neg32(resultPayloadGPR
);
4794 JITCompiler::BaseIndex(
4795 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4796 m_jit
.offsetOfArgumentsIncludingThis(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4799 JITCompiler::BaseIndex(
4800 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4801 m_jit
.offsetOfArgumentsIncludingThis(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4804 slowArgument
.link(&m_jit
);
4805 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4808 case GetMyArgumentByValSafe
: {
4809 SpeculateStrictInt32Operand
index(this, node
->child1());
4810 GPRTemporary
resultPayload(this);
4811 GPRTemporary
resultTag(this);
4812 GPRReg indexGPR
= index
.gpr();
4813 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4814 GPRReg resultTagGPR
= resultTag
.gpr();
4816 JITCompiler::JumpList slowPath
;
4819 JITCompiler::NotEqual
,
4820 JITCompiler::tagFor(m_jit
.argumentsRegisterFor(node
->codeOrigin
)),
4821 TrustedImm32(JSValue::EmptyValueTag
)));
4823 m_jit
.add32(TrustedImm32(1), indexGPR
, resultPayloadGPR
);
4824 if (node
->codeOrigin
.inlineCallFrame
) {
4827 JITCompiler::AboveOrEqual
,
4829 Imm32(node
->codeOrigin
.inlineCallFrame
->arguments
.size())));
4833 JITCompiler::AboveOrEqual
,
4835 JITCompiler::payloadFor(JSStack::ArgumentCount
)));
4838 JITCompiler::JumpList slowArgument
;
4839 JITCompiler::JumpList slowArgumentOutOfBounds
;
4840 if (const SlowArgument
* slowArguments
= m_jit
.symbolTableFor(node
->codeOrigin
)->slowArguments()) {
4841 slowArgumentOutOfBounds
.append(
4843 JITCompiler::AboveOrEqual
, indexGPR
,
4844 Imm32(m_jit
.symbolTableFor(node
->codeOrigin
)->parameterCount())));
4846 COMPILE_ASSERT(sizeof(SlowArgument
) == 8, SlowArgument_size_is_eight_bytes
);
4847 m_jit
.move(ImmPtr(slowArguments
), resultPayloadGPR
);
4849 JITCompiler::BaseIndex(
4850 resultPayloadGPR
, indexGPR
, JITCompiler::TimesEight
,
4851 OBJECT_OFFSETOF(SlowArgument
, index
)),
4854 JITCompiler::BaseIndex(
4855 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4856 m_jit
.offsetOfLocals(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4859 JITCompiler::BaseIndex(
4860 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4861 m_jit
.offsetOfLocals(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4863 slowArgument
.append(m_jit
.jump());
4865 slowArgumentOutOfBounds
.link(&m_jit
);
4867 m_jit
.neg32(resultPayloadGPR
);
4870 JITCompiler::BaseIndex(
4871 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4872 m_jit
.offsetOfArgumentsIncludingThis(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)),
4875 JITCompiler::BaseIndex(
4876 GPRInfo::callFrameRegister
, resultPayloadGPR
, JITCompiler::TimesEight
,
4877 m_jit
.offsetOfArgumentsIncludingThis(node
->codeOrigin
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)),
4880 if (node
->codeOrigin
.inlineCallFrame
) {
4881 addSlowPathGenerator(
4883 slowPath
, this, operationGetInlinedArgumentByVal
,
4884 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4885 m_jit
.argumentsRegisterFor(node
->codeOrigin
),
4886 node
->codeOrigin
.inlineCallFrame
, indexGPR
));
4888 addSlowPathGenerator(
4890 slowPath
, this, operationGetArgumentByVal
,
4891 JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4892 m_jit
.argumentsRegisterFor(node
->codeOrigin
), indexGPR
));
4895 slowArgument
.link(&m_jit
);
4896 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4900 case NewFunctionNoCheck
:
4901 compileNewFunctionNoCheck(node
);
4905 JSValueOperand
value(this, node
->child1());
4906 GPRTemporary
resultTag(this, value
);
4907 GPRTemporary
resultPayload(this, value
, false);
4909 GPRReg valueTagGPR
= value
.tagGPR();
4910 GPRReg valuePayloadGPR
= value
.payloadGPR();
4911 GPRReg resultTagGPR
= resultTag
.gpr();
4912 GPRReg resultPayloadGPR
= resultPayload
.gpr();
4914 m_jit
.move(valuePayloadGPR
, resultPayloadGPR
);
4915 m_jit
.move(valueTagGPR
, resultTagGPR
);
4917 JITCompiler::Jump notCreated
= m_jit
.branch32(JITCompiler::Equal
, valueTagGPR
, TrustedImm32(JSValue::EmptyValueTag
));
4919 addSlowPathGenerator(
4921 notCreated
, this, operationNewFunction
, JSValueRegs(resultTagGPR
, resultPayloadGPR
),
4922 m_jit
.codeBlock()->functionDecl(node
->functionDeclIndex())));
4924 jsValueResult(resultTagGPR
, resultPayloadGPR
, node
);
4928 case NewFunctionExpression
:
4929 compileNewFunctionExpression(node
);
4933 // We should never get to the point of code emission for a GarbageValue
4937 case ForceOSRExit
: {
4938 terminateSpeculativeExecution(InadequateCoverage
, JSValueRegs(), 0);
4942 case CheckWatchdogTimer
:
4944 WatchdogTimerFired
, JSValueRegs(), 0,
4946 JITCompiler::NonZero
,
4947 JITCompiler::AbsoluteAddress(m_jit
.vm()->watchdog
.timerDidFireAddress())));
4950 case CountExecution
:
4951 m_jit
.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node
->executionCounter()->address()));
4955 DFG_NODE_DO_TO_CHILDREN(m_jit
.graph(), node
, speculate
);
4965 RELEASE_ASSERT_NOT_REACHED();
4970 RELEASE_ASSERT_NOT_REACHED();
4974 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
4975 m_jit
.clearRegisterAllocationOffsets();
4981 if (node
->hasResult() && node
->mustGenerate())
4987 } } // namespace JSC::DFG