/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_jit(jit)
    , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
    , m_blockHeads(jit.graph().m_blocks.size())
    , m_arguments(jit.codeBlock()->numParameters())
    , m_variables(jit.graph().m_localVars)
    , m_lastSetOperand(std::numeric_limits<int>::max())
    , m_state(m_jit.graph())
    , m_stream(&jit.codeBlock()->variableEventStream())
    , m_minifiedGraph(&jit.codeBlock()->minifiedDFG())
    , m_isCheckingArgumentTypes(false)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

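// Allocates a JSArray together with its butterfly storage on the fast path,
// collecting every failure case into slowCases, which falls back to
// operationNewArrayWithSize. For double arrays, the unused tail of the vector
// is pre-filled with QNaN so holes read back correctly.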
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}

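// The speculationCheck/backwardSpeculationCheck family records an OSR exit for
// a speculation failure: the failing jump(s) are appended as exit info and a
// matching OSRExit entry is added to the code block, keyed to the current
// position in the variable event stream.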
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.codeBlock()->numberOfOSRExits();
    m_jit.appendExitInfo();
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.codeBlock()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), m_jit.codeBlock()->numberOfSpeculationRecoveries()));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
}

JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(JITCompiler::JumpList());
    OSRExit& exit = m_jit.codeBlock()->osrExit(
        m_jit.codeBlock()->appendOSRExit(OSRExit(
            kind, jsValueSource,
            m_jit.graph().methodOfGettingAValueProfileFor(node),
            this, m_stream->size())));
    exit.m_watchpointIndex = m_jit.codeBlock()->appendWatchpoint(
        JumpReplacementWatchpoint(m_jit.watchpointLabel()));
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
    return &m_jit.codeBlock()->watchpoint(exit.m_watchpointIndex);
}

JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
{
    return speculationWatchpoint(kind, JSValueSource(), 0);
}

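// Retargets the most recently emitted OSR exit to resume at the next code
// origin instead of re-executing the current bytecode. When a value recovery
// is supplied, it is recorded as an override for the operand written by the
// intervening SetLocal.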
void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
{
    if (!valueRecovery) {
        // Check that either the current node is a SetLocal, or the preceding node was a
        // SetLocal with the same code origin.
        if (!m_currentNode->containsMovHint()) {
            Node* setLocal = m_jit.graph().m_blocks[m_block]->at(m_indexInBlock - 1);
            ASSERT_UNUSED(setLocal, setLocal->containsMovHint());
            ASSERT_UNUSED(setLocal, setLocal->codeOrigin == m_currentNode->codeOrigin);
        }

        // Find the next node.
        unsigned indexInBlock = m_indexInBlock + 1;
        Node* node = 0;
        for (;;) {
            if (indexInBlock == m_jit.graph().m_blocks[m_block]->size()) {
                // This is an inline return. Give up and do a backwards speculation. This is safe
                // because an inline return has its own bytecode index and it's always safe to
                // reexecute that bytecode.
                ASSERT(node->op() == Jump);
                return;
            }
            node = m_jit.graph().m_blocks[m_block]->at(indexInBlock);
            if (node->codeOrigin != m_currentNode->codeOrigin)
                break;
            indexInBlock++;
        }

        ASSERT(node->codeOrigin != m_currentNode->codeOrigin);
        OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
        exit.m_codeOrigin = node->codeOrigin;
        return;
    }

    unsigned setLocalIndexInBlock = m_indexInBlock + 1;

    Node* setLocal = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock);
    bool hadInt32ToDouble = false;

    if (setLocal->op() == ForwardInt32ToDouble) {
        setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
        hadInt32ToDouble = true;
    }
    if (setLocal->op() == Flush || setLocal->op() == Phantom)
        setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);

    if (hadInt32ToDouble)
        ASSERT(setLocal->child1()->child1() == m_currentNode);
    else
        ASSERT(setLocal->child1() == m_currentNode);
    ASSERT(setLocal->containsMovHint());
    ASSERT(setLocal->codeOrigin == m_currentNode->codeOrigin);

    Node* nextNode = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1);
    if (nextNode->op() == Jump && nextNode->codeOrigin == m_currentNode->codeOrigin) {
        // We're at an inlined return. Use a backward speculation instead.
        return;
    }
    ASSERT(nextNode->codeOrigin != m_currentNode->codeOrigin);

    OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
    exit.m_codeOrigin = nextNode->codeOrigin;

    exit.m_lastSetOperand = setLocal->local();
    exit.m_valueRecoveryOverride = adoptRef(
        new ValueRecoveryOverride(setLocal->local(), valueRecovery));
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpeculativeJIT was terminated.\n");
#endif
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_state.forNode(edge).filter(typesPassedThrough);
    backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(slowPathGenerator);
}

void SpeculativeJIT::runSlowPathGenerators()
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
#endif
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

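// "Silent" spill/fill plans save and restore registers around a call without
// disturbing the recorded generation info: silentSavePlanForGPR/FPR decide
// which store (spill) and load (fill) a register needs, and silentSpill/
// silentFill below execute those plans.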
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = m_generationInfo[spillMe];
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInteger)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInteger) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInteger(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(isInt32Constant(node));
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
        fillAction = DoNothingForFill;
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(isBooleanConstant(node));
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            JSValue value = valueOfJSConstant(node);
            ASSERT_UNUSED(value, value.isCell());
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (valueOfJSConstant(node).isCell())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInteger) {
            ASSERT(registerFormat == DataFormatJSInteger);
            fillAction = Load32PayloadBoxInt;
        } else if (info.spillFormat() == DataFormatDouble) {
            ASSERT(registerFormat == DataFormatJSDouble);
            fillAction = LoadDoubleBoxDouble;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInteger:
                ASSERT(registerFormat == DataFormatJSInteger);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = m_generationInfo[spillMe];
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
        // it was already spilled previously and not as a double, which means we need unboxing.
        ASSERT(info.spillFormat() & DataFormatJS);
        fillAction = LoadJSUnboxDouble;
    } else
        fillAction = LoadDouble;
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

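// Executes the spill half of a plan; silentFill below executes the load half,
// using canTrample as a scratch register on 64-bit (e.g. to rebox a double).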
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
        break;
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadDoubleBoxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadJSUnboxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
        unboxDouble(canTrample, plan.fpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode)
{
    switch (arrayMode.type()) {
    case Array::Int8Array:
        return &m_jit.vm()->int8ArrayDescriptor();
    case Array::Int16Array:
        return &m_jit.vm()->int16ArrayDescriptor();
    case Array::Int32Array:
        return &m_jit.vm()->int32ArrayDescriptor();
    case Array::Uint8Array:
        return &m_jit.vm()->uint8ArrayDescriptor();
    case Array::Uint8ClampedArray:
        return &m_jit.vm()->uint8ClampedArrayDescriptor();
    case Array::Uint16Array:
        return &m_jit.vm()->uint16ArrayDescriptor();
    case Array::Uint32Array:
        return &m_jit.vm()->uint32ArrayDescriptor();
    case Array::Float32Array:
        return &m_jit.vm()->float32ArrayDescriptor();
    case Array::Float64Array:
        return &m_jit.vm()->float64ArrayDescriptor();
    default:
        return 0;
    }
}

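// Emits the branch (or branch list) taken when the indexing type in tempGPR is
// not what arrayMode expects; for JSArray modes this checks the IsArray bit as
// well as the indexing shape.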
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    default:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    return result;
}

void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    const TypedArrayDescriptor* result = typedArrayDescriptor(node->arrayMode());

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::String:
        expectedClassInfo = &JSString::s_info;
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
        m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        expectedClassInfo = &Arguments::s_info;
        break;
    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array:
        expectedClassInfo = result->m_classInfo;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    GPRTemporary temp(this);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
    speculationCheck(
        Uncountable, JSValueRegs(), 0,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

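// Converts the base object's storage to the indexing shape the node's array
// mode requires, testing the current structure first and deferring the actual
// conversion to ArrayifySlowPathGenerator.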
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakPtr(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureOffset()),
            node->structure()));
    } else {
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
        m_jit.load8(
            MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateIntegerOperand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

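// These write barriers compile to nothing in this configuration: the
// parameters are consumed by UNUSED_PARAM and only counter instrumentation is
// emitted when WRITE_BARRIER_PROFILING is enabled, plus cheap early-outs for
// values known not to be cells or constant cells that are already marked.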
void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(jit);
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);
    ASSERT(owner != scratch1);
    ASSERT(owner != scratch2);
    ASSERT(scratch1 != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.node()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(value);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    if (Heap::isMarked(value))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.node()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

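// If the compare's only use is the branch that immediately follows it (per
// detectPeepHoleBranch), the compare and branch are fused into a single
// compare-and-branch sequence and the current node is advanced past the
// branch; the return value reports whether that fusion happened.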
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF(" gprs:\n");
    m_gprs.dump();
    dataLogF(" fprs:\n");
    m_fprs.dump();
    dataLogF(" VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF(" % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

#if DFG_ENABLE(CONSISTENCY_CHECK)
void SpeculativeJIT::checkConsistency()
{
    bool failed = false;

    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }
    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }

    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        VirtualRegister virtualRegister = (VirtualRegister)i;
        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (!info.alive())
            continue;
        switch (info.registerFormat()) {
        case DataFormatNone:
            break;
        case DataFormatJS:
        case DataFormatJSInteger:
        case DataFormatJSDouble:
        case DataFormatJSCell:
        case DataFormatJSBoolean:
#if USE(JSVALUE32_64)
            break;
#endif
        case DataFormatInteger:
        case DataFormatCell:
        case DataFormatBoolean:
        case DataFormatStorage: {
            GPRReg gpr = info.gpr();
            ASSERT(gpr != InvalidGPRReg);
            if (m_gprs.name(gpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
                failed = true;
            }
            break;
        }
        case DataFormatDouble: {
            FPRReg fpr = info.fpr();
            ASSERT(fpr != InvalidFPRReg);
            if (m_fprs.name(fpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
                failed = true;
            }
            break;
        }
        case DataFormatOSRMarker:
        case DataFormatDead:
        case DataFormatArguments:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = m_generationInfo[virtualRegister];
#if USE(JSVALUE64)
        if (iter.regID() != info.gpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
#else
        if (!(info.registerFormat() & DataFormatJS)) {
            if (iter.regID() != info.gpr()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        } else {
            if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        }
#endif
    }

    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (iter.regID() != info.fpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
    }

    if (failed) {
        dump();
        CRASH();
    }
}
#endif

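// GPRTemporary/FPRTemporary are RAII helpers for scratch registers: each
// constructor either reuses the register of an operand whose value dies at
// this node (canReuse) or allocates a fresh register.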
GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else if (m_jit->canReuse(op2.node()))
        m_gpr = m_jit->reuse(op2.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else if (m_jit->canReuse(op2.node()))
        m_gpr = m_jit->reuse(op2.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

#if USE(JSVALUE64)
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}
#elif USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
    else
        m_gpr = m_jit->allocate();
}
#endif

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->allocate();
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
    } else {
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isBooleanConstant(node->child1().node())) {
        bool imm = valueOfBooleanConstant(node->child1().node());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (isBooleanConstant(node->child2().node())) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = valueOfBooleanConstant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleIntegerBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isInt32Constant(node->child1().node())) {
        int32_t imm = valueOfInt32Constant(node->child1().node());
        SpeculateIntegerOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (isInt32Constant(node->child2().node())) {
        SpeculateIntegerOperand op1(this, node->child1());
        int32_t imm = valueOfInt32Constant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateIntegerOperand op1(this, node->child1());
        SpeculateIntegerOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleIntegerBranch(node, branchNode, condition);
        else if (node->isBinaryUseKind(NumberUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }

    return false;
}

void SpeculativeJIT::noticeOSRBirth(Node* node)
{
    if (!node->hasVirtualRegister())
        return;

    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    info.noticeOSRBirth(*m_stream, node, virtualRegister);
}

void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);

    m_lastSetOperand = node->local();

    Node* child = node->child1().node();
    noticeOSRBirth(child);

    if (child->op() == UInt32ToNumber)
        noticeOSRBirth(child->child1().node());

    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
}

void SpeculativeJIT::compileMovHintAndCheck(Node* node)
{
    compileMovHint(node);
    speculate(node, node->child1());
    noResult(node);
}

void SpeculativeJIT::compileInlineStart(Node* node)
{
    InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
    int argumentCountIncludingThis = inlineCallFrame->arguments.size();
    unsigned argumentPositionStart = node->argumentPositionStart();
    CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        ValueRecovery recovery;
        if (codeBlock->isCaptured(argumentToOperand(i)))
            recovery = ValueRecovery::alreadyInJSStack();
        else {
            ArgumentPosition& argumentPosition =
                m_jit.graph().m_argumentPositions[argumentPositionStart + i];
            ValueSource valueSource;
            if (!argumentPosition.shouldUnboxIfPossible())
                valueSource = ValueSource(ValueInJSStack);
            else if (argumentPosition.shouldUseDoubleFormat())
                valueSource = ValueSource(DoubleInJSStack);
            else if (isInt32Speculation(argumentPosition.prediction()))
                valueSource = ValueSource(Int32InJSStack);
            else if (isCellSpeculation(argumentPosition.prediction()))
                valueSource = ValueSource(CellInJSStack);
            else if (isBooleanSpeculation(argumentPosition.prediction()))
                valueSource = ValueSource(BooleanInJSStack);
            else
                valueSource = ValueSource(ValueInJSStack);
            recovery = computeValueRecoveryFor(valueSource);
        }
        // The recovery should refer either to something that has already been
        // stored into the stack at the right place, or to a constant,
        // since the Arguments code isn't smart enough to handle anything else.
        // The exception is the this argument, which we don't really need to be
        // able to recover.
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\nRecovery for argument %d: ", i);
        recovery.dump(WTF::dataFile());
#endif
        inlineCallFrame->arguments[i] = recovery;
    }
}

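// Per-block code generation: record the block's head label, reset the value
// sources for arguments and locals, then walk the block's nodes, skipping
// dead nodes (while keeping the minified graph and OSR bookkeeping in sync)
// and generating code for live ones.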
void SpeculativeJIT::compile(BasicBlock& block)
{
    ASSERT(m_compileOkay);

    if (!block.isReachable)
        return;

    if (!block.cfaHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
#if !ASSERT_DISABLED
        m_jit.breakpoint();
#endif
        return;
    }

    m_blockHeads[m_block] = m_jit.label();
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Setting up state for block #%u: ", m_block);
#endif

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();

    ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
    for (size_t i = 0; i < m_arguments.size(); ++i) {
        ValueSource valueSource = ValueSource(ValueInJSStack);
        m_arguments[i] = valueSource;
        m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
    }

    m_state.beginBasicBlock(&block);

    ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
    for (size_t i = 0; i < m_variables.size(); ++i) {
        Node* node = block.variablesAtHead.local(i);
        ValueSource valueSource;
        if (!node)
            valueSource = ValueSource(SourceIsDead);
        else if (node->variableAccessData()->isArgumentsAlias())
            valueSource = ValueSource(ArgumentsSource);
        else if (!node->refCount())
            valueSource = ValueSource(SourceIsDead);
        else if (!node->variableAccessData()->shouldUnboxIfPossible())
            valueSource = ValueSource(ValueInJSStack);
        else if (node->variableAccessData()->shouldUseDoubleFormat())
            valueSource = ValueSource(DoubleInJSStack);
        else
            valueSource = ValueSource::forSpeculation(node->variableAccessData()->argumentAwarePrediction());
        m_variables[i] = valueSource;
        // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
        m_stream->appendAndLog(VariableEvent::setLocal(i, valueSource.dataFormat()));
    }

    m_lastSetOperand = std::numeric_limits<int>::max();
    m_codeOriginForOSR = CodeOrigin();

    if (DFG_ENABLE_EDGE_CODE_VERIFICATION) {
        JITCompiler::Jump verificationSucceeded =
            m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, TrustedImm32(m_block));
        m_jit.breakpoint();
        verificationSucceeded.link(&m_jit);
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("\n");
#endif

    for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
        m_currentNode = block[m_indexInBlock];
#if !ASSERT_DISABLED
        m_canExit = m_currentNode->canExit();
#endif
        bool shouldExecuteEffects = m_state.startExecuting(m_currentNode);
        m_jit.setForNode(m_currentNode);
        m_codeOriginForOSR = m_currentNode->codeOrigin;
        if (!m_currentNode->shouldGenerate()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
            switch (m_currentNode->op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case WeakJSConstant:
                m_jit.addWeakReference(m_currentNode->weakConstant());
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case SetLocal:
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case MovHint:
                compileMovHint(m_currentNode);
                break;

            case ZombieHint: {
                m_lastSetOperand = m_currentNode->local();
                m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
                break;
            }

            default:
                if (belongsInMinifiedGraph(m_currentNode->op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
            }
        } else {
            if (verboseCompilationEnabled()) {
                dataLogF(
                    "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                    (int)m_currentNode->index(),
                    m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLogF("   ");
#else
                dataLogF("\n");
#endif
            }
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
            m_jit.breakpoint();
#endif
#if DFG_ENABLE(XOR_DEBUG_AID)
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
#endif

            m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;

            compile(m_currentNode);
            if (!m_compileOkay) {
                m_compileOkay = true;
                clearGenerationInfo();
                return;
            }

            if (belongsInMinifiedGraph(m_currentNode->op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                noticeOSRBirth(m_currentNode);
            }

#if DFG_ENABLE(DEBUG_VERBOSE)
            if (m_currentNode->hasResult()) {
                GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
                dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
                if (info.registerFormat() != DataFormatNone) {
                    if (info.registerFormat() == DataFormatDouble)
                        dataLogF(", %s", FPRInfo::debugName(info.fpr()));
#if USE(JSVALUE32_64)
                    else if (info.registerFormat() & DataFormatJS)
                        dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
#endif
                    else
                        dataLogF(", %s", GPRInfo::debugName(info.gpr()));
                }
            }
#endif
        }

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\n");
#endif

        // Make sure that the abstract state is rematerialized for the next node.
        if (shouldExecuteEffects)
            m_state.executeEffects(m_indexInBlock);

        if (m_currentNode->shouldGenerate())
            checkConsistency();
    }

    // Perform the most basic verification that children have been used correctly.
#if !ASSERT_DISABLED
    for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
        GenerationInfo& info = m_generationInfo[index];
        ASSERT(!info.alive());
    }
#endif
}

1849 // If we are making type predictions about our arguments then
1850 // we need to check that they are correct on function entry.
1851 void SpeculativeJIT::checkArgumentTypes()
1853 ASSERT(!m_currentNode
);
1854 m_isCheckingArgumentTypes
= true;
1855 m_speculationDirection
= BackwardSpeculation
;
1856 m_codeOriginForOSR
= CodeOrigin(0);
1858 for (size_t i
= 0; i
< m_arguments
.size(); ++i
)
1859 m_arguments
[i
] = ValueSource(ValueInJSStack
);
1860 for (size_t i
= 0; i
< m_variables
.size(); ++i
)
1861 m_variables
[i
] = ValueSource(ValueInJSStack
);
1863 for (int i
= 0; i
< m_jit
.codeBlock()->numParameters(); ++i
) {
1864 Node
* node
= m_jit
.graph().m_arguments
[i
];
1865 ASSERT(node
->op() == SetArgument
);
1866 if (!node
->shouldGenerate()) {
1867 // The argument is dead. We don't do any checks for such arguments.
1871 VariableAccessData
* variableAccessData
= node
->variableAccessData();
1872 if (!variableAccessData
->isProfitableToUnbox())
1875 VirtualRegister virtualRegister
= variableAccessData
->local();
1876 SpeculatedType predictedType
= variableAccessData
->prediction();
1878 JSValueSource valueSource
= JSValueSource(JITCompiler::addressFor(virtualRegister
));
1881 if (isInt32Speculation(predictedType
))
1882 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch64(MacroAssembler::Below
, JITCompiler::addressFor(virtualRegister
), GPRInfo::tagTypeNumberRegister
));
1883 else if (isBooleanSpeculation(predictedType
)) {
1884 GPRTemporary
temp(this);
1885 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), temp
.gpr());
1886 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), temp
.gpr());
1887 speculationCheck(BadType
, valueSource
, node
, m_jit
.branchTest64(MacroAssembler::NonZero
, temp
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1888 } else if (isCellSpeculation(predictedType
))
1889 speculationCheck(BadType
, valueSource
, node
, m_jit
.branchTest64(MacroAssembler::NonZero
, JITCompiler::addressFor(virtualRegister
), GPRInfo::tagMaskRegister
));
1891 if (isInt32Speculation(predictedType
))
1892 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::Int32Tag
)));
1893 else if (isBooleanSpeculation(predictedType
))
1894 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::BooleanTag
)));
1895 else if (isCellSpeculation(predictedType
))
1896 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::CellTag
)));
1899 m_isCheckingArgumentTypes
= false;
1902 bool SpeculativeJIT::compile()
1904 checkArgumentTypes();
1906 if (DFG_ENABLE_EDGE_CODE_VERIFICATION
)
1907 m_jit
.move(TrustedImm32(0), GPRInfo::regT0
);
1909 ASSERT(!m_currentNode
);
1910 for (m_block
= 0; m_block
< m_jit
.graph().m_blocks
.size(); ++m_block
) {
1911 m_jit
.setForBlock(m_block
);
1912 BasicBlock
* block
= m_jit
.graph().m_blocks
[m_block
].get();
1920 void SpeculativeJIT::createOSREntries()
1922 for (BlockIndex blockIndex
= 0; blockIndex
< m_jit
.graph().m_blocks
.size(); ++blockIndex
) {
1923 BasicBlock
* block
= m_jit
.graph().m_blocks
[blockIndex
].get();
1926 if (!block
->isOSRTarget
)
1929 // Currently we only need to create OSR entry trampolines when using edge code
1930 // verification. But in the future, we'll need this for other things as well (like
1931 // when we have global reg alloc).
1932 // If we don't need OSR entry trampolin
1933 if (!DFG_ENABLE_EDGE_CODE_VERIFICATION
) {
1934 m_osrEntryHeads
.append(m_blockHeads
[blockIndex
]);
1938 m_osrEntryHeads
.append(m_jit
.label());
1939 m_jit
.move(TrustedImm32(blockIndex
), GPRInfo::regT0
);
1940 m_jit
.jump().linkTo(m_blockHeads
[blockIndex
], &m_jit
);
1944 void SpeculativeJIT::linkOSREntries(LinkBuffer
& linkBuffer
)
1946 unsigned osrEntryIndex
= 0;
1947 for (BlockIndex blockIndex
= 0; blockIndex
< m_jit
.graph().m_blocks
.size(); ++blockIndex
) {
1948 BasicBlock
* block
= m_jit
.graph().m_blocks
[blockIndex
].get();
1951 if (!block
->isOSRTarget
)
1953 m_jit
.noticeOSREntry(*block
, m_osrEntryHeads
[osrEntryIndex
++], linkBuffer
);
1955 ASSERT(osrEntryIndex
== m_osrEntryHeads
.size());
1958 ValueRecovery
SpeculativeJIT::computeValueRecoveryFor(const ValueSource
& valueSource
)
1960 if (valueSource
.isInJSStack())
1961 return valueSource
.valueRecovery();
1963 ASSERT(valueSource
.kind() == HaveNode
);
1964 Node
* node
= valueSource
.id().node(m_jit
.graph());
1965 if (isConstant(node
))
1966 return ValueRecovery::constant(valueOfJSConstant(node
));
1968 return ValueRecovery();
1971 void SpeculativeJIT::compileDoublePutByVal(Node
* node
, SpeculateCellOperand
& base
, SpeculateStrictInt32Operand
& property
)
1973 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
1974 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
1976 ArrayMode arrayMode
= node
->arrayMode();
1978 GPRReg baseReg
= base
.gpr();
1979 GPRReg propertyReg
= property
.gpr();
1981 SpeculateDoubleOperand
value(this, child3
);
1983 FPRReg valueReg
= value
.fpr();
1986 JSValueRegs(), child3
, SpecRealNumber
,
1988 MacroAssembler::DoubleNotEqualOrUnordered
, valueReg
, valueReg
));
1993 StorageOperand
storage(this, child4
);
1994 GPRReg storageReg
= storage
.gpr();
1996 if (node
->op() == PutByValAlias
) {
1997 // Store the value to the array.
1998 GPRReg propertyReg
= property
.gpr();
1999 FPRReg valueReg
= value
.fpr();
2000 m_jit
.storeDouble(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2002 noResult(m_currentNode
);
2006 GPRTemporary temporary
;
2007 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
2009 MacroAssembler::Jump slowCase
;
2011 if (arrayMode
.isInBounds()) {
2013 StoreToHoleOrOutOfBounds
, JSValueRegs(), 0,
2014 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
2016 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2018 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
2020 if (!arrayMode
.isOutOfBounds())
2021 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
2023 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
2024 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
2026 inBounds
.link(&m_jit
);
2029 m_jit
.storeDouble(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
2036 if (arrayMode
.isOutOfBounds()) {
2037 addSlowPathGenerator(
2040 m_jit
.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict
: operationPutDoubleByValBeyondArrayBoundsNonStrict
,
2041 NoResult
, baseReg
, propertyReg
, valueReg
));
2044 noResult(m_currentNode
, UseChildrenCalledExplicitly
);
2047 void SpeculativeJIT::compileGetCharCodeAt(Node
* node
)
2049 SpeculateCellOperand
string(this, node
->child1());
2050 SpeculateStrictInt32Operand
index(this, node
->child2());
2051 StorageOperand
storage(this, node
->child3());
2053 GPRReg stringReg
= string
.gpr();
2054 GPRReg indexReg
= index
.gpr();
2055 GPRReg storageReg
= storage
.gpr();
2057 ASSERT(speculationChecked(m_state
.forNode(node
->child1()).m_type
, SpecString
));
2059 // unsigned comparison so we can filter out negative indices and indices that are too large
2060 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexReg
, MacroAssembler::Address(stringReg
, JSString::offsetOfLength())));
2062 GPRTemporary
scratch(this);
2063 GPRReg scratchReg
= scratch
.gpr();
2065 m_jit
.loadPtr(MacroAssembler::Address(stringReg
, JSString::offsetOfValue()), scratchReg
);
2067 // Load the character into scratchReg
2068 JITCompiler::Jump is16Bit
= m_jit
.branchTest32(MacroAssembler::Zero
, MacroAssembler::Address(scratchReg
, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2070 m_jit
.load8(MacroAssembler::BaseIndex(storageReg
, indexReg
, MacroAssembler::TimesOne
, 0), scratchReg
);
2071 JITCompiler::Jump cont8Bit
= m_jit
.jump();
2073 is16Bit
.link(&m_jit
);
2075 m_jit
.load16(MacroAssembler::BaseIndex(storageReg
, indexReg
, MacroAssembler::TimesTwo
, 0), scratchReg
);
2077 cont8Bit
.link(&m_jit
);
2079 integerResult(scratchReg
, m_currentNode
);
2082 void SpeculativeJIT::compileGetByValOnString(Node
* node
)
2084 SpeculateCellOperand
base(this, node
->child1());
2085 SpeculateStrictInt32Operand
property(this, node
->child2());
2086 StorageOperand
storage(this, node
->child3());
2087 GPRReg baseReg
= base
.gpr();
2088 GPRReg propertyReg
= property
.gpr();
2089 GPRReg storageReg
= storage
.gpr();
2091 ASSERT(ArrayMode(Array::String
).alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
2093 // unsigned comparison so we can filter out negative indices and indices that are too large
2094 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(baseReg
, JSString::offsetOfLength())));
2096 GPRTemporary
scratch(this);
2097 GPRReg scratchReg
= scratch
.gpr();
2099 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSString::offsetOfValue()), scratchReg
);
2101 // Load the character into scratchReg
2102 JITCompiler::Jump is16Bit
= m_jit
.branchTest32(MacroAssembler::Zero
, MacroAssembler::Address(scratchReg
, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2104 m_jit
.load8(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesOne
, 0), scratchReg
);
2105 JITCompiler::Jump cont8Bit
= m_jit
.jump();
2107 is16Bit
.link(&m_jit
);
2109 m_jit
.load16(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesTwo
, 0), scratchReg
);
2111 // We only support ascii characters
2112 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, scratchReg
, TrustedImm32(0x100)));
2114 // 8 bit string values don't need the isASCII check.
2115 cont8Bit
.link(&m_jit
);
2117 GPRTemporary
smallStrings(this);
2118 GPRReg smallStringsReg
= smallStrings
.gpr();
2119 m_jit
.move(MacroAssembler::TrustedImmPtr(m_jit
.vm()->smallStrings
.singleCharacterStrings()), smallStringsReg
);
2120 m_jit
.loadPtr(MacroAssembler::BaseIndex(smallStringsReg
, scratchReg
, MacroAssembler::ScalePtr
, 0), scratchReg
);
2121 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branchTest32(MacroAssembler::Zero
, scratchReg
));
2122 cellResult(scratchReg
, m_currentNode
);
2125 void SpeculativeJIT::compileFromCharCode(Node
* node
)
2127 SpeculateStrictInt32Operand
property(this, node
->child1());
2128 GPRReg propertyReg
= property
.gpr();
2129 GPRTemporary
smallStrings(this);
2130 GPRTemporary
scratch(this);
2131 GPRReg scratchReg
= scratch
.gpr();
2132 GPRReg smallStringsReg
= smallStrings
.gpr();
2134 JITCompiler::JumpList slowCases
;
2135 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, TrustedImm32(0xff)));
2136 m_jit
.move(MacroAssembler::TrustedImmPtr(m_jit
.vm()->smallStrings
.singleCharacterStrings()), smallStringsReg
);
2137 m_jit
.loadPtr(MacroAssembler::BaseIndex(smallStringsReg
, propertyReg
, MacroAssembler::ScalePtr
, 0), scratchReg
);
2139 slowCases
.append(m_jit
.branchTest32(MacroAssembler::Zero
, scratchReg
));
2140 addSlowPathGenerator(slowPathCall(slowCases
, this, operationStringFromCharCode
, scratchReg
, propertyReg
));
2141 cellResult(scratchReg
, m_currentNode
);
2144 GeneratedOperandType
SpeculativeJIT::checkGeneratedTypeForToInt32(Node
* node
)
2146 #if DFG_ENABLE(DEBUG_VERBOSE)
2147 dataLogF("checkGeneratedTypeForToInt32@%d ", node
->index());
2149 VirtualRegister virtualRegister
= node
->virtualRegister();
2150 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
2152 switch (info
.registerFormat()) {
2153 case DataFormatStorage
:
2154 RELEASE_ASSERT_NOT_REACHED();
2156 case DataFormatBoolean
:
2157 case DataFormatCell
:
2158 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
2159 return GeneratedOperandTypeUnknown
;
2161 case DataFormatNone
:
2162 case DataFormatJSCell
:
2164 case DataFormatJSBoolean
:
2165 return GeneratedOperandJSValue
;
2167 case DataFormatJSInteger
:
2168 case DataFormatInteger
:
2169 return GeneratedOperandInteger
;
2171 case DataFormatJSDouble
:
2172 case DataFormatDouble
:
2173 return GeneratedOperandDouble
;
2176 RELEASE_ASSERT_NOT_REACHED();
2177 return GeneratedOperandTypeUnknown
;
2181 void SpeculativeJIT::compileValueToInt32(Node
* node
)
2183 switch (node
->child1().useKind()) {
2185 SpeculateIntegerOperand
op1(this, node
->child1());
2186 GPRTemporary
result(this, op1
);
2187 m_jit
.move(op1
.gpr(), result
.gpr());
2188 integerResult(result
.gpr(), node
, op1
.format());
2194 switch (checkGeneratedTypeForToInt32(node
->child1().node())) {
2195 case GeneratedOperandInteger
: {
2196 SpeculateIntegerOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
2197 GPRTemporary
result(this, op1
);
2198 m_jit
.move(op1
.gpr(), result
.gpr());
2199 integerResult(result
.gpr(), node
, op1
.format());
2202 case GeneratedOperandDouble
: {
2203 GPRTemporary
result(this);
2204 SpeculateDoubleOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
2205 FPRReg fpr
= op1
.fpr();
2206 GPRReg gpr
= result
.gpr();
2207 JITCompiler::Jump notTruncatedToInteger
= m_jit
.branchTruncateDoubleToInt32(fpr
, gpr
, JITCompiler::BranchIfTruncateFailed
);
2209 addSlowPathGenerator(slowPathCall(notTruncatedToInteger
, this, toInt32
, gpr
, fpr
));
2211 integerResult(gpr
, node
);
2214 case GeneratedOperandJSValue
: {
2215 GPRTemporary
result(this);
2217 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
2219 GPRReg gpr
= op1
.gpr();
2220 GPRReg resultGpr
= result
.gpr();
2221 FPRTemporary
tempFpr(this);
2222 FPRReg fpr
= tempFpr
.fpr();
2224 JITCompiler::Jump isInteger
= m_jit
.branch64(MacroAssembler::AboveOrEqual
, gpr
, GPRInfo::tagTypeNumberRegister
);
2225 JITCompiler::JumpList converted
;
2227 if (node
->child1().useKind() == NumberUse
) {
2229 JSValueRegs(gpr
), node
->child1(), SpecNumber
,
2231 MacroAssembler::Zero
, gpr
, GPRInfo::tagTypeNumberRegister
));
2233 JITCompiler::Jump isNumber
= m_jit
.branchTest64(MacroAssembler::NonZero
, gpr
, GPRInfo::tagTypeNumberRegister
);
2236 JSValueRegs(gpr
), node
->child1(), ~SpecCell
,
2238 JITCompiler::Zero
, gpr
, GPRInfo::tagMaskRegister
));
2240 // It's not a cell: so true turns into 1 and all else turns into 0.
2241 m_jit
.compare64(JITCompiler::Equal
, gpr
, TrustedImm32(ValueTrue
), resultGpr
);
2242 converted
.append(m_jit
.jump());
2244 isNumber
.link(&m_jit
);
2247 // First, if we get here we have a double encoded as a JSValue
2248 m_jit
.move(gpr
, resultGpr
);
2249 unboxDouble(resultGpr
, fpr
);
2251 silentSpillAllRegisters(resultGpr
);
2252 callOperation(toInt32
, resultGpr
, fpr
);
2253 silentFillAllRegisters(resultGpr
);
2255 converted
.append(m_jit
.jump());
2257 isInteger
.link(&m_jit
);
2258 m_jit
.zeroExtend32ToPtr(gpr
, resultGpr
);
2260 converted
.link(&m_jit
);
2262 Node
* childNode
= node
->child1().node();
2263 VirtualRegister virtualRegister
= childNode
->virtualRegister();
2264 GenerationInfo
& info
= m_generationInfo
[virtualRegister
];
2266 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
2268 GPRReg payloadGPR
= op1
.payloadGPR();
2269 GPRReg resultGpr
= result
.gpr();
2271 JITCompiler::JumpList converted
;
2273 if (info
.registerFormat() == DataFormatJSInteger
)
2274 m_jit
.move(payloadGPR
, resultGpr
);
2276 GPRReg tagGPR
= op1
.tagGPR();
2277 FPRTemporary
tempFpr(this);
2278 FPRReg fpr
= tempFpr
.fpr();
2279 FPRTemporary
scratch(this);
2281 JITCompiler::Jump isInteger
= m_jit
.branch32(MacroAssembler::Equal
, tagGPR
, TrustedImm32(JSValue::Int32Tag
));
2283 if (node
->child1().useKind() == NumberUse
) {
2285 JSValueRegs(tagGPR
, payloadGPR
), node
->child1(), SpecNumber
,
2287 MacroAssembler::AboveOrEqual
, tagGPR
,
2288 TrustedImm32(JSValue::LowestTag
)));
2290 JITCompiler::Jump isNumber
= m_jit
.branch32(MacroAssembler::Below
, tagGPR
, TrustedImm32(JSValue::LowestTag
));
2293 JSValueRegs(tagGPR
, payloadGPR
), node
->child1(), ~SpecCell
,
2295 JITCompiler::Equal
, tagGPR
, TrustedImm32(JSValue::CellTag
)));
2297 // It's not a cell: so true turns into 1 and all else turns into 0.
2298 JITCompiler::Jump isBoolean
= m_jit
.branch32(JITCompiler::Equal
, tagGPR
, TrustedImm32(JSValue::BooleanTag
));
2299 m_jit
.move(TrustedImm32(0), resultGpr
);
2300 converted
.append(m_jit
.jump());
2302 isBoolean
.link(&m_jit
);
2303 m_jit
.move(payloadGPR
, resultGpr
);
2304 converted
.append(m_jit
.jump());
2306 isNumber
.link(&m_jit
);
2309 unboxDouble(tagGPR
, payloadGPR
, fpr
, scratch
.fpr());
2311 silentSpillAllRegisters(resultGpr
);
2312 callOperation(toInt32
, resultGpr
, fpr
);
2313 silentFillAllRegisters(resultGpr
);
2315 converted
.append(m_jit
.jump());
2317 isInteger
.link(&m_jit
);
2318 m_jit
.move(payloadGPR
, resultGpr
);
2320 converted
.link(&m_jit
);
2323 integerResult(resultGpr
, node
);
2326 case GeneratedOperandTypeUnknown
:
2327 RELEASE_ASSERT(!m_compileOkay
);
2330 RELEASE_ASSERT_NOT_REACHED();
2335 SpeculateBooleanOperand
op1(this, node
->child1());
2336 GPRTemporary
result(this, op1
);
2338 m_jit
.move(op1
.gpr(), result
.gpr());
2339 m_jit
.and32(JITCompiler::TrustedImm32(1), result
.gpr());
2341 integerResult(result
.gpr(), node
);
2346 ASSERT(!m_compileOkay
);
2351 void SpeculativeJIT::compileUInt32ToNumber(Node
* node
)
2353 if (!nodeCanSpeculateInteger(node
->arithNodeFlags())) {
2354 // We know that this sometimes produces doubles. So produce a double every
2355 // time. This at least allows subsequent code to not have weird conditionals.
2357 IntegerOperand
op1(this, node
->child1());
2358 FPRTemporary
result(this);
2360 GPRReg inputGPR
= op1
.gpr();
2361 FPRReg outputFPR
= result
.fpr();
2363 m_jit
.convertInt32ToDouble(inputGPR
, outputFPR
);
2365 JITCompiler::Jump positive
= m_jit
.branch32(MacroAssembler::GreaterThanOrEqual
, inputGPR
, TrustedImm32(0));
2366 m_jit
.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32
), outputFPR
);
2367 positive
.link(&m_jit
);
2369 doubleResult(outputFPR
, node
);
2373 IntegerOperand
op1(this, node
->child1());
2374 GPRTemporary
result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
2376 m_jit
.move(op1
.gpr(), result
.gpr());
2378 // Test the operand is positive. This is a very special speculation check - we actually
2379 // use roll-forward speculation here, where if this fails, we jump to the baseline
2380 // instruction that follows us, rather than the one we're executing right now. We have
2381 // to do this because by this point, the original values necessary to compile whatever
2382 // operation the UInt32ToNumber originated from might be dead.
2383 forwardSpeculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, result
.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result
.gpr()));
2385 integerResult(result
.gpr(), node
, op1
.format());
2388 void SpeculativeJIT::compileDoubleAsInt32(Node
* node
)
2390 SpeculateDoubleOperand
op1(this, node
->child1());
2391 FPRTemporary
scratch(this);
2392 GPRTemporary
result(this);
2394 FPRReg valueFPR
= op1
.fpr();
2395 FPRReg scratchFPR
= scratch
.fpr();
2396 GPRReg resultGPR
= result
.gpr();
2398 JITCompiler::JumpList failureCases
;
2399 bool negZeroCheck
= !nodeCanIgnoreNegativeZero(node
->arithNodeFlags());
2400 m_jit
.branchConvertDoubleToInt32(valueFPR
, resultGPR
, failureCases
, scratchFPR
, negZeroCheck
);
2401 forwardSpeculationCheck(Overflow
, JSValueRegs(), 0, failureCases
, ValueRecovery::inFPR(valueFPR
));
2403 integerResult(resultGPR
, node
);
2406 void SpeculativeJIT::compileInt32ToDouble(Node
* node
)
2408 ASSERT(!isInt32Constant(node
->child1().node())); // This should have been constant folded.
2410 if (isInt32Speculation(m_state
.forNode(node
->child1()).m_type
)) {
2411 SpeculateIntegerOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
2412 FPRTemporary
result(this);
2413 m_jit
.convertInt32ToDouble(op1
.gpr(), result
.fpr());
2414 doubleResult(result
.fpr(), node
);
2418 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
2419 FPRTemporary
result(this);
2422 GPRTemporary
temp(this);
2424 GPRReg op1GPR
= op1
.gpr();
2425 GPRReg tempGPR
= temp
.gpr();
2426 FPRReg resultFPR
= result
.fpr();
2428 JITCompiler::Jump isInteger
= m_jit
.branch64(
2429 MacroAssembler::AboveOrEqual
, op1GPR
, GPRInfo::tagTypeNumberRegister
);
2431 if (needsTypeCheck(node
->child1(), SpecNumber
)) {
2432 if (node
->op() == ForwardInt32ToDouble
) {
2434 JSValueRegs(op1GPR
), node
->child1(), SpecNumber
,
2435 m_jit
.branchTest64(MacroAssembler::Zero
, op1GPR
, GPRInfo::tagTypeNumberRegister
),
2436 ValueRecovery::inGPR(op1GPR
, DataFormatJS
));
2439 JSValueRegs(op1GPR
), node
->child1(), SpecNumber
,
2440 m_jit
.branchTest64(MacroAssembler::Zero
, op1GPR
, GPRInfo::tagTypeNumberRegister
));
2444 m_jit
.move(op1GPR
, tempGPR
);
2445 unboxDouble(tempGPR
, resultFPR
);
2446 JITCompiler::Jump done
= m_jit
.jump();
2448 isInteger
.link(&m_jit
);
2449 m_jit
.convertInt32ToDouble(op1GPR
, resultFPR
);
2452 FPRTemporary
temp(this);
2454 GPRReg op1TagGPR
= op1
.tagGPR();
2455 GPRReg op1PayloadGPR
= op1
.payloadGPR();
2456 FPRReg tempFPR
= temp
.fpr();
2457 FPRReg resultFPR
= result
.fpr();
2459 JITCompiler::Jump isInteger
= m_jit
.branch32(
2460 MacroAssembler::Equal
, op1TagGPR
, TrustedImm32(JSValue::Int32Tag
));
2462 if (needsTypeCheck(node
->child1(), SpecNumber
)) {
2463 if (node
->op() == ForwardInt32ToDouble
) {
2465 JSValueRegs(op1TagGPR
, op1PayloadGPR
), node
->child1(), SpecNumber
,
2466 m_jit
.branch32(MacroAssembler::AboveOrEqual
, op1TagGPR
, TrustedImm32(JSValue::LowestTag
)),
2467 ValueRecovery::inPair(op1TagGPR
, op1PayloadGPR
));
2470 JSValueRegs(op1TagGPR
, op1PayloadGPR
), node
->child1(), SpecNumber
,
2471 m_jit
.branch32(MacroAssembler::AboveOrEqual
, op1TagGPR
, TrustedImm32(JSValue::LowestTag
)));
2475 unboxDouble(op1TagGPR
, op1PayloadGPR
, resultFPR
, tempFPR
);
2476 JITCompiler::Jump done
= m_jit
.jump();
2478 isInteger
.link(&m_jit
);
2479 m_jit
.convertInt32ToDouble(op1PayloadGPR
, resultFPR
);
2483 doubleResult(resultFPR
, node
);
2486 static double clampDoubleToByte(double d
)
2496 static void compileClampIntegerToByte(JITCompiler
& jit
, GPRReg result
)
2498 MacroAssembler::Jump inBounds
= jit
.branch32(MacroAssembler::BelowOrEqual
, result
, JITCompiler::TrustedImm32(0xff));
2499 MacroAssembler::Jump tooBig
= jit
.branch32(MacroAssembler::GreaterThan
, result
, JITCompiler::TrustedImm32(0xff));
2500 jit
.xorPtr(result
, result
);
2501 MacroAssembler::Jump clamped
= jit
.jump();
2503 jit
.move(JITCompiler::TrustedImm32(255), result
);
2505 inBounds
.link(&jit
);
2508 static void compileClampDoubleToByte(JITCompiler
& jit
, GPRReg result
, FPRReg source
, FPRReg scratch
)
2510 // Unordered compare so we pick up NaN
2511 static const double zero
= 0;
2512 static const double byteMax
= 255;
2513 static const double half
= 0.5;
2514 jit
.loadDouble(&zero
, scratch
);
2515 MacroAssembler::Jump tooSmall
= jit
.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered
, source
, scratch
);
2516 jit
.loadDouble(&byteMax
, scratch
);
2517 MacroAssembler::Jump tooBig
= jit
.branchDouble(MacroAssembler::DoubleGreaterThan
, source
, scratch
);
2519 jit
.loadDouble(&half
, scratch
);
2520 // FIXME: This should probably just use a floating point round!
2521 // https://bugs.webkit.org/show_bug.cgi?id=72054
2522 jit
.addDouble(source
, scratch
);
2523 jit
.truncateDoubleToInt32(scratch
, result
);
2524 MacroAssembler::Jump truncatedInt
= jit
.jump();
2526 tooSmall
.link(&jit
);
2527 jit
.xorPtr(result
, result
);
2528 MacroAssembler::Jump zeroed
= jit
.jump();
2531 jit
.move(JITCompiler::TrustedImm32(255), result
);
2533 truncatedInt
.link(&jit
);
2538 void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor
& descriptor
, Node
* node
, size_t elementSize
, TypedArraySignedness signedness
)
2540 SpeculateCellOperand
base(this, node
->child1());
2541 SpeculateStrictInt32Operand
property(this, node
->child2());
2542 StorageOperand
storage(this, node
->child3());
2544 GPRReg baseReg
= base
.gpr();
2545 GPRReg propertyReg
= property
.gpr();
2546 GPRReg storageReg
= storage
.gpr();
2548 GPRTemporary
result(this);
2549 GPRReg resultReg
= result
.gpr();
2551 ASSERT(node
->arrayMode().alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
2554 Uncountable
, JSValueRegs(), 0,
2556 MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(baseReg
, descriptor
.m_lengthOffset
)));
2557 switch (elementSize
) {
2559 if (signedness
== SignedTypedArray
)
2560 m_jit
.load8Signed(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesOne
), resultReg
);
2562 m_jit
.load8(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesOne
), resultReg
);
2565 if (signedness
== SignedTypedArray
)
2566 m_jit
.load16Signed(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesTwo
), resultReg
);
2568 m_jit
.load16(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesTwo
), resultReg
);
2571 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesFour
), resultReg
);
2576 if (elementSize
< 4 || signedness
== SignedTypedArray
) {
2577 integerResult(resultReg
, node
);
2581 ASSERT(elementSize
== 4 && signedness
== UnsignedTypedArray
);
2582 if (node
->shouldSpeculateInteger()) {
2583 forwardSpeculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, resultReg
, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg
));
2584 integerResult(resultReg
, node
);
2588 FPRTemporary
fresult(this);
2589 m_jit
.convertInt32ToDouble(resultReg
, fresult
.fpr());
2590 JITCompiler::Jump positive
= m_jit
.branch32(MacroAssembler::GreaterThanOrEqual
, resultReg
, TrustedImm32(0));
2591 m_jit
.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32
), fresult
.fpr());
2592 positive
.link(&m_jit
);
2593 doubleResult(fresult
.fpr(), node
);
2596 void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor
& descriptor
, GPRReg base
, GPRReg property
, Node
* node
, size_t elementSize
, TypedArraySignedness signedness
, TypedArrayRounding rounding
)
2598 StorageOperand
storage(this, m_jit
.graph().varArgChild(node
, 3));
2599 GPRReg storageReg
= storage
.gpr();
2601 Edge valueUse
= m_jit
.graph().varArgChild(node
, 2);
2606 if (valueUse
->isConstant()) {
2607 JSValue jsValue
= valueOfJSConstant(valueUse
.node());
2608 if (!jsValue
.isNumber()) {
2609 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
2613 double d
= jsValue
.asNumber();
2614 if (rounding
== ClampRounding
) {
2615 ASSERT(elementSize
== 1);
2616 d
= clampDoubleToByte(d
);
2618 GPRTemporary
scratch(this);
2619 GPRReg scratchReg
= scratch
.gpr();
2620 m_jit
.move(Imm32(toInt32(d
)), scratchReg
);
2621 value
.adopt(scratch
);
2622 valueGPR
= scratchReg
;
2624 switch (valueUse
.useKind()) {
2626 SpeculateIntegerOperand
valueOp(this, valueUse
);
2627 GPRTemporary
scratch(this);
2628 GPRReg scratchReg
= scratch
.gpr();
2629 m_jit
.move(valueOp
.gpr(), scratchReg
);
2630 if (rounding
== ClampRounding
) {
2631 ASSERT(elementSize
== 1);
2632 compileClampIntegerToByte(m_jit
, scratchReg
);
2634 value
.adopt(scratch
);
2635 valueGPR
= scratchReg
;
2640 if (rounding
== ClampRounding
) {
2641 ASSERT(elementSize
== 1);
2642 SpeculateDoubleOperand
valueOp(this, valueUse
);
2643 GPRTemporary
result(this);
2644 FPRTemporary
floatScratch(this);
2645 FPRReg fpr
= valueOp
.fpr();
2646 GPRReg gpr
= result
.gpr();
2647 compileClampDoubleToByte(m_jit
, gpr
, fpr
, floatScratch
.fpr());
2648 value
.adopt(result
);
2651 SpeculateDoubleOperand
valueOp(this, valueUse
);
2652 GPRTemporary
result(this);
2653 FPRReg fpr
= valueOp
.fpr();
2654 GPRReg gpr
= result
.gpr();
2655 MacroAssembler::Jump notNaN
= m_jit
.branchDouble(MacroAssembler::DoubleEqual
, fpr
, fpr
);
2656 m_jit
.xorPtr(gpr
, gpr
);
2657 MacroAssembler::Jump fixed
= m_jit
.jump();
2658 notNaN
.link(&m_jit
);
2660 MacroAssembler::Jump failed
;
2661 if (signedness
== SignedTypedArray
)
2662 failed
= m_jit
.branchTruncateDoubleToInt32(fpr
, gpr
, MacroAssembler::BranchIfTruncateFailed
);
2664 failed
= m_jit
.branchTruncateDoubleToUint32(fpr
, gpr
, MacroAssembler::BranchIfTruncateFailed
);
2666 addSlowPathGenerator(slowPathCall(failed
, this, toInt32
, gpr
, fpr
));
2669 value
.adopt(result
);
2676 RELEASE_ASSERT_NOT_REACHED();
2681 ASSERT_UNUSED(valueGPR
, valueGPR
!= property
);
2682 ASSERT(valueGPR
!= base
);
2683 ASSERT(valueGPR
!= storageReg
);
2684 MacroAssembler::Jump outOfBounds
;
2685 if (node
->op() == PutByVal
)
2686 outOfBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, property
, MacroAssembler::Address(base
, descriptor
.m_lengthOffset
));
2688 switch (elementSize
) {
2690 m_jit
.store8(value
.gpr(), MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesOne
));
2693 m_jit
.store16(value
.gpr(), MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesTwo
));
2696 m_jit
.store32(value
.gpr(), MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesFour
));
2701 if (node
->op() == PutByVal
)
2702 outOfBounds
.link(&m_jit
);
2706 void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor
& descriptor
, Node
* node
, size_t elementSize
)
2708 SpeculateCellOperand
base(this, node
->child1());
2709 SpeculateStrictInt32Operand
property(this, node
->child2());
2710 StorageOperand
storage(this, node
->child3());
2712 GPRReg baseReg
= base
.gpr();
2713 GPRReg propertyReg
= property
.gpr();
2714 GPRReg storageReg
= storage
.gpr();
2716 ASSERT(node
->arrayMode().alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
2718 FPRTemporary
result(this);
2719 FPRReg resultReg
= result
.fpr();
2721 Uncountable
, JSValueRegs(), 0,
2723 MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(baseReg
, descriptor
.m_lengthOffset
)));
2724 switch (elementSize
) {
2726 m_jit
.loadFloat(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesFour
), resultReg
);
2727 m_jit
.convertFloatToDouble(resultReg
, resultReg
);
2730 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), resultReg
);
2734 RELEASE_ASSERT_NOT_REACHED();
2737 MacroAssembler::Jump notNaN
= m_jit
.branchDouble(MacroAssembler::DoubleEqual
, resultReg
, resultReg
);
2738 static const double NaN
= QNaN
;
2739 m_jit
.loadDouble(&NaN
, resultReg
);
2740 notNaN
.link(&m_jit
);
2742 doubleResult(resultReg
, node
);
2745 void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor
& descriptor
, GPRReg base
, GPRReg property
, Node
* node
, size_t elementSize
)
2747 StorageOperand
storage(this, m_jit
.graph().varArgChild(node
, 3));
2748 GPRReg storageReg
= storage
.gpr();
2750 Edge baseUse
= m_jit
.graph().varArgChild(node
, 0);
2751 Edge valueUse
= m_jit
.graph().varArgChild(node
, 2);
2753 SpeculateDoubleOperand
valueOp(this, valueUse
);
2754 FPRTemporary
scratch(this);
2755 FPRReg valueFPR
= valueOp
.fpr();
2756 FPRReg scratchFPR
= scratch
.fpr();
2758 ASSERT_UNUSED(baseUse
, node
->arrayMode().alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(baseUse
)));
2760 MacroAssembler::Jump outOfBounds
;
2761 if (node
->op() == PutByVal
)
2762 outOfBounds
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, property
, MacroAssembler::Address(base
, descriptor
.m_lengthOffset
));
2764 switch (elementSize
) {
2766 m_jit
.moveDouble(valueFPR
, scratchFPR
);
2767 m_jit
.convertDoubleToFloat(valueFPR
, scratchFPR
);
2768 m_jit
.storeFloat(scratchFPR
, MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesFour
));
2772 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesEight
));
2775 RELEASE_ASSERT_NOT_REACHED();
2777 if (node
->op() == PutByVal
)
2778 outOfBounds
.link(&m_jit
);
2782 void SpeculativeJIT::compileInstanceOfForObject(Node
*, GPRReg valueReg
, GPRReg prototypeReg
, GPRReg scratchReg
)
2784 // Check that prototype is an object.
2785 m_jit
.loadPtr(MacroAssembler::Address(prototypeReg
, JSCell::structureOffset()), scratchReg
);
2786 speculationCheck(BadType
, JSValueRegs(), 0, m_jit
.branchIfNotObject(scratchReg
));
2788 // Initialize scratchReg with the value being checked.
2789 m_jit
.move(valueReg
, scratchReg
);
2791 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2792 MacroAssembler::Label
loop(&m_jit
);
2793 m_jit
.loadPtr(MacroAssembler::Address(scratchReg
, JSCell::structureOffset()), scratchReg
);
2795 m_jit
.load64(MacroAssembler::Address(scratchReg
, Structure::prototypeOffset()), scratchReg
);
2796 MacroAssembler::Jump isInstance
= m_jit
.branch64(MacroAssembler::Equal
, scratchReg
, prototypeReg
);
2797 m_jit
.branchTest64(MacroAssembler::Zero
, scratchReg
, GPRInfo::tagMaskRegister
).linkTo(loop
, &m_jit
);
2799 m_jit
.load32(MacroAssembler::Address(scratchReg
, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), scratchReg
);
2800 MacroAssembler::Jump isInstance
= m_jit
.branchPtr(MacroAssembler::Equal
, scratchReg
, prototypeReg
);
2801 m_jit
.branchTest32(MacroAssembler::NonZero
, scratchReg
).linkTo(loop
, &m_jit
);
2804 // No match - result is false.
2806 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg
);
2808 m_jit
.move(MacroAssembler::TrustedImm32(0), scratchReg
);
2810 MacroAssembler::Jump putResult
= m_jit
.jump();
2812 isInstance
.link(&m_jit
);
2814 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg
);
2816 m_jit
.move(MacroAssembler::TrustedImm32(1), scratchReg
);
2819 putResult
.link(&m_jit
);
2822 void SpeculativeJIT::compileInstanceOf(Node
* node
)
2824 if (node
->child1().useKind() == UntypedUse
) {
2825 // It might not be a cell. Speculate less aggressively.
2826 // Or: it might only be used once (i.e. by us), so we get zero benefit
2827 // from speculating any more aggressively than we absolutely need to.
2829 JSValueOperand
value(this, node
->child1());
2830 SpeculateCellOperand
prototype(this, node
->child2());
2831 GPRTemporary
scratch(this);
2833 GPRReg prototypeReg
= prototype
.gpr();
2834 GPRReg scratchReg
= scratch
.gpr();
2837 GPRReg valueReg
= value
.gpr();
2838 MacroAssembler::Jump isCell
= m_jit
.branchTest64(MacroAssembler::Zero
, valueReg
, GPRInfo::tagMaskRegister
);
2839 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg
);
2841 GPRReg valueTagReg
= value
.tagGPR();
2842 GPRReg valueReg
= value
.payloadGPR();
2843 MacroAssembler::Jump isCell
= m_jit
.branch32(MacroAssembler::Equal
, valueTagReg
, TrustedImm32(JSValue::CellTag
));
2844 m_jit
.move(MacroAssembler::TrustedImm32(0), scratchReg
);
2847 MacroAssembler::Jump done
= m_jit
.jump();
2849 isCell
.link(&m_jit
);
2851 compileInstanceOfForObject(node
, valueReg
, prototypeReg
, scratchReg
);
2856 jsValueResult(scratchReg
, node
, DataFormatJSBoolean
);
2858 booleanResult(scratchReg
, node
);
2863 SpeculateCellOperand
value(this, node
->child1());
2864 SpeculateCellOperand
prototype(this, node
->child2());
2866 GPRTemporary
scratch(this);
2868 GPRReg valueReg
= value
.gpr();
2869 GPRReg prototypeReg
= prototype
.gpr();
2870 GPRReg scratchReg
= scratch
.gpr();
2872 compileInstanceOfForObject(node
, valueReg
, prototypeReg
, scratchReg
);
2875 jsValueResult(scratchReg
, node
, DataFormatJSBoolean
);
2877 booleanResult(scratchReg
, node
);
2881 void SpeculativeJIT::compileSoftModulo(Node
* node
)
2883 // In the fast path, the dividend value could be the final result
2884 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
2885 SpeculateStrictInt32Operand
op1(this, node
->child1());
2886 #if CPU(X86) || CPU(X86_64)
2887 if (isInt32Constant(node
->child2().node())) {
2888 int32_t divisor
= valueOfInt32Constant(node
->child2().node());
2890 GPRReg op1Gpr
= op1
.gpr();
2892 GPRTemporary
eax(this, X86Registers::eax
);
2893 GPRTemporary
edx(this, X86Registers::edx
);
2894 GPRTemporary
scratch(this);
2895 GPRReg scratchGPR
= scratch
.gpr();
2898 if (op1Gpr
== X86Registers::eax
|| op1Gpr
== X86Registers::edx
) {
2899 op1SaveGPR
= allocate();
2900 ASSERT(op1Gpr
!= op1SaveGPR
);
2901 m_jit
.move(op1Gpr
, op1SaveGPR
);
2903 op1SaveGPR
= op1Gpr
;
2904 ASSERT(op1SaveGPR
!= X86Registers::eax
);
2905 ASSERT(op1SaveGPR
!= X86Registers::edx
);
2907 m_jit
.move(op1Gpr
, eax
.gpr());
2908 m_jit
.move(TrustedImm32(divisor
), scratchGPR
);
2910 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::Equal
, eax
.gpr(), TrustedImm32(-2147483647-1)));
2911 m_jit
.assembler().cdq();
2912 m_jit
.assembler().idivl_r(scratchGPR
);
2913 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
2914 // Check that we're not about to create negative zero.
2915 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, op1SaveGPR
, TrustedImm32(0));
2916 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, edx
.gpr()));
2917 numeratorPositive
.link(&m_jit
);
2919 if (op1SaveGPR
!= op1Gpr
)
2922 integerResult(edx
.gpr(), node
);
2927 if (isInt32Constant(node
->child2().node())) {
2928 int32_t divisor
= valueOfInt32Constant(node
->child2().node());
2929 if (divisor
> 0 && hasOneBitSet(divisor
)) { // If power of 2 then just mask
2930 GPRReg dividendGPR
= op1
.gpr();
2931 GPRTemporary
result(this);
2932 GPRReg resultGPR
= result
.gpr();
2934 m_jit
.assembler().cmp
<32>(dividendGPR
, UInt12(0));
2935 m_jit
.assembler().cneg
<32>(resultGPR
, dividendGPR
, ARM64Assembler::ConditionLT
);
2936 m_jit
.and32(TrustedImm32(divisor
- 1), resultGPR
);
2937 m_jit
.assembler().cneg
<32>(resultGPR
, resultGPR
, ARM64Assembler::ConditionLT
);
2939 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
2940 // Check that we're not about to create negative zero.
2941 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, dividendGPR
, TrustedImm32(0));
2942 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, resultGPR
));
2943 numeratorPositive
.link(&m_jit
);
2945 integerResult(resultGPR
, node
);
2949 #elif CPU(APPLE_ARMV7S) || CPU(ARM_THUMB2)
2950 if (isInt32Constant(node
->child2().node())) {
2951 int32_t divisor
= valueOfInt32Constant(node
->child2().node());
2952 if (divisor
> 0 && hasOneBitSet(divisor
)) { // If power of 2 then just mask
2953 GPRReg dividendGPR
= op1
.gpr();
2954 GPRTemporary
result(this);
2955 GPRReg resultGPR
= result
.gpr();
2957 m_jit
.assembler().cmp(dividendGPR
, ARMThumbImmediate::makeEncodedImm(0));
2958 m_jit
.assembler().it(ARMv7Assembler::ConditionLT
, false);
2959 m_jit
.assembler().neg(resultGPR
, dividendGPR
);
2960 m_jit
.assembler().mov(resultGPR
, dividendGPR
);
2961 m_jit
.and32(TrustedImm32(divisor
- 1), resultGPR
);
2962 m_jit
.assembler().it(ARMv7Assembler::ConditionLT
);
2963 m_jit
.assembler().neg(resultGPR
, resultGPR
);
2965 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
2966 // Check that we're not about to create negative zero.
2967 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, dividendGPR
, TrustedImm32(0));
2968 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, resultGPR
));
2969 numeratorPositive
.link(&m_jit
);
2971 integerResult(resultGPR
, node
);
2977 SpeculateIntegerOperand
op2(this, node
->child2());
2978 #if CPU(X86) || CPU(X86_64)
2979 GPRTemporary
eax(this, X86Registers::eax
);
2980 GPRTemporary
edx(this, X86Registers::edx
);
2981 GPRReg op1GPR
= op1
.gpr();
2982 GPRReg op2GPR
= op2
.gpr();
2988 if (op2GPR
== X86Registers::eax
|| op2GPR
== X86Registers::edx
) {
2989 op2TempGPR
= allocate();
2992 op2TempGPR
= InvalidGPRReg
;
2993 if (op1GPR
== X86Registers::eax
)
2994 temp
= X86Registers::edx
;
2996 temp
= X86Registers::eax
;
2999 if (op1GPR
== X86Registers::eax
|| op1GPR
== X86Registers::edx
) {
3000 op1SaveGPR
= allocate();
3001 ASSERT(op1GPR
!= op1SaveGPR
);
3002 m_jit
.move(op1GPR
, op1SaveGPR
);
3004 op1SaveGPR
= op1GPR
;
3006 ASSERT(temp
!= op1GPR
);
3007 ASSERT(temp
!= op2GPR
);
3008 ASSERT(op1SaveGPR
!= X86Registers::eax
);
3009 ASSERT(op1SaveGPR
!= X86Registers::edx
);
3011 m_jit
.add32(JITCompiler::TrustedImm32(1), op2GPR
, temp
);
3013 JITCompiler::Jump safeDenominator
= m_jit
.branch32(JITCompiler::Above
, temp
, JITCompiler::TrustedImm32(1));
3015 JITCompiler::Jump done
;
3016 // FIXME: if the node is not used as number then we can do this more easily.
3017 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, op2GPR
));
3018 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::Equal
, op1GPR
, TrustedImm32(-2147483647-1)));
3020 safeDenominator
.link(&m_jit
);
3022 if (op2TempGPR
!= InvalidGPRReg
) {
3023 m_jit
.move(op2GPR
, op2TempGPR
);
3024 op2GPR
= op2TempGPR
;
3027 m_jit
.move(op1GPR
, eax
.gpr());
3028 m_jit
.assembler().cdq();
3029 m_jit
.assembler().idivl_r(op2GPR
);
3031 if (op2TempGPR
!= InvalidGPRReg
)
3034 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3035 // Check that we're not about to create negative zero.
3036 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, op1SaveGPR
, TrustedImm32(0));
3037 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, edx
.gpr()));
3038 numeratorPositive
.link(&m_jit
);
3041 if (op1SaveGPR
!= op1GPR
)
3044 integerResult(edx
.gpr(), node
);
3046 GPRTemporary
temp(this);
3047 GPRTemporary
quotientThenRemainder(this);
3048 GPRTemporary
multiplyAnswer(this);
3049 GPRReg dividendGPR
= op1
.gpr();
3050 GPRReg divisorGPR
= op2
.gpr();
3051 GPRReg quotientThenRemainderGPR
= quotientThenRemainder
.gpr();
3052 GPRReg multiplyAnswerGPR
= multiplyAnswer
.gpr();
3054 m_jit
.assembler().sdiv
<32>(quotientThenRemainderGPR
, dividendGPR
, divisorGPR
);
3055 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchMul32(JITCompiler::Overflow
, quotientThenRemainderGPR
, divisorGPR
, multiplyAnswerGPR
));
3056 m_jit
.assembler().sub
<32>(quotientThenRemainderGPR
, dividendGPR
, multiplyAnswerGPR
);
3058 // If the user cares about negative zero, then speculate that we're not about
3059 // to produce negative zero.
3060 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3061 // Check that we're not about to create negative zero.
3062 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, dividendGPR
, TrustedImm32(0));
3063 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, quotientThenRemainderGPR
));
3064 numeratorPositive
.link(&m_jit
);
3067 integerResult(quotientThenRemainderGPR
, node
);
3068 #elif CPU(APPLE_ARMV7S)
3069 GPRTemporary
temp(this);
3070 GPRTemporary
quotientThenRemainder(this);
3071 GPRTemporary
multiplyAnswer(this);
3072 GPRReg dividendGPR
= op1
.gpr();
3073 GPRReg divisorGPR
= op2
.gpr();
3074 GPRReg quotientThenRemainderGPR
= quotientThenRemainder
.gpr();
3075 GPRReg multiplyAnswerGPR
= multiplyAnswer
.gpr();
3077 m_jit
.assembler().sdiv(quotientThenRemainderGPR
, dividendGPR
, divisorGPR
);
3078 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchMul32(JITCompiler::Overflow
, quotientThenRemainderGPR
, divisorGPR
, multiplyAnswerGPR
));
3079 m_jit
.assembler().sub(quotientThenRemainderGPR
, dividendGPR
, multiplyAnswerGPR
);
3081 // If the user cares about negative zero, then speculate that we're not about
3082 // to produce negative zero.
3083 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3084 // Check that we're not about to create negative zero.
3085 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, dividendGPR
, TrustedImm32(0));
3086 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, quotientThenRemainderGPR
));
3087 numeratorPositive
.link(&m_jit
);
3090 integerResult(quotientThenRemainderGPR
, node
);
3091 #else // not architecture that can do integer division
3092 // Do this the *safest* way possible: call out to a C function that will do the modulo,
3093 // and then attempt to convert back.
3094 GPRReg op1GPR
= op1
.gpr();
3095 GPRReg op2GPR
= op2
.gpr();
3097 FPRResult
result(this);
3100 callOperation(operationFModOnInts
, result
.fpr(), op1GPR
, op2GPR
);
3102 FPRTemporary
scratch(this);
3103 GPRTemporary
intResult(this);
3104 JITCompiler::JumpList failureCases
;
3105 m_jit
.branchConvertDoubleToInt32(result
.fpr(), intResult
.gpr(), failureCases
, scratch
.fpr(), false);
3106 speculationCheck(Overflow
, JSValueRegs(), 0, failureCases
);
3107 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3108 // Check that we're not about to create negative zero.
3109 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, op1GPR
, TrustedImm32(0));
3110 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, intResult
.gpr()));
3111 numeratorPositive
.link(&m_jit
);
3114 integerResult(intResult
.gpr(), node
);
3115 #endif // CPU(X86) || CPU(X86_64)
3118 void SpeculativeJIT::compileAdd(Node
* node
)
3120 switch (node
->binaryUseKind()) {
3122 if (isNumberConstant(node
->child1().node())) {
3123 int32_t imm1
= valueOfInt32Constant(node
->child1().node());
3124 SpeculateIntegerOperand
op2(this, node
->child2());
3125 GPRTemporary
result(this);
3127 if (nodeCanTruncateInteger(node
->arithNodeFlags())) {
3128 m_jit
.move(op2
.gpr(), result
.gpr());
3129 m_jit
.add32(Imm32(imm1
), result
.gpr());
3131 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchAdd32(MacroAssembler::Overflow
, op2
.gpr(), Imm32(imm1
), result
.gpr()));
3133 integerResult(result
.gpr(), node
);
3137 if (isNumberConstant(node
->child2().node())) {
3138 SpeculateIntegerOperand
op1(this, node
->child1());
3139 int32_t imm2
= valueOfInt32Constant(node
->child2().node());
3140 GPRTemporary
result(this);
3142 if (nodeCanTruncateInteger(node
->arithNodeFlags())) {
3143 m_jit
.move(op1
.gpr(), result
.gpr());
3144 m_jit
.add32(Imm32(imm2
), result
.gpr());
3146 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchAdd32(MacroAssembler::Overflow
, op1
.gpr(), Imm32(imm2
), result
.gpr()));
3148 integerResult(result
.gpr(), node
);
3152 SpeculateIntegerOperand
op1(this, node
->child1());
3153 SpeculateIntegerOperand
op2(this, node
->child2());
3154 GPRTemporary
result(this, op1
, op2
);
3156 GPRReg gpr1
= op1
.gpr();
3157 GPRReg gpr2
= op2
.gpr();
3158 GPRReg gprResult
= result
.gpr();
3160 if (nodeCanTruncateInteger(node
->arithNodeFlags())) {
3161 if (gpr1
== gprResult
)
3162 m_jit
.add32(gpr2
, gprResult
);
3164 m_jit
.move(gpr2
, gprResult
);
3165 m_jit
.add32(gpr1
, gprResult
);
3168 MacroAssembler::Jump check
= m_jit
.branchAdd32(MacroAssembler::Overflow
, gpr1
, gpr2
, gprResult
);
3170 if (gpr1
== gprResult
)
3171 speculationCheck(Overflow
, JSValueRegs(), 0, check
, SpeculationRecovery(SpeculativeAdd
, gprResult
, gpr2
));
3172 else if (gpr2
== gprResult
)
3173 speculationCheck(Overflow
, JSValueRegs(), 0, check
, SpeculationRecovery(SpeculativeAdd
, gprResult
, gpr1
));
3175 speculationCheck(Overflow
, JSValueRegs(), 0, check
);
3178 integerResult(gprResult
, node
);
3183 SpeculateDoubleOperand
op1(this, node
->child1());
3184 SpeculateDoubleOperand
op2(this, node
->child2());
3185 FPRTemporary
result(this, op1
, op2
);
3187 FPRReg reg1
= op1
.fpr();
3188 FPRReg reg2
= op2
.fpr();
3189 m_jit
.addDouble(reg1
, reg2
, result
.fpr());
3191 doubleResult(result
.fpr(), node
);
3196 RELEASE_ASSERT(node
->op() == ValueAdd
);
3197 compileValueAdd(node
);
3202 RELEASE_ASSERT_NOT_REACHED();
3207 void SpeculativeJIT::compileMakeRope(Node
* node
)
3209 ASSERT(node
->child1().useKind() == KnownStringUse
);
3210 ASSERT(node
->child2().useKind() == KnownStringUse
);
3211 ASSERT(!node
->child3() || node
->child3().useKind() == KnownStringUse
);
3213 SpeculateCellOperand
op1(this, node
->child1());
3214 SpeculateCellOperand
op2(this, node
->child2());
3215 SpeculateCellOperand
op3(this, node
->child3());
3216 GPRTemporary
result(this);
3217 GPRTemporary
allocator(this);
3218 GPRTemporary
scratch(this);
3222 opGPRs
[0] = op1
.gpr();
3223 opGPRs
[1] = op2
.gpr();
3224 if (node
->child3()) {
3225 opGPRs
[2] = op3
.gpr();
3228 opGPRs
[2] = InvalidGPRReg
;
3231 GPRReg resultGPR
= result
.gpr();
3232 GPRReg allocatorGPR
= allocator
.gpr();
3233 GPRReg scratchGPR
= scratch
.gpr();
3235 JITCompiler::JumpList slowPath
;
3236 MarkedAllocator
& markedAllocator
= m_jit
.vm()->heap
.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString
));
3237 m_jit
.move(TrustedImmPtr(&markedAllocator
), allocatorGPR
);
3238 emitAllocateJSCell(resultGPR
, allocatorGPR
, TrustedImmPtr(m_jit
.vm()->stringStructure
.get()), scratchGPR
, slowPath
);
3240 m_jit
.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR
, JSString::offsetOfValue()));
3241 for (unsigned i
= 0; i
< numOpGPRs
; ++i
)
3242 m_jit
.storePtr(opGPRs
[i
], JITCompiler::Address(resultGPR
, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier
<JSString
>) * i
));
3243 for (unsigned i
= numOpGPRs
; i
< JSRopeString::s_maxInternalRopeLength
; ++i
)
3244 m_jit
.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR
, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier
<JSString
>) * i
));
3245 m_jit
.load32(JITCompiler::Address(opGPRs
[0], JSString::offsetOfFlags()), scratchGPR
);
3246 m_jit
.load32(JITCompiler::Address(opGPRs
[0], JSString::offsetOfLength()), allocatorGPR
);
3247 for (unsigned i
= 1; i
< numOpGPRs
; ++i
) {
3248 m_jit
.and32(JITCompiler::Address(opGPRs
[i
], JSString::offsetOfFlags()), scratchGPR
);
3249 m_jit
.add32(JITCompiler::Address(opGPRs
[i
], JSString::offsetOfLength()), allocatorGPR
);
3251 m_jit
.and32(JITCompiler::TrustedImm32(JSString::Is8Bit
), scratchGPR
);
3252 m_jit
.store32(scratchGPR
, JITCompiler::Address(resultGPR
, JSString::offsetOfFlags()));
3253 m_jit
.store32(allocatorGPR
, JITCompiler::Address(resultGPR
, JSString::offsetOfLength()));
3255 switch (numOpGPRs
) {
3257 addSlowPathGenerator(slowPathCall(
3258 slowPath
, this, operationMakeRope2
, resultGPR
, opGPRs
[0], opGPRs
[1]));
3261 addSlowPathGenerator(slowPathCall(
3262 slowPath
, this, operationMakeRope3
, resultGPR
, opGPRs
[0], opGPRs
[1], opGPRs
[2]));
3265 RELEASE_ASSERT_NOT_REACHED();
3269 cellResult(resultGPR
, node
);
3272 void SpeculativeJIT::compileArithSub(Node
* node
)
3274 switch (node
->binaryUseKind()) {
3276 if (isNumberConstant(node
->child2().node())) {
3277 SpeculateIntegerOperand
op1(this, node
->child1());
3278 int32_t imm2
= valueOfInt32Constant(node
->child2().node());
3279 GPRTemporary
result(this);
3281 if (nodeCanTruncateInteger(node
->arithNodeFlags())) {
3282 m_jit
.move(op1
.gpr(), result
.gpr());
3283 m_jit
.sub32(Imm32(imm2
), result
.gpr());
3285 #if ENABLE(JIT_CONSTANT_BLINDING)
3286 GPRTemporary
scratch(this);
3287 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchSub32(MacroAssembler::Overflow
, op1
.gpr(), Imm32(imm2
), result
.gpr(), scratch
.gpr()));
3289 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchSub32(MacroAssembler::Overflow
, op1
.gpr(), Imm32(imm2
), result
.gpr()));
3293 integerResult(result
.gpr(), node
);
3297 if (isNumberConstant(node
->child1().node())) {
3298 int32_t imm1
= valueOfInt32Constant(node
->child1().node());
3299 SpeculateIntegerOperand
op2(this, node
->child2());
3300 GPRTemporary
result(this);
3302 m_jit
.move(Imm32(imm1
), result
.gpr());
3303 if (nodeCanTruncateInteger(node
->arithNodeFlags()))
3304 m_jit
.sub32(op2
.gpr(), result
.gpr());
3306 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchSub32(MacroAssembler::Overflow
, op2
.gpr(), result
.gpr()));
3308 integerResult(result
.gpr(), node
);
3312 SpeculateIntegerOperand
op1(this, node
->child1());
3313 SpeculateIntegerOperand
op2(this, node
->child2());
3314 GPRTemporary
result(this);
3316 if (nodeCanTruncateInteger(node
->arithNodeFlags())) {
3317 m_jit
.move(op1
.gpr(), result
.gpr());
3318 m_jit
.sub32(op2
.gpr(), result
.gpr());
3320 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchSub32(MacroAssembler::Overflow
, op1
.gpr(), op2
.gpr(), result
.gpr()));
3322 integerResult(result
.gpr(), node
);
3327 SpeculateDoubleOperand
op1(this, node
->child1());
3328 SpeculateDoubleOperand
op2(this, node
->child2());
3329 FPRTemporary
result(this, op1
);
3331 FPRReg reg1
= op1
.fpr();
3332 FPRReg reg2
= op2
.fpr();
3333 m_jit
.subDouble(reg1
, reg2
, result
.fpr());
3335 doubleResult(result
.fpr(), node
);
3340 RELEASE_ASSERT_NOT_REACHED();
3345 void SpeculativeJIT::compileArithNegate(Node
* node
)
3347 switch (node
->child1().useKind()) {
3349 SpeculateIntegerOperand
op1(this, node
->child1());
3350 GPRTemporary
result(this);
3352 m_jit
.move(op1
.gpr(), result
.gpr());
3354 if (nodeCanTruncateInteger(node
->arithNodeFlags()))
3355 m_jit
.neg32(result
.gpr());
3357 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchNeg32(MacroAssembler::Overflow
, result
.gpr()));
3358 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags()))
3359 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(MacroAssembler::Zero
, result
.gpr()));
3362 integerResult(result
.gpr(), node
);
3367 SpeculateDoubleOperand
op1(this, node
->child1());
3368 FPRTemporary
result(this);
3370 m_jit
.negateDouble(op1
.fpr(), result
.fpr());
3372 doubleResult(result
.fpr(), node
);
3377 RELEASE_ASSERT_NOT_REACHED();
3381 void SpeculativeJIT::compileArithIMul(Node
* node
)
3383 SpeculateIntegerOperand
op1(this, node
->child1());
3384 SpeculateIntegerOperand
op2(this, node
->child2());
3385 GPRTemporary
result(this);
3387 GPRReg reg1
= op1
.gpr();
3388 GPRReg reg2
= op2
.gpr();
3390 m_jit
.move(reg1
, result
.gpr());
3391 m_jit
.mul32(reg2
, result
.gpr());
3392 integerResult(result
.gpr(), node
);
3396 void SpeculativeJIT::compileArithMul(Node
* node
)
3398 switch (node
->binaryUseKind()) {
3400 SpeculateIntegerOperand
op1(this, node
->child1());
3401 SpeculateIntegerOperand
op2(this, node
->child2());
3402 GPRTemporary
result(this);
3404 GPRReg reg1
= op1
.gpr();
3405 GPRReg reg2
= op2
.gpr();
3407 // We can perform truncated multiplications if we get to this point, because if the
3408 // fixup phase could not prove that it would be safe, it would have turned us into
3409 // a double multiplication.
3410 if (nodeCanTruncateInteger(node
->arithNodeFlags())) {
3411 m_jit
.move(reg1
, result
.gpr());
3412 m_jit
.mul32(reg2
, result
.gpr());
3415 Overflow
, JSValueRegs(), 0,
3416 m_jit
.branchMul32(MacroAssembler::Overflow
, reg1
, reg2
, result
.gpr()));
3419 // Check for negative zero, if the users of this node care about such things.
3420 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3421 MacroAssembler::Jump resultNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, result
.gpr());
3422 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, reg1
, TrustedImm32(0)));
3423 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, reg2
, TrustedImm32(0)));
3424 resultNonZero
.link(&m_jit
);
3427 integerResult(result
.gpr(), node
);
3432 SpeculateDoubleOperand
op1(this, node
->child1());
3433 SpeculateDoubleOperand
op2(this, node
->child2());
3434 FPRTemporary
result(this, op1
, op2
);
3436 FPRReg reg1
= op1
.fpr();
3437 FPRReg reg2
= op2
.fpr();
3439 m_jit
.mulDouble(reg1
, reg2
, result
.fpr());
3441 doubleResult(result
.fpr(), node
);
3446 RELEASE_ASSERT_NOT_REACHED();
3451 #if CPU(X86) || CPU(X86_64)
3452 void SpeculativeJIT::compileIntegerArithDivForX86(Node
* node
)
3454 SpeculateIntegerOperand
op1(this, node
->child1());
3455 SpeculateIntegerOperand
op2(this, node
->child2());
3456 GPRTemporary
eax(this, X86Registers::eax
);
3457 GPRTemporary
edx(this, X86Registers::edx
);
3458 GPRReg op1GPR
= op1
.gpr();
3459 GPRReg op2GPR
= op2
.gpr();
3463 if (op2GPR
== X86Registers::eax
|| op2GPR
== X86Registers::edx
) {
3464 op2TempGPR
= allocate();
3467 op2TempGPR
= InvalidGPRReg
;
3468 if (op1GPR
== X86Registers::eax
)
3469 temp
= X86Registers::edx
;
3471 temp
= X86Registers::eax
;
3474 ASSERT(temp
!= op1GPR
);
3475 ASSERT(temp
!= op2GPR
);
3477 m_jit
.add32(JITCompiler::TrustedImm32(1), op2GPR
, temp
);
3479 JITCompiler::Jump safeDenominator
= m_jit
.branch32(JITCompiler::Above
, temp
, JITCompiler::TrustedImm32(1));
3481 JITCompiler::JumpList done
;
3482 if (nodeUsedAsNumber(node
->arithNodeFlags())) {
3483 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, op2GPR
));
3484 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::Equal
, op1GPR
, TrustedImm32(-2147483647-1)));
3486 // This is the case where we convert the result to an int after we're done, and we
3487 // already know that the denominator is either -1 or 0. So, if the denominator is
3488 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3489 // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3490 // are happy to fall through to a normal division, since we're just dividing
3491 // something by negative 1.
3493 JITCompiler::Jump notZero
= m_jit
.branchTest32(JITCompiler::NonZero
, op2GPR
);
3494 m_jit
.move(TrustedImm32(0), eax
.gpr());
3495 done
.append(m_jit
.jump());
3497 notZero
.link(&m_jit
);
3498 JITCompiler::Jump notNeg2ToThe31
=
3499 m_jit
.branch32(JITCompiler::NotEqual
, op1GPR
, TrustedImm32(-2147483647-1));
3500 m_jit
.move(op1GPR
, eax
.gpr());
3501 done
.append(m_jit
.jump());
3503 notNeg2ToThe31
.link(&m_jit
);
3506 safeDenominator
.link(&m_jit
);
3508 // If the user cares about negative zero, then speculate that we're not about
3509 // to produce negative zero.
3510 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3511 MacroAssembler::Jump numeratorNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, op1GPR
);
3512 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, op2GPR
, TrustedImm32(0)));
3513 numeratorNonZero
.link(&m_jit
);
3516 if (op2TempGPR
!= InvalidGPRReg
) {
3517 m_jit
.move(op2GPR
, op2TempGPR
);
3518 op2GPR
= op2TempGPR
;
3521 m_jit
.move(op1GPR
, eax
.gpr());
3522 m_jit
.assembler().cdq();
3523 m_jit
.assembler().idivl_r(op2GPR
);
3525 if (op2TempGPR
!= InvalidGPRReg
)
3528 // Check that there was no remainder. If there had been, then we'd be obligated to
3529 // produce a double result instead.
3530 if (nodeUsedAsNumber(node
->arithNodeFlags()))
3531 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::NonZero
, edx
.gpr()));
3535 integerResult(eax
.gpr(), node
);
3538 void SpeculativeJIT::compileIntegerArithDivForARM64(Node
* node
)
3540 SpeculateIntegerOperand
op1(this, node
->child1());
3541 SpeculateIntegerOperand
op2(this, node
->child2());
3542 GPRReg op1GPR
= op1
.gpr();
3543 GPRReg op2GPR
= op2
.gpr();
3544 GPRTemporary
quotient(this);
3545 GPRTemporary
multiplyAnswer(this);
3547 // If the user cares about negative zero, then speculate that we're not about
3548 // to produce negative zero.
3549 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3550 MacroAssembler::Jump numeratorNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, op1GPR
);
3551 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, op2GPR
, TrustedImm32(0)));
3552 numeratorNonZero
.link(&m_jit
);
3555 m_jit
.assembler().sdiv
<32>(quotient
.gpr(), op1GPR
, op2GPR
);
3557 // Check that there was no remainder. If there had been, then we'd be obligated to
3558 // produce a double result instead.
3559 if (nodeUsedAsNumber(node
->arithNodeFlags())) {
3560 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchMul32(JITCompiler::Overflow
, quotient
.gpr(), op2GPR
, multiplyAnswer
.gpr()));
3561 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::NotEqual
, multiplyAnswer
.gpr(), op1GPR
));
3564 integerResult(quotient
.gpr(), node
);
3566 #elif CPU(APPLE_ARMV7S)
3567 void SpeculativeJIT::compileIntegerArithDivForARMv7s(Node
* node
)
3569 SpeculateIntegerOperand
op1(this, node
->child1());
3570 SpeculateIntegerOperand
op2(this, node
->child2());
3571 GPRReg op1GPR
= op1
.gpr();
3572 GPRReg op2GPR
= op2
.gpr();
3573 GPRTemporary
quotient(this);
3574 GPRTemporary
multiplyAnswer(this);
3576 // If the user cares about negative zero, then speculate that we're not about
3577 // to produce negative zero.
3578 if (!nodeCanIgnoreNegativeZero(node
->arithNodeFlags())) {
3579 MacroAssembler::Jump numeratorNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, op1GPR
);
3580 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, op2GPR
, TrustedImm32(0)));
3581 numeratorNonZero
.link(&m_jit
);
3584 m_jit
.assembler().sdiv(quotient
.gpr(), op1GPR
, op2GPR
);
3586 // Check that there was no remainder. If there had been, then we'd be obligated to
3587 // produce a double result instead.
3588 if (nodeUsedAsNumber(node
->arithNodeFlags())) {
3589 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchMul32(JITCompiler::Overflow
, quotient
.gpr(), op2GPR
, multiplyAnswer
.gpr()));
3590 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::NotEqual
, multiplyAnswer
.gpr(), op1GPR
));
3593 integerResult(quotient
.gpr(), node
);
3597 void SpeculativeJIT::compileArithMod(Node
* node
)
3599 switch (node
->binaryUseKind()) {
3601 compileSoftModulo(node
);
3606 SpeculateDoubleOperand
op1(this, node
->child1());
3607 SpeculateDoubleOperand
op2(this, node
->child2());
3609 FPRReg op1FPR
= op1
.fpr();
3610 FPRReg op2FPR
= op2
.fpr();
3614 FPRResult
result(this);
3616 callOperation(fmodAsDFGOperation
, result
.fpr(), op1FPR
, op2FPR
);
3618 doubleResult(result
.fpr(), node
);
3623 RELEASE_ASSERT_NOT_REACHED();
3628 // Returns true if the compare is fused with a subsequent branch.
3629 bool SpeculativeJIT::compare(Node
* node
, MacroAssembler::RelationalCondition condition
, MacroAssembler::DoubleCondition doubleCondition
, S_DFGOperation_EJJ operation
)
3631 if (compilePeepHoleBranch(node
, condition
, doubleCondition
, operation
))
3634 if (node
->isBinaryUseKind(Int32Use
)) {
3635 compileIntegerCompare(node
, condition
);
3639 if (node
->isBinaryUseKind(NumberUse
)) {
3640 compileDoubleCompare(node
, doubleCondition
);
3644 if (node
->op() == CompareEq
) {
3645 if (node
->isBinaryUseKind(StringUse
)) {
3646 compileStringEquality(node
);
3650 if (node
->isBinaryUseKind(BooleanUse
)) {
3651 compileBooleanCompare(node
, condition
);
3655 if (node
->isBinaryUseKind(ObjectUse
)) {
3656 compileObjectEquality(node
);
3660 if (node
->child1().useKind() == ObjectUse
&& node
->child2().useKind() == ObjectOrOtherUse
) {
3661 compileObjectToObjectOrOtherEquality(node
->child1(), node
->child2());
3665 if (node
->child1().useKind() == ObjectOrOtherUse
&& node
->child2().useKind() == ObjectUse
) {
3666 compileObjectToObjectOrOtherEquality(node
->child2(), node
->child1());
3671 nonSpeculativeNonPeepholeCompare(node
, condition
, operation
);
3675 bool SpeculativeJIT::compileStrictEqForConstant(Node
* node
, Edge value
, JSValue constant
)
3677 JSValueOperand
op1(this, value
);
3679 // FIXME: This code is wrong for the case that the constant is null or undefined,
3680 // and the value is an object that MasqueradesAsUndefined.
3681 // https://bugs.webkit.org/show_bug.cgi?id=109487
3683 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3684 if (branchIndexInBlock
!= UINT_MAX
) {
3685 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
3686 BlockIndex taken
= branchNode
->takenBlockIndex();
3687 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
3688 MacroAssembler::RelationalCondition condition
= MacroAssembler::Equal
;
3690 // The branch instruction will branch to the taken block.
3691 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
3692 if (taken
== nextBlock()) {
3693 condition
= MacroAssembler::NotEqual
;
3694 BlockIndex tmp
= taken
;
3700 branch64(condition
, op1
.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant
)), taken
);
3702 GPRReg payloadGPR
= op1
.payloadGPR();
3703 GPRReg tagGPR
= op1
.tagGPR();
3704 if (condition
== MacroAssembler::Equal
) {
3705 // Drop down if not equal, go elsewhere if equal.
3706 MacroAssembler::Jump notEqual
= m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, MacroAssembler::Imm32(constant
.tag()));
3707 branch32(MacroAssembler::Equal
, payloadGPR
, MacroAssembler::Imm32(constant
.payload()), taken
);
3708 notEqual
.link(&m_jit
);
3710 // Drop down if equal, go elsehwere if not equal.
3711 branch32(MacroAssembler::NotEqual
, tagGPR
, MacroAssembler::Imm32(constant
.tag()), taken
);
3712 branch32(MacroAssembler::NotEqual
, payloadGPR
, MacroAssembler::Imm32(constant
.payload()), taken
);
3718 use(node
->child1());
3719 use(node
->child2());
3720 m_indexInBlock
= branchIndexInBlock
;
3721 m_currentNode
= branchNode
;
3725 GPRTemporary
result(this);
3728 GPRReg op1GPR
= op1
.gpr();
3729 GPRReg resultGPR
= result
.gpr();
3730 m_jit
.move(MacroAssembler::TrustedImm64(ValueFalse
), resultGPR
);
3731 MacroAssembler::Jump notEqual
= m_jit
.branch64(MacroAssembler::NotEqual
, op1GPR
, MacroAssembler::TrustedImm64(JSValue::encode(constant
)));
3732 m_jit
.or32(MacroAssembler::TrustedImm32(1), resultGPR
);
3733 notEqual
.link(&m_jit
);
3734 jsValueResult(resultGPR
, node
, DataFormatJSBoolean
);
3736 GPRReg op1PayloadGPR
= op1
.payloadGPR();
3737 GPRReg op1TagGPR
= op1
.tagGPR();
3738 GPRReg resultGPR
= result
.gpr();
3739 m_jit
.move(TrustedImm32(0), resultGPR
);
3740 MacroAssembler::JumpList notEqual
;
3741 notEqual
.append(m_jit
.branch32(MacroAssembler::NotEqual
, op1TagGPR
, MacroAssembler::Imm32(constant
.tag())));
3742 notEqual
.append(m_jit
.branch32(MacroAssembler::NotEqual
, op1PayloadGPR
, MacroAssembler::Imm32(constant
.payload())));
3743 m_jit
.move(TrustedImm32(1), resultGPR
);
3744 notEqual
.link(&m_jit
);
3745 booleanResult(resultGPR
, node
);
3751 bool SpeculativeJIT::compileStrictEq(Node
* node
)
3753 switch (node
->binaryUseKind()) {
3755 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3756 if (branchIndexInBlock
!= UINT_MAX
) {
3757 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
3758 compilePeepHoleBooleanBranch(node
, branchNode
, MacroAssembler::Equal
);
3759 use(node
->child1());
3760 use(node
->child2());
3761 m_indexInBlock
= branchIndexInBlock
;
3762 m_currentNode
= branchNode
;
3765 compileBooleanCompare(node
, MacroAssembler::Equal
);
3770 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3771 if (branchIndexInBlock
!= UINT_MAX
) {
3772 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
3773 compilePeepHoleIntegerBranch(node
, branchNode
, MacroAssembler::Equal
);
3774 use(node
->child1());
3775 use(node
->child2());
3776 m_indexInBlock
= branchIndexInBlock
;
3777 m_currentNode
= branchNode
;
3780 compileIntegerCompare(node
, MacroAssembler::Equal
);
3785 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3786 if (branchIndexInBlock
!= UINT_MAX
) {
3787 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
3788 compilePeepHoleDoubleBranch(node
, branchNode
, MacroAssembler::DoubleEqual
);
3789 use(node
->child1());
3790 use(node
->child2());
3791 m_indexInBlock
= branchIndexInBlock
;
3792 m_currentNode
= branchNode
;
3795 compileDoubleCompare(node
, MacroAssembler::DoubleEqual
);
3800 compileStringEquality(node
);
3805 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3806 if (branchIndexInBlock
!= UINT_MAX
) {
3807 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
3808 compilePeepHoleObjectEquality(node
, branchNode
);
3809 use(node
->child1());
3810 use(node
->child2());
3811 m_indexInBlock
= branchIndexInBlock
;
3812 m_currentNode
= branchNode
;
3815 compileObjectEquality(node
);
3820 return nonSpeculativeStrictEq(node
);
3824 RELEASE_ASSERT_NOT_REACHED();
3829 void SpeculativeJIT::compileBooleanCompare(Node
* node
, MacroAssembler::RelationalCondition condition
)
3831 SpeculateBooleanOperand
op1(this, node
->child1());
3832 SpeculateBooleanOperand
op2(this, node
->child2());
3833 GPRTemporary
result(this);
3835 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), result
.gpr());
3837 // If we add a DataFormatBool, we should use it here.
3838 #if USE(JSVALUE32_64)
3839 booleanResult(result
.gpr(), node
);
3841 m_jit
.or32(TrustedImm32(ValueFalse
), result
.gpr());
3842 jsValueResult(result
.gpr(), m_currentNode
, DataFormatJSBoolean
);
3846 void SpeculativeJIT::compileStringEquality(Node
* node
)
3848 SpeculateCellOperand
left(this, node
->child1());
3849 SpeculateCellOperand
right(this, node
->child2());
3850 GPRTemporary
length(this);
3851 GPRTemporary
leftTemp(this);
3852 GPRTemporary
rightTemp(this);
3853 GPRTemporary
leftTemp2(this, left
);
3854 GPRTemporary
rightTemp2(this, right
);
3856 GPRReg leftGPR
= left
.gpr();
3857 GPRReg rightGPR
= right
.gpr();
3858 GPRReg lengthGPR
= length
.gpr();
3859 GPRReg leftTempGPR
= leftTemp
.gpr();
3860 GPRReg rightTempGPR
= rightTemp
.gpr();
3861 GPRReg leftTemp2GPR
= leftTemp2
.gpr();
3862 GPRReg rightTemp2GPR
= rightTemp2
.gpr();
3864 JITCompiler::JumpList trueCase
;
3865 JITCompiler::JumpList falseCase
;
3866 JITCompiler::JumpList slowCase
;
3869 JSValueSource::unboxedCell(leftGPR
), node
->child1(), SpecString
, m_jit
.branchPtr(
3870 MacroAssembler::NotEqual
,
3871 MacroAssembler::Address(leftGPR
, JSCell::structureOffset()),
3872 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
3874 // It's safe to branch around the type check below, since proving that the values are
3875 // equal does indeed prove that the right value is a string.
3876 trueCase
.append(m_jit
.branchPtr(MacroAssembler::Equal
, leftGPR
, rightGPR
));
3879 JSValueSource::unboxedCell(rightGPR
), node
->child2(), SpecString
, m_jit
.branchPtr(
3880 MacroAssembler::NotEqual
,
3881 MacroAssembler::Address(rightGPR
, JSCell::structureOffset()),
3882 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
3884 m_jit
.load32(MacroAssembler::Address(leftGPR
, JSString::offsetOfLength()), lengthGPR
);
3886 falseCase
.append(m_jit
.branch32(
3887 MacroAssembler::NotEqual
,
3888 MacroAssembler::Address(rightGPR
, JSString::offsetOfLength()),
3891 trueCase
.append(m_jit
.branchTest32(MacroAssembler::Zero
, lengthGPR
));
3893 m_jit
.loadPtr(MacroAssembler::Address(leftGPR
, JSString::offsetOfValue()), leftTempGPR
);
3894 m_jit
.loadPtr(MacroAssembler::Address(rightGPR
, JSString::offsetOfValue()), rightTempGPR
);
3896 slowCase
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, leftTempGPR
));
3897 slowCase
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, rightTempGPR
));
3899 slowCase
.append(m_jit
.branchTest32(
3900 MacroAssembler::Zero
,
3901 MacroAssembler::Address(leftTempGPR
, StringImpl::flagsOffset()),
3902 TrustedImm32(StringImpl::flagIs8Bit())));
3903 slowCase
.append(m_jit
.branchTest32(
3904 MacroAssembler::Zero
,
3905 MacroAssembler::Address(rightTempGPR
, StringImpl::flagsOffset()),
3906 TrustedImm32(StringImpl::flagIs8Bit())));
3908 m_jit
.loadPtr(MacroAssembler::Address(leftTempGPR
, StringImpl::dataOffset()), leftTempGPR
);
3909 m_jit
.loadPtr(MacroAssembler::Address(rightTempGPR
, StringImpl::dataOffset()), rightTempGPR
);
3911 MacroAssembler::Label loop
= m_jit
.label();
3913 m_jit
.sub32(TrustedImm32(1), lengthGPR
);
3915 // This isn't going to generate the best code on x86. But that's OK, it's still better
3916 // than not inlining.
3917 m_jit
.load8(MacroAssembler::BaseIndex(leftTempGPR
, lengthGPR
, MacroAssembler::TimesOne
), leftTemp2GPR
);
3918 m_jit
.load8(MacroAssembler::BaseIndex(rightTempGPR
, lengthGPR
, MacroAssembler::TimesOne
), rightTemp2GPR
);
3919 falseCase
.append(m_jit
.branch32(MacroAssembler::NotEqual
, leftTemp2GPR
, rightTemp2GPR
));
3921 m_jit
.branchTest32(MacroAssembler::NonZero
, lengthGPR
).linkTo(loop
, &m_jit
);
3923 trueCase
.link(&m_jit
);
3925 m_jit
.move(TrustedImm64(ValueTrue
), leftTempGPR
);
3927 m_jit
.move(TrustedImm32(true), leftTempGPR
);
3930 JITCompiler::Jump done
= m_jit
.jump();
3932 falseCase
.link(&m_jit
);
3934 m_jit
.move(TrustedImm64(ValueFalse
), leftTempGPR
);
3936 m_jit
.move(TrustedImm32(false), leftTempGPR
);
3940 addSlowPathGenerator(
3942 slowCase
, this, operationCompareStringEq
, leftTempGPR
, leftGPR
, rightGPR
));
3945 jsValueResult(leftTempGPR
, node
, DataFormatJSBoolean
);
3947 booleanResult(leftTempGPR
, node
);
3951 void SpeculativeJIT::compileGetIndexedPropertyStorage(Node
* node
)
3953 SpeculateCellOperand
base(this, node
->child1());
3954 GPRReg baseReg
= base
.gpr();
3956 GPRTemporary
storage(this);
3957 GPRReg storageReg
= storage
.gpr();
3959 const TypedArrayDescriptor
* descriptor
= typedArrayDescriptor(node
->arrayMode());
3961 switch (node
->arrayMode().type()) {
3963 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSString::offsetOfValue()), storageReg
);
3965 addSlowPathGenerator(
3967 m_jit
.branchTest32(MacroAssembler::Zero
, storageReg
),
3968 this, operationResolveRope
, storageReg
, baseReg
));
3970 m_jit
.loadPtr(MacroAssembler::Address(storageReg
, StringImpl::dataOffset()), storageReg
);
3975 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, descriptor
->m_storageOffset
), storageReg
);
3979 storageResult(storageReg
, node
);
3982 void SpeculativeJIT::compileGetByValOnArguments(Node
* node
)
3984 SpeculateCellOperand
base(this, node
->child1());
3985 SpeculateStrictInt32Operand
property(this, node
->child2());
3986 GPRTemporary
result(this);
3987 #if USE(JSVALUE32_64)
3988 GPRTemporary
resultTag(this);
3990 GPRTemporary
scratch(this);
3992 GPRReg baseReg
= base
.gpr();
3993 GPRReg propertyReg
= property
.gpr();
3994 GPRReg resultReg
= result
.gpr();
3995 #if USE(JSVALUE32_64)
3996 GPRReg resultTagReg
= resultTag
.gpr();
3998 GPRReg scratchReg
= scratch
.gpr();
4003 ASSERT(ArrayMode(Array::Arguments
).alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
4005 // Two really lame checks.
4007 Uncountable
, JSValueSource(), 0,
4009 MacroAssembler::AboveOrEqual
, propertyReg
,
4010 MacroAssembler::Address(baseReg
, OBJECT_OFFSETOF(Arguments
, m_numArguments
))));
4012 Uncountable
, JSValueSource(), 0,
4013 m_jit
.branchTestPtr(
4014 MacroAssembler::NonZero
,
4015 MacroAssembler::Address(
4016 baseReg
, OBJECT_OFFSETOF(Arguments
, m_slowArguments
))));
4018 m_jit
.move(propertyReg
, resultReg
);
4019 m_jit
.neg32(resultReg
);
4020 m_jit
.signExtend32ToPtr(resultReg
, resultReg
);
4022 MacroAssembler::Address(baseReg
, OBJECT_OFFSETOF(Arguments
, m_registers
)),
4025 #if USE(JSVALUE32_64)
4027 MacroAssembler::BaseIndex(
4028 scratchReg
, resultReg
, MacroAssembler::TimesEight
,
4029 CallFrame::thisArgumentOffset() * sizeof(Register
) - sizeof(Register
) +
4030 OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)),
4033 MacroAssembler::BaseIndex(
4034 scratchReg
, resultReg
, MacroAssembler::TimesEight
,
4035 CallFrame::thisArgumentOffset() * sizeof(Register
) - sizeof(Register
) +
4036 OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)),
4038 jsValueResult(resultTagReg
, resultReg
, node
);
4041 MacroAssembler::BaseIndex(
4042 scratchReg
, resultReg
, MacroAssembler::TimesEight
,
4043 CallFrame::thisArgumentOffset() * sizeof(Register
) - sizeof(Register
)),
4045 jsValueResult(resultReg
, node
);
4049 void SpeculativeJIT::compileGetArgumentsLength(Node
* node
)
4051 SpeculateCellOperand
base(this, node
->child1());
4052 GPRTemporary
result(this, base
);
4054 GPRReg baseReg
= base
.gpr();
4055 GPRReg resultReg
= result
.gpr();
4060 ASSERT(ArrayMode(Array::Arguments
).alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
4063 Uncountable
, JSValueSource(), 0,
4065 MacroAssembler::NonZero
,
4066 MacroAssembler::Address(baseReg
, OBJECT_OFFSETOF(Arguments
, m_overrodeLength
))));
4069 MacroAssembler::Address(baseReg
, OBJECT_OFFSETOF(Arguments
, m_numArguments
)),
4071 integerResult(resultReg
, node
);
4074 void SpeculativeJIT::compileGetArrayLength(Node
* node
)
4076 const TypedArrayDescriptor
* descriptor
= typedArrayDescriptor(node
->arrayMode());
4078 switch (node
->arrayMode().type()) {
4081 case Array::Contiguous
: {
4082 StorageOperand
storage(this, node
->child2());
4083 GPRTemporary
result(this, storage
);
4084 GPRReg storageReg
= storage
.gpr();
4085 GPRReg resultReg
= result
.gpr();
4086 m_jit
.load32(MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()), resultReg
);
4088 integerResult(resultReg
, node
);
4091 case Array::ArrayStorage
:
4092 case Array::SlowPutArrayStorage
: {
4093 StorageOperand
storage(this, node
->child2());
4094 GPRTemporary
result(this, storage
);
4095 GPRReg storageReg
= storage
.gpr();
4096 GPRReg resultReg
= result
.gpr();
4097 m_jit
.load32(MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()), resultReg
);
4099 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, resultReg
, MacroAssembler::TrustedImm32(0)));
4101 integerResult(resultReg
, node
);
4104 case Array::String
: {
4105 SpeculateCellOperand
base(this, node
->child1());
4106 GPRTemporary
result(this, base
);
4107 GPRReg baseGPR
= base
.gpr();
4108 GPRReg resultGPR
= result
.gpr();
4109 m_jit
.load32(MacroAssembler::Address(baseGPR
, JSString::offsetOfLength()), resultGPR
);
4110 integerResult(resultGPR
, node
);
4113 case Array::Arguments
: {
4114 compileGetArgumentsLength(node
);
4118 SpeculateCellOperand
base(this, node
->child1());
4119 GPRTemporary
result(this, base
);
4120 GPRReg baseGPR
= base
.gpr();
4121 GPRReg resultGPR
= result
.gpr();
4123 m_jit
.load32(MacroAssembler::Address(baseGPR
, descriptor
->m_lengthOffset
), resultGPR
);
4124 integerResult(resultGPR
, node
);
4129 void SpeculativeJIT::compileNewFunctionNoCheck(Node
* node
)
4131 GPRResult
result(this);
4132 GPRReg resultGPR
= result
.gpr();
4135 operationNewFunctionNoCheck
, resultGPR
, m_jit
.codeBlock()->functionDecl(node
->functionDeclIndex()));
4136 cellResult(resultGPR
, node
);
4139 void SpeculativeJIT::compileNewFunctionExpression(Node
* node
)
4141 GPRResult
result(this);
4142 GPRReg resultGPR
= result
.gpr();
4145 operationNewFunctionExpression
,
4147 m_jit
.codeBlock()->functionExpr(node
->functionExprIndex()));
4148 cellResult(resultGPR
, node
);
4151 bool SpeculativeJIT::compileRegExpExec(Node
* node
)
4153 unsigned branchIndexInBlock
= detectPeepHoleBranch();
4154 if (branchIndexInBlock
== UINT_MAX
)
4156 Node
* branchNode
= m_jit
.graph().m_blocks
[m_block
]->at(branchIndexInBlock
);
4157 ASSERT(node
->adjustedRefCount() == 1);
4159 BlockIndex taken
= branchNode
->takenBlockIndex();
4160 BlockIndex notTaken
= branchNode
->notTakenBlockIndex();
4162 bool invert
= false;
4163 if (taken
== nextBlock()) {
4165 BlockIndex tmp
= taken
;
4170 SpeculateCellOperand
base(this, node
->child1());
4171 SpeculateCellOperand
argument(this, node
->child2());
4172 GPRReg baseGPR
= base
.gpr();
4173 GPRReg argumentGPR
= argument
.gpr();
4176 GPRResult
result(this);
4177 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
4179 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, result
.gpr(), taken
);
4182 use(node
->child1());
4183 use(node
->child2());
4184 m_indexInBlock
= branchIndexInBlock
;
4185 m_currentNode
= branchNode
;
4190 void SpeculativeJIT::compileAllocatePropertyStorage(Node
* node
)
4192 if (hasIndexingHeader(node
->structureTransitionData().previousStructure
->indexingType())) {
4193 SpeculateCellOperand
base(this, node
->child1());
4195 GPRReg baseGPR
= base
.gpr();
4199 GPRResult
result(this);
4200 callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity
, result
.gpr(), baseGPR
);
4202 storageResult(result
.gpr(), node
);
4206 SpeculateCellOperand
base(this, node
->child1());
4207 GPRTemporary
scratch(this);
4209 GPRReg baseGPR
= base
.gpr();
4210 GPRReg scratchGPR
= scratch
.gpr();
4212 ASSERT(!node
->structureTransitionData().previousStructure
->outOfLineCapacity());
4213 ASSERT(initialOutOfLineCapacity
== node
->structureTransitionData().newStructure
->outOfLineCapacity());
4215 JITCompiler::Jump slowPath
=
4216 emitAllocateBasicStorage(
4217 TrustedImm32(initialOutOfLineCapacity
* sizeof(JSValue
)), scratchGPR
);
4219 m_jit
.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue
)), scratchGPR
);
4221 addSlowPathGenerator(
4222 slowPathCall(slowPath
, this, operationAllocatePropertyStorageWithInitialCapacity
, scratchGPR
));
4224 m_jit
.storePtr(scratchGPR
, JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()));
4226 storageResult(scratchGPR
, node
);
4229 void SpeculativeJIT::compileReallocatePropertyStorage(Node
* node
)
4231 size_t oldSize
= node
->structureTransitionData().previousStructure
->outOfLineCapacity() * sizeof(JSValue
);
4232 size_t newSize
= oldSize
* outOfLineGrowthFactor
;
4233 ASSERT(newSize
== node
->structureTransitionData().newStructure
->outOfLineCapacity() * sizeof(JSValue
));
4235 if (hasIndexingHeader(node
->structureTransitionData().previousStructure
->indexingType())) {
4236 SpeculateCellOperand
base(this, node
->child1());
4238 GPRReg baseGPR
= base
.gpr();
4242 GPRResult
result(this);
4243 callOperation(operationReallocateButterflyToGrowPropertyStorage
, result
.gpr(), baseGPR
, newSize
/ sizeof(JSValue
));
4245 storageResult(result
.gpr(), node
);
4249 SpeculateCellOperand
base(this, node
->child1());
4250 StorageOperand
oldStorage(this, node
->child2());
4251 GPRTemporary
scratch1(this);
4252 GPRTemporary
scratch2(this);
4254 GPRReg baseGPR
= base
.gpr();
4255 GPRReg oldStorageGPR
= oldStorage
.gpr();
4256 GPRReg scratchGPR1
= scratch1
.gpr();
4257 GPRReg scratchGPR2
= scratch2
.gpr();
4259 JITCompiler::Jump slowPath
=
4260 emitAllocateBasicStorage(TrustedImm32(newSize
), scratchGPR2
);
4262 m_jit
.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue
)), scratchGPR2
);
4264 addSlowPathGenerator(
4265 slowPathCall(slowPath
, this, operationAllocatePropertyStorage
, scratchGPR2
, newSize
/ sizeof(JSValue
)));
4266 // We have scratchGPR2 = new storage, scratchGPR1 = scratch
4267 for (ptrdiff_t offset
= 0; offset
< static_cast<ptrdiff_t>(oldSize
); offset
+= sizeof(void*)) {
4268 m_jit
.loadPtr(JITCompiler::Address(oldStorageGPR
, -(offset
+ sizeof(JSValue
) + sizeof(void*))), scratchGPR1
);
4269 m_jit
.storePtr(scratchGPR1
, JITCompiler::Address(scratchGPR2
, -(offset
+ sizeof(JSValue
) + sizeof(void*))));
4271 m_jit
.storePtr(scratchGPR2
, JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()));
4273 storageResult(scratchGPR2
, node
);
4276 GPRReg
SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary
& temporary
, ArrayMode arrayMode
)
4278 if (!putByValWillNeedExtraRegister(arrayMode
))
4279 return InvalidGPRReg
;
4281 GPRTemporary
realTemporary(this);
4282 temporary
.adopt(realTemporary
);
4283 return temporary
.gpr();
4286 void SpeculativeJIT::compileToStringOnCell(Node
* node
)
4288 SpeculateCellOperand
op1(this, node
->child1());
4289 GPRReg op1GPR
= op1
.gpr();
4291 switch (node
->child1().useKind()) {
4292 case StringObjectUse
: {
4293 GPRTemporary
result(this);
4294 GPRReg resultGPR
= result
.gpr();
4296 speculateStringObject(node
->child1(), op1GPR
);
4297 m_state
.forNode(node
->child1()).filter(SpecStringObject
);
4298 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, JSWrapperObject::internalValueCellOffset()), resultGPR
);
4299 cellResult(resultGPR
, node
);
4303 case StringOrStringObjectUse
: {
4304 GPRTemporary
result(this);
4305 GPRReg resultGPR
= result
.gpr();
4307 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, JSCell::structureOffset()), resultGPR
);
4308 JITCompiler::Jump isString
= m_jit
.branchPtr(
4309 JITCompiler::Equal
, resultGPR
, TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
4311 speculateStringObjectForStructure(node
->child1(), resultGPR
);
4313 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, JSWrapperObject::internalValueCellOffset()), resultGPR
);
4315 JITCompiler::Jump done
= m_jit
.jump();
4316 isString
.link(&m_jit
);
4317 m_jit
.move(op1GPR
, resultGPR
);
4320 m_state
.forNode(node
->child1()).filter(SpecString
| SpecStringObject
);
4322 cellResult(resultGPR
, node
);
4327 GPRResult
result(this);
4328 GPRReg resultGPR
= result
.gpr();
4330 // We flush registers instead of silent spill/fill because in this mode we
4331 // believe that most likely the input is not a string, and we need to take
4334 JITCompiler::Jump done
;
4335 if (node
->child1()->prediction() & SpecString
) {
4336 JITCompiler::Jump needCall
= m_jit
.branchPtr(
4337 JITCompiler::NotEqual
,
4338 JITCompiler::Address(op1GPR
, JSCell::structureOffset()),
4339 TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
4340 m_jit
.move(op1GPR
, resultGPR
);
4341 done
= m_jit
.jump();
4342 needCall
.link(&m_jit
);
4344 callOperation(operationToStringOnCell
, resultGPR
, op1GPR
);
4347 cellResult(resultGPR
, node
);
4352 RELEASE_ASSERT_NOT_REACHED();
4356 void SpeculativeJIT::compileNewStringObject(Node
* node
)
4358 SpeculateCellOperand
operand(this, node
->child1());
4360 GPRTemporary
result(this);
4361 GPRTemporary
scratch1(this);
4362 GPRTemporary
scratch2(this);
4364 GPRReg operandGPR
= operand
.gpr();
4365 GPRReg resultGPR
= result
.gpr();
4366 GPRReg scratch1GPR
= scratch1
.gpr();
4367 GPRReg scratch2GPR
= scratch2
.gpr();
4369 JITCompiler::JumpList slowPath
;
4371 emitAllocateJSObject
<StringObject
>(
4372 resultGPR
, TrustedImmPtr(node
->structure()), TrustedImmPtr(0), scratch1GPR
, scratch2GPR
,
4376 TrustedImmPtr(&StringObject::s_info
),
4377 JITCompiler::Address(resultGPR
, JSDestructibleObject::classInfoOffset()));
4380 operandGPR
, JITCompiler::Address(resultGPR
, JSWrapperObject::internalValueOffset()));
4383 TrustedImm32(JSValue::CellTag
),
4384 JITCompiler::Address(resultGPR
, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
4387 JITCompiler::Address(resultGPR
, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
4390 addSlowPathGenerator(slowPathCall(
4391 slowPath
, this, operationNewStringObject
, resultGPR
, operandGPR
, node
->structure()));
4393 cellResult(resultGPR
, node
);
4396 void SpeculativeJIT::speculateInt32(Edge edge
)
4398 if (!needsTypeCheck(edge
, SpecInt32
))
4401 (SpeculateIntegerOperand(this, edge
)).gpr();
4404 void SpeculativeJIT::speculateNumber(Edge edge
)
4406 if (!needsTypeCheck(edge
, SpecNumber
))
4409 (SpeculateDoubleOperand(this, edge
)).fpr();
4412 void SpeculativeJIT::speculateRealNumber(Edge edge
)
4414 if (!needsTypeCheck(edge
, SpecRealNumber
))
4417 SpeculateDoubleOperand
operand(this, edge
);
4418 FPRReg fpr
= operand
.fpr();
4420 JSValueRegs(), edge
, SpecRealNumber
,
4422 MacroAssembler::DoubleNotEqualOrUnordered
, fpr
, fpr
));
4425 void SpeculativeJIT::speculateBoolean(Edge edge
)
4427 if (!needsTypeCheck(edge
, SpecBoolean
))
4430 (SpeculateBooleanOperand(this, edge
)).gpr();
4433 void SpeculativeJIT::speculateCell(Edge edge
)
4435 if (!needsTypeCheck(edge
, SpecCell
))
4438 (SpeculateCellOperand(this, edge
)).gpr();
4441 void SpeculativeJIT::speculateObject(Edge edge
)
4443 if (!needsTypeCheck(edge
, SpecObject
))
4446 SpeculateCellOperand
operand(this, edge
);
4447 GPRReg gpr
= operand
.gpr();
4449 JSValueSource::unboxedCell(gpr
), edge
, SpecObject
, m_jit
.branchPtr(
4450 MacroAssembler::Equal
,
4451 MacroAssembler::Address(gpr
, JSCell::structureOffset()),
4452 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
4455 void SpeculativeJIT::speculateObjectOrOther(Edge edge
)
4457 if (!needsTypeCheck(edge
, SpecObject
| SpecOther
))
4460 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4461 GPRTemporary
temp(this);
4462 GPRReg tempGPR
= temp
.gpr();
4464 GPRReg gpr
= operand
.gpr();
4465 MacroAssembler::Jump notCell
= m_jit
.branchTest64(
4466 MacroAssembler::NonZero
, gpr
, GPRInfo::tagMaskRegister
);
4468 JSValueRegs(gpr
), edge
, (~SpecCell
) | SpecObject
, m_jit
.branchPtr(
4469 MacroAssembler::Equal
,
4470 MacroAssembler::Address(gpr
, JSCell::structureOffset()),
4471 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
4472 MacroAssembler::Jump done
= m_jit
.jump();
4473 notCell
.link(&m_jit
);
4474 if (needsTypeCheck(edge
, SpecCell
| SpecOther
)) {
4475 m_jit
.move(gpr
, tempGPR
);
4476 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), tempGPR
);
4479 JSValueRegs(gpr
), edge
, SpecCell
| SpecOther
,
4481 MacroAssembler::NotEqual
, tempGPR
,
4482 MacroAssembler::TrustedImm64(ValueNull
)));
4486 GPRReg tagGPR
= operand
.tagGPR();
4487 GPRReg payloadGPR
= operand
.payloadGPR();
4488 MacroAssembler::Jump notCell
=
4489 m_jit
.branch32(MacroAssembler::NotEqual
, tagGPR
, TrustedImm32(JSValue::CellTag
));
4491 JSValueRegs(tagGPR
, payloadGPR
), edge
, (~SpecCell
) | SpecObject
, m_jit
.branchPtr(
4492 MacroAssembler::Equal
,
4493 MacroAssembler::Address(payloadGPR
, JSCell::structureOffset()),
4494 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
4495 MacroAssembler::Jump done
= m_jit
.jump();
4496 notCell
.link(&m_jit
);
4497 if (needsTypeCheck(edge
, SpecCell
| SpecOther
)) {
4498 m_jit
.move(tagGPR
, tempGPR
);
4499 m_jit
.or32(TrustedImm32(1), tempGPR
);
4502 JSValueRegs(tagGPR
, payloadGPR
), edge
, SpecCell
| SpecOther
,
4504 MacroAssembler::NotEqual
, tempGPR
,
4505 MacroAssembler::TrustedImm32(JSValue::NullTag
)));
4511 void SpeculativeJIT::speculateString(Edge edge
)
4513 if (!needsTypeCheck(edge
, SpecString
))
4516 SpeculateCellOperand
operand(this, edge
);
4517 GPRReg gpr
= operand
.gpr();
4519 JSValueSource::unboxedCell(gpr
), edge
, SpecString
, m_jit
.branchPtr(
4520 MacroAssembler::NotEqual
,
4521 MacroAssembler::Address(gpr
, JSCell::structureOffset()),
4522 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
4525 void SpeculativeJIT::speculateStringObject(Edge edge
, GPRReg gpr
)
4527 speculateStringObjectForStructure(edge
, JITCompiler::Address(gpr
, JSCell::structureOffset()));
4530 void SpeculativeJIT::speculateStringObject(Edge edge
)
4532 if (!needsTypeCheck(edge
, SpecStringObject
))
4535 SpeculateCellOperand
operand(this, edge
);
4536 GPRReg gpr
= operand
.gpr();
4537 if (!needsTypeCheck(edge
, SpecStringObject
))
4540 speculateStringObject(edge
, gpr
);
4541 m_state
.forNode(edge
).filter(SpecStringObject
);
4544 void SpeculativeJIT::speculateStringOrStringObject(Edge edge
)
4546 if (!needsTypeCheck(edge
, SpecString
| SpecStringObject
))
4549 SpeculateCellOperand
operand(this, edge
);
4550 GPRReg gpr
= operand
.gpr();
4551 if (!needsTypeCheck(edge
, SpecString
| SpecStringObject
))
4554 GPRTemporary
structure(this);
4555 GPRReg structureGPR
= structure
.gpr();
4557 m_jit
.loadPtr(JITCompiler::Address(gpr
, JSCell::structureOffset()), structureGPR
);
4559 JITCompiler::Jump isString
= m_jit
.branchPtr(
4560 JITCompiler::Equal
, structureGPR
, TrustedImmPtr(m_jit
.vm()->stringStructure
.get()));
4562 speculateStringObjectForStructure(edge
, structureGPR
);
4564 isString
.link(&m_jit
);
4566 m_state
.forNode(edge
).filter(SpecString
| SpecStringObject
);
4569 void SpeculativeJIT::speculateNotCell(Edge edge
)
4571 if (!needsTypeCheck(edge
, ~SpecCell
))
4574 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4577 JSValueRegs(operand
.gpr()), edge
, ~SpecCell
,
4579 JITCompiler::Zero
, operand
.gpr(), GPRInfo::tagMaskRegister
));
4582 JSValueRegs(operand
.tagGPR(), operand
.payloadGPR()), edge
, ~SpecCell
,
4584 JITCompiler::Equal
, operand
.tagGPR(), TrustedImm32(JSValue::CellTag
)));
4588 void SpeculativeJIT::speculateOther(Edge edge
)
4590 if (!needsTypeCheck(edge
, SpecOther
))
4593 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4594 GPRTemporary
temp(this);
4595 GPRReg tempGPR
= temp
.gpr();
4597 m_jit
.move(operand
.gpr(), tempGPR
);
4598 m_jit
.and64(MacroAssembler::TrustedImm32(~TagBitUndefined
), tempGPR
);
4600 JSValueRegs(operand
.gpr()), edge
, SpecOther
,
4602 MacroAssembler::NotEqual
, tempGPR
,
4603 MacroAssembler::TrustedImm64(ValueNull
)));
4605 m_jit
.move(operand
.tagGPR(), tempGPR
);
4606 m_jit
.or32(TrustedImm32(1), tempGPR
);
4608 JSValueRegs(operand
.tagGPR(), operand
.payloadGPR()), edge
, SpecOther
,
4609 m_jit
.branch32(MacroAssembler::NotEqual
, tempGPR
, TrustedImm32(JSValue::NullTag
)));
4613 void SpeculativeJIT::speculate(Node
*, Edge edge
)
4615 switch (edge
.useKind()) {
4619 ASSERT(!needsTypeCheck(edge
, SpecInt32
));
4621 case KnownNumberUse
:
4622 ASSERT(!needsTypeCheck(edge
, SpecNumber
));
4625 ASSERT(!needsTypeCheck(edge
, SpecCell
));
4627 case KnownStringUse
:
4628 ASSERT(!needsTypeCheck(edge
, SpecString
));
4631 speculateInt32(edge
);
4634 speculateRealNumber(edge
);
4637 speculateNumber(edge
);
4640 speculateBoolean(edge
);
4643 speculateCell(edge
);
4646 speculateObject(edge
);
4648 case ObjectOrOtherUse
:
4649 speculateObjectOrOther(edge
);
4652 speculateString(edge
);
4654 case StringObjectUse
:
4655 speculateStringObject(edge
);
4657 case StringOrStringObjectUse
:
4658 speculateStringOrStringObject(edge
);
4661 speculateNotCell(edge
);
4664 speculateOther(edge
);
4667 RELEASE_ASSERT_NOT_REACHED();
4672 } } // namespace JSC::DFG