2 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGSaneStringGetByValSlowPathGenerator.h"
37 #include "DFGSlowPathGenerator.h"
38 #include "LinkBuffer.h"
39 #include "JSCInlines.h"
40 #include "ScratchRegisterAllocator.h"
41 #include "WriteBarrierBuffer.h"
42 #include <wtf/MathExtras.h>
44 namespace JSC
{ namespace DFG
{
46 SpeculativeJIT::SpeculativeJIT(JITCompiler
& jit
)
50 , m_lastGeneratedNode(LastNodeType
)
52 , m_generationInfo(m_jit
.graph().frameRegisterCount())
53 , m_state(m_jit
.graph())
54 , m_interpreter(m_jit
.graph(), m_state
)
55 , m_stream(&jit
.jitCode()->variableEventStream
)
56 , m_minifiedGraph(&jit
.jitCode()->minifiedDFG
)
57 , m_isCheckingArgumentTypes(false)
61 SpeculativeJIT::~SpeculativeJIT()
65 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR
, Structure
* structure
, GPRReg storageGPR
, unsigned numElements
)
67 ASSERT(hasUndecided(structure
->indexingType()) || hasInt32(structure
->indexingType()) || hasDouble(structure
->indexingType()) || hasContiguous(structure
->indexingType()));
69 GPRTemporary
scratch(this);
70 GPRTemporary
scratch2(this);
71 GPRReg scratchGPR
= scratch
.gpr();
72 GPRReg scratch2GPR
= scratch2
.gpr();
74 unsigned vectorLength
= std::max(BASE_VECTOR_LEN
, numElements
);
76 JITCompiler::JumpList slowCases
;
79 emitAllocateBasicStorage(TrustedImm32(vectorLength
* sizeof(JSValue
) + sizeof(IndexingHeader
)), storageGPR
));
80 m_jit
.subPtr(TrustedImm32(vectorLength
* sizeof(JSValue
)), storageGPR
);
81 emitAllocateJSObject
<JSArray
>(resultGPR
, TrustedImmPtr(structure
), storageGPR
, scratchGPR
, scratch2GPR
, slowCases
);
83 m_jit
.store32(TrustedImm32(numElements
), MacroAssembler::Address(storageGPR
, Butterfly::offsetOfPublicLength()));
84 m_jit
.store32(TrustedImm32(vectorLength
), MacroAssembler::Address(storageGPR
, Butterfly::offsetOfVectorLength()));
86 if (hasDouble(structure
->indexingType()) && numElements
< vectorLength
) {
88 m_jit
.move(TrustedImm64(bitwise_cast
<int64_t>(PNaN
)), scratchGPR
);
89 for (unsigned i
= numElements
; i
< vectorLength
; ++i
)
90 m_jit
.store64(scratchGPR
, MacroAssembler::Address(storageGPR
, sizeof(double) * i
));
92 EncodedValueDescriptor value
;
93 value
.asInt64
= JSValue::encode(JSValue(JSValue::EncodeAsDouble
, PNaN
));
94 for (unsigned i
= numElements
; i
< vectorLength
; ++i
) {
95 m_jit
.store32(TrustedImm32(value
.asBits
.tag
), MacroAssembler::Address(storageGPR
, sizeof(double) * i
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
96 m_jit
.store32(TrustedImm32(value
.asBits
.payload
), MacroAssembler::Address(storageGPR
, sizeof(double) * i
+ OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
101 // I want a slow path that also loads out the storage pointer, and that's
102 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
103 // of work for a very small piece of functionality. :-/
104 addSlowPathGenerator(adoptPtr(
105 new CallArrayAllocatorSlowPathGenerator(
106 slowCases
, this, operationNewArrayWithSize
, resultGPR
, storageGPR
,
107 structure
, numElements
)));
110 void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR
, GPRReg scratchGPR1
, GPRReg scratchGPR2
, MacroAssembler::JumpList
& slowPath
)
112 Structure
* structure
= m_jit
.graph().globalObjectFor(m_currentNode
->origin
.semantic
)->argumentsStructure();
113 emitAllocateDestructibleObject
<Arguments
>(resultGPR
, structure
, scratchGPR1
, scratchGPR2
, slowPath
);
115 m_jit
.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR
, Arguments::offsetOfActivation()));
117 m_jit
.load32(JITCompiler::payloadFor(JSStack::ArgumentCount
), scratchGPR1
);
118 m_jit
.sub32(TrustedImm32(1), scratchGPR1
);
119 m_jit
.store32(scratchGPR1
, MacroAssembler::Address(resultGPR
, Arguments::offsetOfNumArguments()));
121 m_jit
.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR
, Arguments::offsetOfOverrodeLength()));
122 if (m_jit
.isStrictModeFor(m_currentNode
->origin
.semantic
))
123 m_jit
.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR
, Arguments::offsetOfIsStrictMode()));
125 m_jit
.storePtr(GPRInfo::callFrameRegister
, MacroAssembler::Address(resultGPR
, Arguments::offsetOfRegisters()));
126 m_jit
.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR
, Arguments::offsetOfRegisterArray()));
127 m_jit
.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR
, Arguments::offsetOfSlowArgumentData()));
129 m_jit
.loadPtr(JITCompiler::addressFor(JSStack::Callee
), scratchGPR1
);
130 m_jit
.storePtr(scratchGPR1
, MacroAssembler::Address(resultGPR
, Arguments::offsetOfCallee()));
133 void SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Node
* node
, MacroAssembler::Jump jumpToFail
)
137 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
138 m_jit
.appendExitInfo(jumpToFail
);
139 m_jit
.jitCode()->appendOSRExit(OSRExit(kind
, jsValueSource
, m_jit
.graph().methodOfGettingAValueProfileFor(node
), this, m_stream
->size()));
142 void SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Node
* node
, const MacroAssembler::JumpList
& jumpsToFail
)
146 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
147 m_jit
.appendExitInfo(jumpsToFail
);
148 m_jit
.jitCode()->appendOSRExit(OSRExit(kind
, jsValueSource
, m_jit
.graph().methodOfGettingAValueProfileFor(node
), this, m_stream
->size()));
151 OSRExitJumpPlaceholder
SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Node
* node
)
154 return OSRExitJumpPlaceholder();
155 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
156 unsigned index
= m_jit
.jitCode()->osrExit
.size();
157 m_jit
.appendExitInfo();
158 m_jit
.jitCode()->appendOSRExit(OSRExit(kind
, jsValueSource
, m_jit
.graph().methodOfGettingAValueProfileFor(node
), this, m_stream
->size()));
159 return OSRExitJumpPlaceholder(index
);
162 OSRExitJumpPlaceholder
SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Edge nodeUse
)
164 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
165 return speculationCheck(kind
, jsValueSource
, nodeUse
.node());
168 void SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Edge nodeUse
, MacroAssembler::Jump jumpToFail
)
170 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
171 speculationCheck(kind
, jsValueSource
, nodeUse
.node(), jumpToFail
);
174 void SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Edge nodeUse
, const MacroAssembler::JumpList
& jumpsToFail
)
176 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
177 speculationCheck(kind
, jsValueSource
, nodeUse
.node(), jumpsToFail
);
180 void SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Node
* node
, MacroAssembler::Jump jumpToFail
, const SpeculationRecovery
& recovery
)
184 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
185 unsigned recoveryIndex
= m_jit
.jitCode()->appendSpeculationRecovery(recovery
);
186 m_jit
.appendExitInfo(jumpToFail
);
187 m_jit
.jitCode()->appendOSRExit(OSRExit(kind
, jsValueSource
, m_jit
.graph().methodOfGettingAValueProfileFor(node
), this, m_stream
->size(), recoveryIndex
));
190 void SpeculativeJIT::speculationCheck(ExitKind kind
, JSValueSource jsValueSource
, Edge nodeUse
, MacroAssembler::Jump jumpToFail
, const SpeculationRecovery
& recovery
)
192 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
193 speculationCheck(kind
, jsValueSource
, nodeUse
.node(), jumpToFail
, recovery
);
196 void SpeculativeJIT::emitInvalidationPoint(Node
* node
)
201 OSRExitCompilationInfo
& info
= m_jit
.appendExitInfo(JITCompiler::JumpList());
202 m_jit
.jitCode()->appendOSRExit(OSRExit(
203 UncountableInvalidation
, JSValueSource(),
204 m_jit
.graph().methodOfGettingAValueProfileFor(node
),
205 this, m_stream
->size()));
206 info
.m_replacementSource
= m_jit
.watchpointLabel();
207 ASSERT(info
.m_replacementSource
.isSet());
211 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind
, JSValueRegs jsValueRegs
, Node
* node
)
213 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
216 speculationCheck(kind
, jsValueRegs
, node
, m_jit
.jump());
217 m_compileOkay
= false;
220 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind
, JSValueRegs jsValueRegs
, Edge nodeUse
)
222 ASSERT(m_isCheckingArgumentTypes
|| m_canExit
);
223 terminateSpeculativeExecution(kind
, jsValueRegs
, nodeUse
.node());
226 void SpeculativeJIT::typeCheck(JSValueSource source
, Edge edge
, SpeculatedType typesPassedThrough
, MacroAssembler::Jump jumpToFail
)
228 ASSERT(needsTypeCheck(edge
, typesPassedThrough
));
229 m_interpreter
.filter(edge
, typesPassedThrough
);
230 speculationCheck(BadType
, source
, edge
.node(), jumpToFail
);
233 RegisterSet
SpeculativeJIT::usedRegisters()
237 for (unsigned i
= GPRInfo::numberOfRegisters
; i
--;) {
238 GPRReg gpr
= GPRInfo::toRegister(i
);
239 if (m_gprs
.isInUse(gpr
))
242 for (unsigned i
= FPRInfo::numberOfRegisters
; i
--;) {
243 FPRReg fpr
= FPRInfo::toRegister(i
);
244 if (m_fprs
.isInUse(fpr
))
248 result
.merge(RegisterSet::specialRegisters());
253 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr
<SlowPathGenerator
> slowPathGenerator
)
255 m_slowPathGenerators
.append(slowPathGenerator
);
258 void SpeculativeJIT::runSlowPathGenerators()
260 for (unsigned i
= 0; i
< m_slowPathGenerators
.size(); ++i
)
261 m_slowPathGenerators
[i
]->generate(this);
264 // On Windows we need to wrap fmod; on other platforms we can call it directly.
265 // On ARMv7 we assert that all function pointers have to low bit set (point to thumb code).
266 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
267 static double JIT_OPERATION
fmodAsDFGOperation(double x
, double y
)
272 #define fmodAsDFGOperation fmod
275 void SpeculativeJIT::clearGenerationInfo()
277 for (unsigned i
= 0; i
< m_generationInfo
.size(); ++i
)
278 m_generationInfo
[i
] = GenerationInfo();
279 m_gprs
= RegisterBank
<GPRInfo
>();
280 m_fprs
= RegisterBank
<FPRInfo
>();
283 SilentRegisterSavePlan
SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe
, GPRReg source
)
285 GenerationInfo
& info
= generationInfoFromVirtualRegister(spillMe
);
286 Node
* node
= info
.node();
287 DataFormat registerFormat
= info
.registerFormat();
288 ASSERT(registerFormat
!= DataFormatNone
);
289 ASSERT(registerFormat
!= DataFormatDouble
);
291 SilentSpillAction spillAction
;
292 SilentFillAction fillAction
;
294 if (!info
.needsSpill())
295 spillAction
= DoNothingForSpill
;
298 ASSERT(info
.gpr() == source
);
299 if (registerFormat
== DataFormatInt32
)
300 spillAction
= Store32Payload
;
301 else if (registerFormat
== DataFormatCell
|| registerFormat
== DataFormatStorage
)
302 spillAction
= StorePtr
;
303 else if (registerFormat
== DataFormatInt52
|| registerFormat
== DataFormatStrictInt52
)
304 spillAction
= Store64
;
306 ASSERT(registerFormat
& DataFormatJS
);
307 spillAction
= Store64
;
309 #elif USE(JSVALUE32_64)
310 if (registerFormat
& DataFormatJS
) {
311 ASSERT(info
.tagGPR() == source
|| info
.payloadGPR() == source
);
312 spillAction
= source
== info
.tagGPR() ? Store32Tag
: Store32Payload
;
314 ASSERT(info
.gpr() == source
);
315 spillAction
= Store32Payload
;
320 if (registerFormat
== DataFormatInt32
) {
321 ASSERT(info
.gpr() == source
);
322 ASSERT(isJSInt32(info
.registerFormat()));
323 if (node
->hasConstant()) {
324 ASSERT(isInt32Constant(node
));
325 fillAction
= SetInt32Constant
;
327 fillAction
= Load32Payload
;
328 } else if (registerFormat
== DataFormatBoolean
) {
330 RELEASE_ASSERT_NOT_REACHED();
331 fillAction
= DoNothingForFill
;
332 #elif USE(JSVALUE32_64)
333 ASSERT(info
.gpr() == source
);
334 if (node
->hasConstant()) {
335 ASSERT(isBooleanConstant(node
));
336 fillAction
= SetBooleanConstant
;
338 fillAction
= Load32Payload
;
340 } else if (registerFormat
== DataFormatCell
) {
341 ASSERT(info
.gpr() == source
);
342 if (node
->hasConstant()) {
343 JSValue value
= valueOfJSConstant(node
);
344 ASSERT_UNUSED(value
, value
.isCell());
345 fillAction
= SetCellConstant
;
348 fillAction
= LoadPtr
;
350 fillAction
= Load32Payload
;
353 } else if (registerFormat
== DataFormatStorage
) {
354 ASSERT(info
.gpr() == source
);
355 fillAction
= LoadPtr
;
356 } else if (registerFormat
== DataFormatInt52
) {
357 if (node
->hasConstant())
358 fillAction
= SetInt52Constant
;
359 else if (info
.spillFormat() == DataFormatInt52
)
361 else if (info
.spillFormat() == DataFormatStrictInt52
)
362 fillAction
= Load64ShiftInt52Left
;
363 else if (info
.spillFormat() == DataFormatNone
)
366 RELEASE_ASSERT_NOT_REACHED();
367 fillAction
= Load64
; // Make GCC happy.
369 } else if (registerFormat
== DataFormatStrictInt52
) {
370 if (node
->hasConstant())
371 fillAction
= SetStrictInt52Constant
;
372 else if (info
.spillFormat() == DataFormatInt52
)
373 fillAction
= Load64ShiftInt52Right
;
374 else if (info
.spillFormat() == DataFormatStrictInt52
)
376 else if (info
.spillFormat() == DataFormatNone
)
379 RELEASE_ASSERT_NOT_REACHED();
380 fillAction
= Load64
; // Make GCC happy.
383 ASSERT(registerFormat
& DataFormatJS
);
385 ASSERT(info
.gpr() == source
);
386 if (node
->hasConstant()) {
387 if (valueOfJSConstant(node
).isCell())
388 fillAction
= SetTrustedJSConstant
;
389 fillAction
= SetJSConstant
;
390 } else if (info
.spillFormat() == DataFormatInt32
) {
391 ASSERT(registerFormat
== DataFormatJSInt32
);
392 fillAction
= Load32PayloadBoxInt
;
396 ASSERT(info
.tagGPR() == source
|| info
.payloadGPR() == source
);
397 if (node
->hasConstant())
398 fillAction
= info
.tagGPR() == source
? SetJSConstantTag
: SetJSConstantPayload
;
399 else if (info
.payloadGPR() == source
)
400 fillAction
= Load32Payload
;
401 else { // Fill the Tag
402 switch (info
.spillFormat()) {
403 case DataFormatInt32
:
404 ASSERT(registerFormat
== DataFormatJSInt32
);
405 fillAction
= SetInt32Tag
;
408 ASSERT(registerFormat
== DataFormatJSCell
);
409 fillAction
= SetCellTag
;
411 case DataFormatBoolean
:
412 ASSERT(registerFormat
== DataFormatJSBoolean
);
413 fillAction
= SetBooleanTag
;
416 fillAction
= Load32Tag
;
423 return SilentRegisterSavePlan(spillAction
, fillAction
, node
, source
);
426 SilentRegisterSavePlan
SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe
, FPRReg source
)
428 GenerationInfo
& info
= generationInfoFromVirtualRegister(spillMe
);
429 Node
* node
= info
.node();
430 ASSERT(info
.registerFormat() == DataFormatDouble
);
432 SilentSpillAction spillAction
;
433 SilentFillAction fillAction
;
435 if (!info
.needsSpill())
436 spillAction
= DoNothingForSpill
;
438 ASSERT(!node
->hasConstant());
439 ASSERT(info
.spillFormat() == DataFormatNone
);
440 ASSERT(info
.fpr() == source
);
441 spillAction
= StoreDouble
;
445 if (node
->hasConstant()) {
446 ASSERT(isNumberConstant(node
));
447 fillAction
= SetDoubleConstant
;
449 ASSERT(info
.spillFormat() == DataFormatNone
|| info
.spillFormat() == DataFormatDouble
);
450 fillAction
= LoadDouble
;
452 #elif USE(JSVALUE32_64)
453 ASSERT(info
.registerFormat() == DataFormatDouble
);
454 if (node
->hasConstant()) {
455 ASSERT(isNumberConstant(node
));
456 fillAction
= SetDoubleConstant
;
458 fillAction
= LoadDouble
;
461 return SilentRegisterSavePlan(spillAction
, fillAction
, node
, source
);
464 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan
& plan
)
466 switch (plan
.spillAction()) {
467 case DoNothingForSpill
:
470 m_jit
.store32(plan
.gpr(), JITCompiler::tagFor(plan
.node()->virtualRegister()));
473 m_jit
.store32(plan
.gpr(), JITCompiler::payloadFor(plan
.node()->virtualRegister()));
476 m_jit
.storePtr(plan
.gpr(), JITCompiler::addressFor(plan
.node()->virtualRegister()));
480 m_jit
.store64(plan
.gpr(), JITCompiler::addressFor(plan
.node()->virtualRegister()));
484 m_jit
.storeDouble(plan
.fpr(), JITCompiler::addressFor(plan
.node()->virtualRegister()));
487 RELEASE_ASSERT_NOT_REACHED();
491 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan
& plan
, GPRReg canTrample
)
493 #if USE(JSVALUE32_64)
494 UNUSED_PARAM(canTrample
);
496 switch (plan
.fillAction()) {
497 case DoNothingForFill
:
499 case SetInt32Constant
:
500 m_jit
.move(Imm32(valueOfInt32Constant(plan
.node())), plan
.gpr());
503 case SetInt52Constant
:
504 m_jit
.move(Imm64(valueOfJSConstant(plan
.node()).asMachineInt() << JSValue::int52ShiftAmount
), plan
.gpr());
506 case SetStrictInt52Constant
:
507 m_jit
.move(Imm64(valueOfJSConstant(plan
.node()).asMachineInt()), plan
.gpr());
509 #endif // USE(JSVALUE64)
510 case SetBooleanConstant
:
511 m_jit
.move(TrustedImm32(valueOfBooleanConstant(plan
.node())), plan
.gpr());
513 case SetCellConstant
:
514 m_jit
.move(TrustedImmPtr(valueOfJSConstant(plan
.node()).asCell()), plan
.gpr());
517 case SetTrustedJSConstant
:
518 m_jit
.move(valueOfJSConstantAsImm64(plan
.node()).asTrustedImm64(), plan
.gpr());
521 m_jit
.move(valueOfJSConstantAsImm64(plan
.node()), plan
.gpr());
523 case SetDoubleConstant
:
524 m_jit
.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan
.node()))), canTrample
);
525 m_jit
.move64ToDouble(canTrample
, plan
.fpr());
527 case Load32PayloadBoxInt
:
528 m_jit
.load32(JITCompiler::payloadFor(plan
.node()->virtualRegister()), plan
.gpr());
529 m_jit
.or64(GPRInfo::tagTypeNumberRegister
, plan
.gpr());
531 case Load32PayloadConvertToInt52
:
532 m_jit
.load32(JITCompiler::payloadFor(plan
.node()->virtualRegister()), plan
.gpr());
533 m_jit
.signExtend32ToPtr(plan
.gpr(), plan
.gpr());
534 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), plan
.gpr());
536 case Load32PayloadSignExtend
:
537 m_jit
.load32(JITCompiler::payloadFor(plan
.node()->virtualRegister()), plan
.gpr());
538 m_jit
.signExtend32ToPtr(plan
.gpr(), plan
.gpr());
541 case SetJSConstantTag
:
542 m_jit
.move(Imm32(valueOfJSConstant(plan
.node()).tag()), plan
.gpr());
544 case SetJSConstantPayload
:
545 m_jit
.move(Imm32(valueOfJSConstant(plan
.node()).payload()), plan
.gpr());
548 m_jit
.move(TrustedImm32(JSValue::Int32Tag
), plan
.gpr());
551 m_jit
.move(TrustedImm32(JSValue::CellTag
), plan
.gpr());
554 m_jit
.move(TrustedImm32(JSValue::BooleanTag
), plan
.gpr());
556 case SetDoubleConstant
:
557 m_jit
.loadDouble(TrustedImmPtr(addressOfDoubleConstant(plan
.node())), plan
.fpr());
561 m_jit
.load32(JITCompiler::tagFor(plan
.node()->virtualRegister()), plan
.gpr());
564 m_jit
.load32(JITCompiler::payloadFor(plan
.node()->virtualRegister()), plan
.gpr());
567 m_jit
.loadPtr(JITCompiler::addressFor(plan
.node()->virtualRegister()), plan
.gpr());
571 m_jit
.load64(JITCompiler::addressFor(plan
.node()->virtualRegister()), plan
.gpr());
573 case Load64ShiftInt52Right
:
574 m_jit
.load64(JITCompiler::addressFor(plan
.node()->virtualRegister()), plan
.gpr());
575 m_jit
.rshift64(TrustedImm32(JSValue::int52ShiftAmount
), plan
.gpr());
577 case Load64ShiftInt52Left
:
578 m_jit
.load64(JITCompiler::addressFor(plan
.node()->virtualRegister()), plan
.gpr());
579 m_jit
.lshift64(TrustedImm32(JSValue::int52ShiftAmount
), plan
.gpr());
583 m_jit
.loadDouble(JITCompiler::addressFor(plan
.node()->virtualRegister()), plan
.fpr());
586 RELEASE_ASSERT_NOT_REACHED();
590 JITCompiler::Jump
SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR
, ArrayMode arrayMode
, IndexingType shape
)
592 switch (arrayMode
.arrayClass()) {
593 case Array::OriginalArray
: {
595 JITCompiler::Jump result
; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
600 m_jit
.and32(TrustedImm32(IsArray
| IndexingShapeMask
), tempGPR
);
601 return m_jit
.branch32(
602 MacroAssembler::NotEqual
, tempGPR
, TrustedImm32(IsArray
| shape
));
604 case Array::NonArray
:
605 case Array::OriginalNonArray
:
606 m_jit
.and32(TrustedImm32(IsArray
| IndexingShapeMask
), tempGPR
);
607 return m_jit
.branch32(
608 MacroAssembler::NotEqual
, tempGPR
, TrustedImm32(shape
));
610 case Array::PossiblyArray
:
611 m_jit
.and32(TrustedImm32(IndexingShapeMask
), tempGPR
);
612 return m_jit
.branch32(MacroAssembler::NotEqual
, tempGPR
, TrustedImm32(shape
));
615 RELEASE_ASSERT_NOT_REACHED();
616 return JITCompiler::Jump();
619 JITCompiler::JumpList
SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR
, ArrayMode arrayMode
)
621 JITCompiler::JumpList result
;
623 switch (arrayMode
.type()) {
625 return jumpSlowForUnwantedArrayMode(tempGPR
, arrayMode
, Int32Shape
);
628 return jumpSlowForUnwantedArrayMode(tempGPR
, arrayMode
, DoubleShape
);
630 case Array::Contiguous
:
631 return jumpSlowForUnwantedArrayMode(tempGPR
, arrayMode
, ContiguousShape
);
633 case Array::ArrayStorage
:
634 case Array::SlowPutArrayStorage
: {
635 ASSERT(!arrayMode
.isJSArrayWithOriginalStructure());
637 if (arrayMode
.isJSArray()) {
638 if (arrayMode
.isSlowPut()) {
641 MacroAssembler::Zero
, tempGPR
, MacroAssembler::TrustedImm32(IsArray
)));
642 m_jit
.and32(TrustedImm32(IndexingShapeMask
), tempGPR
);
643 m_jit
.sub32(TrustedImm32(ArrayStorageShape
), tempGPR
);
646 MacroAssembler::Above
, tempGPR
,
647 TrustedImm32(SlowPutArrayStorageShape
- ArrayStorageShape
)));
650 m_jit
.and32(TrustedImm32(IsArray
| IndexingShapeMask
), tempGPR
);
652 m_jit
.branch32(MacroAssembler::NotEqual
, tempGPR
, TrustedImm32(IsArray
| ArrayStorageShape
)));
655 m_jit
.and32(TrustedImm32(IndexingShapeMask
), tempGPR
);
656 if (arrayMode
.isSlowPut()) {
657 m_jit
.sub32(TrustedImm32(ArrayStorageShape
), tempGPR
);
660 MacroAssembler::Above
, tempGPR
,
661 TrustedImm32(SlowPutArrayStorageShape
- ArrayStorageShape
)));
665 m_jit
.branch32(MacroAssembler::NotEqual
, tempGPR
, TrustedImm32(ArrayStorageShape
)));
676 void SpeculativeJIT::checkArray(Node
* node
)
678 ASSERT(node
->arrayMode().isSpecific());
679 ASSERT(!node
->arrayMode().doesConversion());
681 SpeculateCellOperand
base(this, node
->child1());
682 GPRReg baseReg
= base
.gpr();
684 if (node
->arrayMode().alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1()))) {
685 noResult(m_currentNode
);
689 const ClassInfo
* expectedClassInfo
= 0;
691 switch (node
->arrayMode().type()) {
693 RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
697 case Array::Contiguous
:
698 case Array::ArrayStorage
:
699 case Array::SlowPutArrayStorage
: {
700 GPRTemporary
temp(this);
701 GPRReg tempGPR
= temp
.gpr();
702 m_jit
.load8(MacroAssembler::Address(baseReg
, JSCell::indexingTypeOffset()), tempGPR
);
704 BadIndexingType
, JSValueSource::unboxedCell(baseReg
), 0,
705 jumpSlowForUnwantedArrayMode(tempGPR
, node
->arrayMode()));
707 noResult(m_currentNode
);
710 case Array::Arguments
:
711 speculationCheck(BadType
, JSValueSource::unboxedCell(baseReg
), node
,
713 MacroAssembler::NotEqual
,
714 MacroAssembler::Address(baseReg
, JSCell::typeInfoTypeOffset()),
715 MacroAssembler::TrustedImm32(ArgumentsType
)));
717 noResult(m_currentNode
);
720 speculationCheck(BadType
, JSValueSource::unboxedCell(baseReg
), node
,
722 MacroAssembler::NotEqual
,
723 MacroAssembler::Address(baseReg
, JSCell::typeInfoTypeOffset()),
724 MacroAssembler::TrustedImm32(typeForTypedArrayType(node
->arrayMode().typedArrayType()))));
725 noResult(m_currentNode
);
729 RELEASE_ASSERT(expectedClassInfo
);
731 GPRTemporary
temp(this);
732 GPRTemporary
temp2(this);
733 m_jit
.emitLoadStructure(baseReg
, temp
.gpr(), temp2
.gpr());
735 BadType
, JSValueSource::unboxedCell(baseReg
), node
,
737 MacroAssembler::NotEqual
,
738 MacroAssembler::Address(temp
.gpr(), Structure::classInfoOffset()),
739 MacroAssembler::TrustedImmPtr(expectedClassInfo
)));
741 noResult(m_currentNode
);
744 void SpeculativeJIT::arrayify(Node
* node
, GPRReg baseReg
, GPRReg propertyReg
)
746 ASSERT(node
->arrayMode().doesConversion());
748 GPRTemporary
temp(this);
749 GPRTemporary structure
;
750 GPRReg tempGPR
= temp
.gpr();
751 GPRReg structureGPR
= InvalidGPRReg
;
753 if (node
->op() != ArrayifyToStructure
) {
754 GPRTemporary
realStructure(this);
755 structure
.adopt(realStructure
);
756 structureGPR
= structure
.gpr();
759 // We can skip all that comes next if we already have array storage.
760 MacroAssembler::JumpList slowPath
;
762 if (node
->op() == ArrayifyToStructure
) {
763 slowPath
.append(m_jit
.branchWeakStructure(
764 JITCompiler::NotEqual
,
765 JITCompiler::Address(baseReg
, JSCell::structureIDOffset()),
769 MacroAssembler::Address(baseReg
, JSCell::indexingTypeOffset()), tempGPR
);
771 slowPath
.append(jumpSlowForUnwantedArrayMode(tempGPR
, node
->arrayMode()));
774 addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
775 slowPath
, this, node
, baseReg
, propertyReg
, tempGPR
, structureGPR
)));
777 noResult(m_currentNode
);
780 void SpeculativeJIT::arrayify(Node
* node
)
782 ASSERT(node
->arrayMode().isSpecific());
784 SpeculateCellOperand
base(this, node
->child1());
786 if (!node
->child2()) {
787 arrayify(node
, base
.gpr(), InvalidGPRReg
);
791 SpeculateInt32Operand
property(this, node
->child2());
793 arrayify(node
, base
.gpr(), property
.gpr());
796 GPRReg
SpeculativeJIT::fillStorage(Edge edge
)
798 VirtualRegister virtualRegister
= edge
->virtualRegister();
799 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
801 switch (info
.registerFormat()) {
802 case DataFormatNone
: {
803 if (info
.spillFormat() == DataFormatStorage
) {
804 GPRReg gpr
= allocate();
805 m_gprs
.retain(gpr
, virtualRegister
, SpillOrderSpilled
);
806 m_jit
.loadPtr(JITCompiler::addressFor(virtualRegister
), gpr
);
807 info
.fillStorage(*m_stream
, gpr
);
811 // Must be a cell; fill it as a cell and then return the pointer.
812 return fillSpeculateCell(edge
);
815 case DataFormatStorage
: {
816 GPRReg gpr
= info
.gpr();
822 return fillSpeculateCell(edge
);
826 void SpeculativeJIT::useChildren(Node
* node
)
828 if (node
->flags() & NodeHasVarArgs
) {
829 for (unsigned childIdx
= node
->firstChild(); childIdx
< node
->firstChild() + node
->numChildren(); childIdx
++) {
830 if (!!m_jit
.graph().m_varArgChildren
[childIdx
])
831 use(m_jit
.graph().m_varArgChildren
[childIdx
]);
834 Edge child1
= node
->child1();
836 ASSERT(!node
->child2() && !node
->child3());
841 Edge child2
= node
->child2();
843 ASSERT(!node
->child3());
848 Edge child3
= node
->child3();
855 void SpeculativeJIT::compileIn(Node
* node
)
857 SpeculateCellOperand
base(this, node
->child2());
858 GPRReg baseGPR
= base
.gpr();
860 if (isConstant(node
->child1().node())) {
862 jsDynamicCast
<JSString
*>(valueOfJSConstant(node
->child1().node()));
863 if (string
&& string
->tryGetValueImpl()
864 && string
->tryGetValueImpl()->isAtomic()) {
865 StructureStubInfo
* stubInfo
= m_jit
.codeBlock()->addStubInfo();
867 GPRTemporary
result(this);
868 GPRReg resultGPR
= result
.gpr();
872 MacroAssembler::PatchableJump jump
= m_jit
.patchableJump();
873 MacroAssembler::Label done
= m_jit
.label();
875 OwnPtr
<SlowPathGenerator
> slowPath
= slowPathCall(
876 jump
.m_jump
, this, operationInOptimize
,
877 JSValueRegs::payloadOnly(resultGPR
), stubInfo
, baseGPR
,
878 string
->tryGetValueImpl());
880 stubInfo
->codeOrigin
= node
->origin
.semantic
;
881 stubInfo
->patch
.baseGPR
= static_cast<int8_t>(baseGPR
);
882 stubInfo
->patch
.valueGPR
= static_cast<int8_t>(resultGPR
);
883 stubInfo
->patch
.usedRegisters
= usedRegisters();
884 stubInfo
->patch
.spillMode
= NeedToSpill
;
886 m_jit
.addIn(InRecord(jump
, done
, slowPath
.get(), stubInfo
));
887 addSlowPathGenerator(slowPath
.release());
891 blessedBooleanResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
896 JSValueOperand
key(this, node
->child1());
897 JSValueRegs regs
= key
.jsValueRegs();
899 GPRResult
result(this);
900 GPRReg resultGPR
= result
.gpr();
907 operationGenericIn
, extractResult(JSValueRegs::payloadOnly(resultGPR
)),
909 blessedBooleanResult(resultGPR
, node
, UseChildrenCalledExplicitly
);
912 bool SpeculativeJIT::nonSpeculativeCompare(Node
* node
, MacroAssembler::RelationalCondition cond
, S_JITOperation_EJJ helperFunction
)
914 unsigned branchIndexInBlock
= detectPeepHoleBranch();
915 if (branchIndexInBlock
!= UINT_MAX
) {
916 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
918 ASSERT(node
->adjustedRefCount() == 1);
920 nonSpeculativePeepholeBranch(node
, branchNode
, cond
, helperFunction
);
922 m_indexInBlock
= branchIndexInBlock
;
923 m_currentNode
= branchNode
;
928 nonSpeculativeNonPeepholeCompare(node
, cond
, helperFunction
);
933 bool SpeculativeJIT::nonSpeculativeStrictEq(Node
* node
, bool invert
)
935 unsigned branchIndexInBlock
= detectPeepHoleBranch();
936 if (branchIndexInBlock
!= UINT_MAX
) {
937 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
939 ASSERT(node
->adjustedRefCount() == 1);
941 nonSpeculativePeepholeStrictEq(node
, branchNode
, invert
);
943 m_indexInBlock
= branchIndexInBlock
;
944 m_currentNode
= branchNode
;
949 nonSpeculativeNonPeepholeStrictEq(node
, invert
);
954 static const char* dataFormatString(DataFormat format
)
956 // These values correspond to the DataFormat enum.
957 const char* strings
[] = {
975 return strings
[format
];
978 void SpeculativeJIT::dump(const char* label
)
981 dataLogF("<%s>\n", label
);
983 dataLogF(" gprs:\n");
985 dataLogF(" fprs:\n");
987 dataLogF(" VirtualRegisters:\n");
988 for (unsigned i
= 0; i
< m_generationInfo
.size(); ++i
) {
989 GenerationInfo
& info
= m_generationInfo
[i
];
991 dataLogF(" % 3d:%s%s", i
, dataFormatString(info
.registerFormat()), dataFormatString(info
.spillFormat()));
993 dataLogF(" % 3d:[__][__]", i
);
994 if (info
.registerFormat() == DataFormatDouble
)
995 dataLogF(":fpr%d\n", info
.fpr());
996 else if (info
.registerFormat() != DataFormatNone
997 #if USE(JSVALUE32_64)
998 && !(info
.registerFormat() & DataFormatJS
)
1001 ASSERT(info
.gpr() != InvalidGPRReg
);
1002 dataLogF(":%s\n", GPRInfo::debugName(info
.gpr()));
1007 dataLogF("</%s>\n", label
);
1010 GPRTemporary::GPRTemporary()
1012 , m_gpr(InvalidGPRReg
)
1016 GPRTemporary::GPRTemporary(SpeculativeJIT
* jit
)
1018 , m_gpr(InvalidGPRReg
)
1020 m_gpr
= m_jit
->allocate();
1023 GPRTemporary::GPRTemporary(SpeculativeJIT
* jit
, GPRReg specific
)
1025 , m_gpr(InvalidGPRReg
)
1027 m_gpr
= m_jit
->allocate(specific
);
1030 #if USE(JSVALUE32_64)
1031 GPRTemporary::GPRTemporary(
1032 SpeculativeJIT
* jit
, ReuseTag
, JSValueOperand
& op1
, WhichValueWord which
)
1034 , m_gpr(InvalidGPRReg
)
1036 if (!op1
.isDouble() && m_jit
->canReuse(op1
.node()))
1037 m_gpr
= m_jit
->reuse(op1
.gpr(which
));
1039 m_gpr
= m_jit
->allocate();
1041 #endif // USE(JSVALUE32_64)
// JSValueRegsTemporary: RAII wrapper holding the register(s) needed for one JSValue —
// a single GPR on 64-bit, a tag/payload GPR pair on 32-bit.
// NOTE(review): lossy extraction — the jit-taking constructor's body (original lines
// 1046-1054) and the #if USE(JSVALUE64)/#else/#endif lines around the two returns in
// regs() are missing from this view. Tokens preserved verbatim.
1043 JSValueRegsTemporary::JSValueRegsTemporary() { }
1045 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT
* jit
)
1055 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1057 JSValueRegs
JSValueRegsTemporary::regs()
// 64-bit path: one GPR carries the whole value.
1060 return JSValueRegs(m_gpr
.gpr());
// 32-bit path: separate tag and payload GPRs.
1062 return JSValueRegs(m_tagGPR
.gpr(), m_payloadGPR
.gpr());
1066 void GPRTemporary::adopt(GPRTemporary
& other
)
1069 ASSERT(m_gpr
== InvalidGPRReg
);
1070 ASSERT(other
.m_jit
);
1071 ASSERT(other
.m_gpr
!= InvalidGPRReg
);
1072 m_jit
= other
.m_jit
;
1073 m_gpr
= other
.m_gpr
;
1075 other
.m_gpr
= InvalidGPRReg
;
1078 FPRTemporary::FPRTemporary(SpeculativeJIT
* jit
)
1080 , m_fpr(InvalidFPRReg
)
1082 m_fpr
= m_jit
->fprAllocate();
1085 FPRTemporary::FPRTemporary(SpeculativeJIT
* jit
, SpeculateDoubleOperand
& op1
)
1087 , m_fpr(InvalidFPRReg
)
1089 if (m_jit
->canReuse(op1
.node()))
1090 m_fpr
= m_jit
->reuse(op1
.fpr());
1092 m_fpr
= m_jit
->fprAllocate();
1095 FPRTemporary::FPRTemporary(SpeculativeJIT
* jit
, SpeculateDoubleOperand
& op1
, SpeculateDoubleOperand
& op2
)
1097 , m_fpr(InvalidFPRReg
)
1099 if (m_jit
->canReuse(op1
.node()))
1100 m_fpr
= m_jit
->reuse(op1
.fpr());
1101 else if (m_jit
->canReuse(op2
.node()))
1102 m_fpr
= m_jit
->reuse(op2
.fpr());
1104 m_fpr
= m_jit
->fprAllocate();
1107 #if USE(JSVALUE32_64)
1108 FPRTemporary::FPRTemporary(SpeculativeJIT
* jit
, JSValueOperand
& op1
)
1110 , m_fpr(InvalidFPRReg
)
1112 if (op1
.isDouble() && m_jit
->canReuse(op1
.node()))
1113 m_fpr
= m_jit
->reuse(op1
.fpr());
1115 m_fpr
= m_jit
->fprAllocate();
1119 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node
* node
, Node
* branchNode
, JITCompiler::DoubleCondition condition
)
1121 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1122 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1124 SpeculateDoubleOperand
op1(this, node
->child1());
1125 SpeculateDoubleOperand
op2(this, node
->child2());
1127 branchDouble(condition
, op1
.fpr(), op2
.fpr(), taken
);
// Fused object-identity compare-and-branch: speculates both children are objects
// (not strings, and — when the masquerades-as-undefined watchpoint has fired — not
// MasqueradesAsUndefined objects), then branches on pointer equality.
// NOTE(review): lossy extraction — embedded integers are original line numbers;
// several lines are missing (the taken/notTaken swap after line 1140, the
// speculationCheck( openers before the BadType arguments, closing braces, the else
// arm starting at line 1177, and the final jump(notTaken)). Tokens preserved
// verbatim; consult the upstream file before editing.
1131 void SpeculativeJIT::compilePeepHoleObjectEquality(Node
* node
, Node
* branchNode
)
1133 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1134 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1136 MacroAssembler::RelationalCondition condition
= MacroAssembler::Equal
;
// If the taken block is the fall-through block, invert the condition and swap
// targets so the common case falls through.
1138 if (taken
== nextBlock()) {
1139 condition
= MacroAssembler::NotEqual
;
1140 BasicBlock
* tmp
= taken
;
1145 SpeculateCellOperand
op1(this, node
->child1());
1146 SpeculateCellOperand
op2(this, node
->child2());
1148 GPRReg op1GPR
= op1
.gpr();
1149 GPRReg op2GPR
= op2
.gpr();
// Fast path: watchpoint still valid, so it suffices to check each cell is not a
// string (structure != vm->stringStructure).
1151 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1152 if (m_state
.forNode(node
->child1()).m_type
& ~SpecObject
) {
1154 BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1155 m_jit
.branchStructurePtr(
1156 MacroAssembler::Equal
,
1157 MacroAssembler::Address(op1GPR
, JSCell::structureIDOffset()),
1158 m_jit
.vm()->stringStructure
.get()));
1160 if (m_state
.forNode(node
->child2()).m_type
& ~SpecObject
) {
1162 BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1163 m_jit
.branchStructurePtr(
1164 MacroAssembler::Equal
,
1165 MacroAssembler::Address(op2GPR
, JSCell::structureIDOffset()),
1166 m_jit
.vm()->stringStructure
.get()));
// Slow path: watchpoint invalid, so load each cell's Structure and additionally
// reject objects flagged MasqueradesAsUndefined.
1169 GPRTemporary
structure(this);
1170 GPRTemporary
temp(this);
1171 GPRReg structureGPR
= structure
.gpr();
1173 m_jit
.emitLoadStructure(op1GPR
, structureGPR
, temp
.gpr());
1174 if (m_state
.forNode(node
->child1()).m_type
& ~SpecObject
) {
1176 BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1178 MacroAssembler::Equal
,
1180 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1182 speculationCheck(BadType
, JSValueSource::unboxedCell(op1GPR
), node
->child1(),
1184 MacroAssembler::NonZero
,
1185 MacroAssembler::Address(op1GPR
, JSCell::typeInfoFlagsOffset()),
1186 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
1188 m_jit
.emitLoadStructure(op2GPR
, structureGPR
, temp
.gpr());
1189 if (m_state
.forNode(node
->child2()).m_type
& ~SpecObject
) {
1191 BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1193 MacroAssembler::Equal
,
1195 MacroAssembler::TrustedImmPtr(m_jit
.vm()->stringStructure
.get())));
1197 speculationCheck(BadType
, JSValueSource::unboxedCell(op2GPR
), node
->child2(),
1199 MacroAssembler::NonZero
,
1200 MacroAssembler::Address(op2GPR
, JSCell::typeInfoFlagsOffset()),
1201 MacroAssembler::TrustedImm32(MasqueradesAsUndefined
)));
// Identity comparison on the two cell pointers decides the branch.
1204 branchPtr(condition
, op1GPR
, op2GPR
, taken
);
1208 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node
* node
, Node
* branchNode
, JITCompiler::RelationalCondition condition
)
1210 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1211 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1213 // The branch instruction will branch to the taken block.
1214 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1215 if (taken
== nextBlock()) {
1216 condition
= JITCompiler::invert(condition
);
1217 BasicBlock
* tmp
= taken
;
1222 if (isBooleanConstant(node
->child1().node())) {
1223 bool imm
= valueOfBooleanConstant(node
->child1().node());
1224 SpeculateBooleanOperand
op2(this, node
->child2());
1225 branch32(condition
, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm
)))), op2
.gpr(), taken
);
1226 } else if (isBooleanConstant(node
->child2().node())) {
1227 SpeculateBooleanOperand
op1(this, node
->child1());
1228 bool imm
= valueOfBooleanConstant(node
->child2().node());
1229 branch32(condition
, op1
.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm
)))), taken
);
1231 SpeculateBooleanOperand
op1(this, node
->child1());
1232 SpeculateBooleanOperand
op2(this, node
->child2());
1233 branch32(condition
, op1
.gpr(), op2
.gpr(), taken
);
1239 void SpeculativeJIT::compilePeepHoleInt32Branch(Node
* node
, Node
* branchNode
, JITCompiler::RelationalCondition condition
)
1241 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
1242 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
1244 // The branch instruction will branch to the taken block.
1245 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1246 if (taken
== nextBlock()) {
1247 condition
= JITCompiler::invert(condition
);
1248 BasicBlock
* tmp
= taken
;
1253 if (isInt32Constant(node
->child1().node())) {
1254 int32_t imm
= valueOfInt32Constant(node
->child1().node());
1255 SpeculateInt32Operand
op2(this, node
->child2());
1256 branch32(condition
, JITCompiler::Imm32(imm
), op2
.gpr(), taken
);
1257 } else if (isInt32Constant(node
->child2().node())) {
1258 SpeculateInt32Operand
op1(this, node
->child1());
1259 int32_t imm
= valueOfInt32Constant(node
->child2().node());
1260 branch32(condition
, op1
.gpr(), JITCompiler::Imm32(imm
), taken
);
1262 SpeculateInt32Operand
op1(this, node
->child1());
1263 SpeculateInt32Operand
op2(this, node
->child2());
1264 branch32(condition
, op1
.gpr(), op2
.gpr(), taken
);
// Dispatcher for compare nodes: if the very next node is a Branch consuming this
// compare, emit a fused compare-and-branch specialized on the compare's use kinds
// and advance the block cursor past the branch; otherwise the caller emits a
// plain compare.
// NOTE(review): lossy extraction — embedded integers are original line numbers;
// missing lines include the opening #if USE(JSVALUE64) before the Int52 case, the
// `return false;` exits, the else arms before the nonSpeculativePeepholeBranch
// fallbacks, and the closing braces/`return true;`. Tokens preserved verbatim.
1270 // Returns true if the compare is fused with a subsequent branch.
1271 bool SpeculativeJIT::compilePeepHoleBranch(Node
* node
, MacroAssembler::RelationalCondition condition
, MacroAssembler::DoubleCondition doubleCondition
, S_JITOperation_EJJ operation
)
1273 // Fused compare & branch.
1274 unsigned branchIndexInBlock
= detectPeepHoleBranch();
1275 if (branchIndexInBlock
!= UINT_MAX
) {
1276 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
1278 // detectPeepHoleBranch currently only permits the branch to be the very next node,
1279 // so can be no intervening nodes to also reference the compare.
1280 ASSERT(node
->adjustedRefCount() == 1);
// Specialize on the binary use kind of the compare.
1282 if (node
->isBinaryUseKind(Int32Use
))
1283 compilePeepHoleInt32Branch(node
, branchNode
, condition
);
1285 else if (node
->isBinaryUseKind(Int52RepUse
))
1286 compilePeepHoleInt52Branch(node
, branchNode
, condition
);
1287 #endif // USE(JSVALUE64)
1288 else if (node
->isBinaryUseKind(DoubleRepUse
))
1289 compilePeepHoleDoubleBranch(node
, branchNode
, doubleCondition
);
1290 else if (node
->op() == CompareEq
) {
1291 if (node
->isBinaryUseKind(StringUse
) || node
->isBinaryUseKind(StringIdentUse
)) {
1292 // Use non-peephole comparison, for now.
1295 if (node
->isBinaryUseKind(BooleanUse
))
1296 compilePeepHoleBooleanBranch(node
, branchNode
, condition
);
1297 else if (node
->isBinaryUseKind(ObjectUse
))
1298 compilePeepHoleObjectEquality(node
, branchNode
);
1299 else if (node
->isBinaryUseKind(ObjectUse
, ObjectOrOtherUse
))
1300 compilePeepHoleObjectToObjectOrOtherEquality(node
->child1(), node
->child2(), branchNode
);
1301 else if (node
->isBinaryUseKind(ObjectOrOtherUse
, ObjectUse
))
1302 compilePeepHoleObjectToObjectOrOtherEquality(node
->child2(), node
->child1(), branchNode
);
// Generic fallbacks call out to the slow-path comparison operation.
1304 nonSpeculativePeepholeBranch(node
, branchNode
, condition
, operation
);
1308 nonSpeculativePeepholeBranch(node
, branchNode
, condition
, operation
);
// Mark the compare's children used and skip the branch node we just consumed.
1312 use(node
->child1());
1313 use(node
->child2());
1314 m_indexInBlock
= branchIndexInBlock
;
1315 m_currentNode
= branchNode
;
1321 void SpeculativeJIT::noticeOSRBirth(Node
* node
)
1323 if (!node
->hasVirtualRegister())
1326 VirtualRegister virtualRegister
= node
->virtualRegister();
1327 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1329 info
.noticeOSRBirth(*m_stream
, node
, virtualRegister
);
1332 void SpeculativeJIT::compileMovHint(Node
* node
)
1334 ASSERT(node
->containsMovHint() && node
->op() != ZombieHint
);
1336 Node
* child
= node
->child1().node();
1337 noticeOSRBirth(child
);
1339 m_stream
->appendAndLog(VariableEvent::movHint(MinifiedID(child
), node
->unlinkedLocal()));
1342 void SpeculativeJIT::bail(AbortReason reason
)
1344 m_compileOkay
= true;
1345 m_jit
.abortWithReason(reason
, m_lastGeneratedNode
);
1346 clearGenerationInfo();
// Generates machine code for m_block: binds the block head label, replays the
// variables live at block head into the OSR event stream, then compiles each node
// in order while stepping the abstract interpreter alongside.
// NOTE(review): lossy extraction — embedded integers are original line numbers;
// missing lines include early returns, several case labels (the skipped-node
// switch around line 1415 and the MovHint/ZombieHint handling near 1428-1434),
// the breakpoint comment at 1363, and closing braces. Tokens preserved verbatim.
1349 void SpeculativeJIT::compileCurrentBlock()
1351 ASSERT(m_compileOkay
);
1356 ASSERT(m_block
->isReachable
);
// Record this block's entry label so branches and OSR entry can target it.
1358 m_jit
.blockHeads()[m_block
->index
] = m_jit
.label();
1360 if (!m_block
->cfaHasVisited
) {
1361 // Don't generate code for basic blocks that are unreachable according to CFA.
1362 // But to be sure that nobody has generated a jump to this block, drop in a
1364 m_jit
.abortWithReason(DFGUnreachableBasicBlock
);
1368 m_stream
->appendAndLog(VariableEvent::reset());
1370 m_jit
.jitAssertHasValidCallFrame();
1371 m_jit
.jitAssertTagsInPlace();
1372 m_jit
.jitAssertArgumentCountSane();
1375 m_state
.beginBasicBlock(m_block
);
// Publish the flush format of every live variable at block head to the stream.
1377 for (size_t i
= m_block
->variablesAtHead
.size(); i
--;) {
1378 int operand
= m_block
->variablesAtHead
.operandForIndex(i
);
1379 Node
* node
= m_block
->variablesAtHead
[i
];
1381 continue; // No need to record dead SetLocal's.
1383 VariableAccessData
* variable
= node
->variableAccessData();
1385 if (!node
->refCount())
1386 continue; // No need to record dead SetLocal's.
1387 format
= dataFormatFor(variable
->flushFormat());
1388 m_stream
->appendAndLog(
1389 VariableEvent::setLocal(
1390 VirtualRegister(operand
),
1391 variable
->machineLocal(),
1395 m_codeOriginForExitTarget
= CodeOrigin();
1396 m_codeOriginForExitProfile
= CodeOrigin();
// Main per-node loop.
1398 for (m_indexInBlock
= 0; m_indexInBlock
< m_block
->size(); ++m_indexInBlock
) {
1399 m_currentNode
= m_block
->at(m_indexInBlock
);
1401 // We may have hit a contradiction that the CFA was aware of but that the JIT
1402 // didn't cause directly.
1403 if (!m_state
.isValid()) {
1404 bail(DFGBailedAtTopOfBlock
);
1408 m_canExit
= m_currentNode
->canExit();
1409 bool shouldExecuteEffects
= m_interpreter
.startExecuting(m_currentNode
);
1410 m_jit
.setForNode(m_currentNode
);
1411 m_codeOriginForExitTarget
= m_currentNode
->origin
.forExit
;
1412 m_codeOriginForExitProfile
= m_currentNode
->origin
.semantic
;
1413 m_lastGeneratedNode
= m_currentNode
->op();
// Dead nodes still need bookkeeping (minified graph, weak refs, set-local records).
1414 if (!m_currentNode
->shouldGenerate()) {
1415 switch (m_currentNode
->op()) {
1417 m_minifiedGraph
->append(MinifiedNode::fromNode(m_currentNode
));
1420 case WeakJSConstant
:
1421 m_jit
.addWeakReference(m_currentNode
->weakConstant());
1422 m_minifiedGraph
->append(MinifiedNode::fromNode(m_currentNode
));
1426 RELEASE_ASSERT_NOT_REACHED();
1430 compileMovHint(m_currentNode
);
1434 recordSetLocal(m_currentNode
->unlinkedLocal(), VirtualRegister(), DataFormatDead
);
1439 if (belongsInMinifiedGraph(m_currentNode
->op()))
1440 m_minifiedGraph
->append(MinifiedNode::fromNode(m_currentNode
));
1445 if (verboseCompilationEnabled()) {
1447 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1448 (int)m_currentNode
->index(),
1449 m_currentNode
->origin
.semantic
.bytecodeIndex
, m_jit
.debugOffset());
// Live node: actually generate code for it.
1453 compile(m_currentNode
);
1455 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1456 m_jit
.clearRegisterAllocationOffsets();
1459 if (!m_compileOkay
) {
1460 bail(DFGBailedAtEndOfNode
);
1464 if (belongsInMinifiedGraph(m_currentNode
->op())) {
1465 m_minifiedGraph
->append(MinifiedNode::fromNode(m_currentNode
));
1466 noticeOSRBirth(m_currentNode
);
1470 // Make sure that the abstract state is rematerialized for the next node.
1471 if (shouldExecuteEffects
)
1472 m_interpreter
.executeEffects(m_indexInBlock
);
1475 // Perform the most basic verification that children have been used correctly.
1476 if (!ASSERT_DISABLED
) {
1477 for (unsigned index
= 0; index
< m_generationInfo
.size(); ++index
) {
1478 GenerationInfo
& info
= m_generationInfo
[index
];
1479 RELEASE_ASSERT(!info
.alive());
// NOTE(review): lossy extraction — embedded integers are original line numbers.
// Two switch statements appear below with duplicate FlushedInt32/FlushedBoolean
// cases: the first is the #if USE(JSVALUE64) arm (64-bit value checks), the second
// is the #else 32-bit arm (tag-word checks); the preprocessor lines themselves are
// missing from this view. Tokens preserved verbatim.
1484 // If we are making type predictions about our arguments then
1485 // we need to check that they are correct on function entry.
1486 void SpeculativeJIT::checkArgumentTypes()
1488 ASSERT(!m_currentNode
);
1489 m_isCheckingArgumentTypes
= true;
1490 m_codeOriginForExitTarget
= CodeOrigin(0);
1491 m_codeOriginForExitProfile
= CodeOrigin(0);
// One speculation check per declared parameter, keyed on its flush format.
1493 for (int i
= 0; i
< m_jit
.codeBlock()->numParameters(); ++i
) {
1494 Node
* node
= m_jit
.graph().m_arguments
[i
];
1496 // The argument is dead. We don't do any checks for such arguments.
1500 ASSERT(node
->op() == SetArgument
);
1501 ASSERT(node
->shouldGenerate());
1503 VariableAccessData
* variableAccessData
= node
->variableAccessData();
1504 FlushFormat format
= variableAccessData
->flushFormat();
// FlushedJSValue accepts anything — nothing to check.
1506 if (format
== FlushedJSValue
)
1509 VirtualRegister virtualRegister
= variableAccessData
->local();
1511 JSValueSource valueSource
= JSValueSource(JITCompiler::addressFor(virtualRegister
));
// 64-bit checks: numeric tag range for int32, xor-against-false for boolean,
// tag-mask test for cell.
1515 case FlushedInt32
: {
1516 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch64(MacroAssembler::Below
, JITCompiler::addressFor(virtualRegister
), GPRInfo::tagTypeNumberRegister
));
1519 case FlushedBoolean
: {
1520 GPRTemporary
temp(this);
1521 m_jit
.load64(JITCompiler::addressFor(virtualRegister
), temp
.gpr());
1522 m_jit
.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse
)), temp
.gpr());
1523 speculationCheck(BadType
, valueSource
, node
, m_jit
.branchTest64(MacroAssembler::NonZero
, temp
.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1527 speculationCheck(BadType
, valueSource
, node
, m_jit
.branchTest64(MacroAssembler::NonZero
, JITCompiler::addressFor(virtualRegister
), GPRInfo::tagMaskRegister
));
1531 RELEASE_ASSERT_NOT_REACHED();
// 32-bit checks: compare the tag word against the expected tag.
1536 case FlushedInt32
: {
1537 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::Int32Tag
)));
1540 case FlushedBoolean
: {
1541 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::BooleanTag
)));
1545 speculationCheck(BadType
, valueSource
, node
, m_jit
.branch32(MacroAssembler::NotEqual
, JITCompiler::tagFor(virtualRegister
), TrustedImm32(JSValue::CellTag
)));
1549 RELEASE_ASSERT_NOT_REACHED();
1554 m_isCheckingArgumentTypes
= false;
1557 bool SpeculativeJIT::compile()
1559 checkArgumentTypes();
1561 ASSERT(!m_currentNode
);
1562 for (BlockIndex blockIndex
= 0; blockIndex
< m_jit
.graph().numBlocks(); ++blockIndex
) {
1563 m_jit
.setForBlockIndex(blockIndex
);
1564 m_block
= m_jit
.graph().block(blockIndex
);
1565 compileCurrentBlock();
1571 void SpeculativeJIT::createOSREntries()
1573 for (BlockIndex blockIndex
= 0; blockIndex
< m_jit
.graph().numBlocks(); ++blockIndex
) {
1574 BasicBlock
* block
= m_jit
.graph().block(blockIndex
);
1577 if (!block
->isOSRTarget
)
1580 // Currently we don't have OSR entry trampolines. We could add them
1582 m_osrEntryHeads
.append(m_jit
.blockHeads()[blockIndex
]);
1586 void SpeculativeJIT::linkOSREntries(LinkBuffer
& linkBuffer
)
1588 unsigned osrEntryIndex
= 0;
1589 for (BlockIndex blockIndex
= 0; blockIndex
< m_jit
.graph().numBlocks(); ++blockIndex
) {
1590 BasicBlock
* block
= m_jit
.graph().block(blockIndex
);
1593 if (!block
->isOSRTarget
)
1595 m_jit
.noticeOSREntry(*block
, m_osrEntryHeads
[osrEntryIndex
++], linkBuffer
);
1597 ASSERT(osrEntryIndex
== m_osrEntryHeads
.size());
// PutByVal on a double array: speculates the value is a full real number (no NaN,
// which would corrupt the NaN-boxed storage), then stores it, growing the public
// length for in-bounds-past-length stores and deferring to a C++ operation for
// out-of-bounds stores.
// NOTE(review): lossy extraction — embedded integers are original line numbers;
// missing lines include the typeCheck(/speculationCheck( openers before lines
// 1615 and 1642, the slowPathCall( opener in the slow-path generator near 1666-1669,
// early returns, and closing braces. Tokens preserved verbatim.
1600 void SpeculativeJIT::compileDoublePutByVal(Node
* node
, SpeculateCellOperand
& base
, SpeculateStrictInt32Operand
& property
)
1602 Edge child3
= m_jit
.graph().varArgChild(node
, 2);
1603 Edge child4
= m_jit
.graph().varArgChild(node
, 3);
1605 ArrayMode arrayMode
= node
->arrayMode();
1607 GPRReg baseReg
= base
.gpr();
1608 GPRReg propertyReg
= property
.gpr();
1610 SpeculateDoubleOperand
value(this, child3
);
1612 FPRReg valueReg
= value
.fpr();
// Reject NaN: x != x (unordered) only for NaN.
1615 JSValueRegs(), child3
, SpecFullRealNumber
,
1617 MacroAssembler::DoubleNotEqualOrUnordered
, valueReg
, valueReg
));
1622 StorageOperand
storage(this, child4
);
1623 GPRReg storageReg
= storage
.gpr();
// PutByValAlias: bounds were already checked by a dominating access; store directly.
1625 if (node
->op() == PutByValAlias
) {
1626 // Store the value to the array.
1627 GPRReg propertyReg
= property
.gpr();
1628 FPRReg valueReg
= value
.fpr();
1629 m_jit
.storeDouble(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
1631 noResult(m_currentNode
);
1635 GPRTemporary temporary
;
1636 GPRReg temporaryReg
= temporaryRegisterForPutByVal(temporary
, node
);
1638 MacroAssembler::Jump slowCase
;
// In-bounds mode: index must be < publicLength. Otherwise allow growth up to
// vectorLength, bumping publicLength as needed.
1640 if (arrayMode
.isInBounds()) {
1642 OutOfBounds
, JSValueRegs(), 0,
1643 m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength())));
1645 MacroAssembler::Jump inBounds
= m_jit
.branch32(MacroAssembler::Below
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1647 slowCase
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfVectorLength()));
1649 if (!arrayMode
.isOutOfBounds())
1650 speculationCheck(OutOfBounds
, JSValueRegs(), 0, slowCase
);
1652 m_jit
.add32(TrustedImm32(1), propertyReg
, temporaryReg
);
1653 m_jit
.store32(temporaryReg
, MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()));
1655 inBounds
.link(&m_jit
);
1658 m_jit
.storeDouble(valueReg
, MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
));
// Out-of-bounds stores fall back to the strict/sloppy C++ operation.
1665 if (arrayMode
.isOutOfBounds()) {
1666 addSlowPathGenerator(
1669 m_jit
.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict
: operationPutDoubleByValBeyondArrayBoundsNonStrict
,
1670 NoResult
, baseReg
, propertyReg
, valueReg
));
1673 noResult(m_currentNode
, UseChildrenCalledExplicitly
);
1676 void SpeculativeJIT::compileGetCharCodeAt(Node
* node
)
1678 SpeculateCellOperand
string(this, node
->child1());
1679 SpeculateStrictInt32Operand
index(this, node
->child2());
1680 StorageOperand
storage(this, node
->child3());
1682 GPRReg stringReg
= string
.gpr();
1683 GPRReg indexReg
= index
.gpr();
1684 GPRReg storageReg
= storage
.gpr();
1686 ASSERT(speculationChecked(m_state
.forNode(node
->child1()).m_type
, SpecString
));
1688 // unsigned comparison so we can filter out negative indices and indices that are too large
1689 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::AboveOrEqual
, indexReg
, MacroAssembler::Address(stringReg
, JSString::offsetOfLength())));
1691 GPRTemporary
scratch(this);
1692 GPRReg scratchReg
= scratch
.gpr();
1694 m_jit
.loadPtr(MacroAssembler::Address(stringReg
, JSString::offsetOfValue()), scratchReg
);
1696 // Load the character into scratchReg
1697 JITCompiler::Jump is16Bit
= m_jit
.branchTest32(MacroAssembler::Zero
, MacroAssembler::Address(scratchReg
, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1699 m_jit
.load8(MacroAssembler::BaseIndex(storageReg
, indexReg
, MacroAssembler::TimesOne
, 0), scratchReg
);
1700 JITCompiler::Jump cont8Bit
= m_jit
.jump();
1702 is16Bit
.link(&m_jit
);
1704 m_jit
.load16(MacroAssembler::BaseIndex(storageReg
, indexReg
, MacroAssembler::TimesTwo
, 0), scratchReg
);
1706 cont8Bit
.link(&m_jit
);
1708 int32Result(scratchReg
, m_currentNode
);
// GetByVal on a string: loads the character at the index, maps it to a cached
// single-character JSString (slow-path call for chars >= 0x100), and — when
// out-of-bounds access is possible — falls back to either a sane-prototype-chain
// generator (returns undefined) or the generic string-int operation.
// NOTE(review): lossy extraction — embedded integers are original line numbers;
// the #if USE(JSVALUE32_64)/#else/#endif arms, slowPathCall( openers, early
// returns and closing braces are partially missing. The paired one-register /
// two-register calls below (e.g. lines 1775-1780, 1797/1799) are the 64-bit and
// 32-bit arms of elided preprocessor conditionals. Tokens preserved verbatim.
1711 void SpeculativeJIT::compileGetByValOnString(Node
* node
)
1713 SpeculateCellOperand
base(this, node
->child1());
1714 SpeculateStrictInt32Operand
property(this, node
->child2());
1715 StorageOperand
storage(this, node
->child3());
1716 GPRReg baseReg
= base
.gpr();
1717 GPRReg propertyReg
= property
.gpr();
1718 GPRReg storageReg
= storage
.gpr();
1720 GPRTemporary
scratch(this);
1721 GPRReg scratchReg
= scratch
.gpr();
1722 #if USE(JSVALUE32_64)
// 32-bit: a tag register is only needed when the result can be a non-cell
// (out-of-bounds → undefined).
1723 GPRTemporary resultTag
;
1724 GPRReg resultTagReg
= InvalidGPRReg
;
1725 if (node
->arrayMode().isOutOfBounds()) {
1726 GPRTemporary
realResultTag(this);
1727 resultTag
.adopt(realResultTag
);
1728 resultTagReg
= resultTag
.gpr();
1732 ASSERT(ArrayMode(Array::String
).alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
1734 // unsigned comparison so we can filter out negative indices and indices that are too large
1735 JITCompiler::Jump outOfBounds
= m_jit
.branch32(
1736 MacroAssembler::AboveOrEqual
, propertyReg
,
1737 MacroAssembler::Address(baseReg
, JSString::offsetOfLength()));
1738 if (node
->arrayMode().isInBounds())
1739 speculationCheck(OutOfBounds
, JSValueRegs(), 0, outOfBounds
);
1741 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSString::offsetOfValue()), scratchReg
);
1743 // Load the character into scratchReg
1744 JITCompiler::Jump is16Bit
= m_jit
.branchTest32(MacroAssembler::Zero
, MacroAssembler::Address(scratchReg
, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1746 m_jit
.load8(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesOne
, 0), scratchReg
);
1747 JITCompiler::Jump cont8Bit
= m_jit
.jump();
1749 is16Bit
.link(&m_jit
);
1751 m_jit
.load16(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesTwo
, 0), scratchReg
);
// Characters >= 0x100 are not in the single-character cache; take the slow path.
1753 JITCompiler::Jump bigCharacter
=
1754 m_jit
.branch32(MacroAssembler::AboveOrEqual
, scratchReg
, TrustedImm32(0x100));
1756 // 8 bit string values don't need the isASCII check.
1757 cont8Bit
.link(&m_jit
);
// Index into vm->smallStrings.singleCharacterStrings(): scale by pointer size.
1759 m_jit
.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg
);
1760 m_jit
.addPtr(MacroAssembler::TrustedImmPtr(m_jit
.vm()->smallStrings
.singleCharacterStrings()), scratchReg
);
1761 m_jit
.loadPtr(scratchReg
, scratchReg
);
1763 addSlowPathGenerator(
1765 bigCharacter
, this, operationSingleCharacterString
, scratchReg
, scratchReg
));
1767 if (node
->arrayMode().isOutOfBounds()) {
1768 #if USE(JSVALUE32_64)
1769 m_jit
.move(TrustedImm32(JSValue::CellTag
), resultTagReg
);
1772 JSGlobalObject
* globalObject
= m_jit
.globalObjectFor(node
->origin
.semantic
);
1773 if (globalObject
->stringPrototypeChainIsSane()) {
1775 addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1776 outOfBounds
, this, JSValueRegs(scratchReg
), baseReg
, propertyReg
)));
1778 addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
1779 outOfBounds
, this, JSValueRegs(resultTagReg
, scratchReg
),
1780 baseReg
, propertyReg
)));
1784 addSlowPathGenerator(
1786 outOfBounds
, this, operationGetByValStringInt
,
1787 scratchReg
, baseReg
, propertyReg
));
1789 addSlowPathGenerator(
1791 outOfBounds
, this, operationGetByValStringInt
,
1792 resultTagReg
, scratchReg
, baseReg
, propertyReg
));
// Out-of-bounds-capable path yields a JSValue; the pure in-bounds path always
// yields a cell (a JSString).
1797 jsValueResult(scratchReg
, m_currentNode
);
1799 jsValueResult(resultTagReg
, scratchReg
, m_currentNode
);
1802 cellResult(scratchReg
, m_currentNode
);
1805 void SpeculativeJIT::compileFromCharCode(Node
* node
)
1807 SpeculateStrictInt32Operand
property(this, node
->child1());
1808 GPRReg propertyReg
= property
.gpr();
1809 GPRTemporary
smallStrings(this);
1810 GPRTemporary
scratch(this);
1811 GPRReg scratchReg
= scratch
.gpr();
1812 GPRReg smallStringsReg
= smallStrings
.gpr();
1814 JITCompiler::JumpList slowCases
;
1815 slowCases
.append(m_jit
.branch32(MacroAssembler::AboveOrEqual
, propertyReg
, TrustedImm32(0xff)));
1816 m_jit
.move(MacroAssembler::TrustedImmPtr(m_jit
.vm()->smallStrings
.singleCharacterStrings()), smallStringsReg
);
1817 m_jit
.loadPtr(MacroAssembler::BaseIndex(smallStringsReg
, propertyReg
, MacroAssembler::ScalePtr
, 0), scratchReg
);
1819 slowCases
.append(m_jit
.branchTest32(MacroAssembler::Zero
, scratchReg
));
1820 addSlowPathGenerator(slowPathCall(slowCases
, this, operationStringFromCharCode
, scratchReg
, propertyReg
));
1821 cellResult(scratchReg
, m_currentNode
);
// Classifies how a node's value is currently materialized so compileValueToInt32
// can pick a conversion strategy: already an integer, a generic JSValue, or a
// format that makes the conversion impossible (terminates speculation).
// NOTE(review): lossy extraction — embedded integers are original line numbers;
// some case labels between the groups below (e.g. original line 1840) and the
// default label near 1849 are missing. Tokens preserved verbatim.
1824 GeneratedOperandType
SpeculativeJIT::checkGeneratedTypeForToInt32(Node
* node
)
1826 VirtualRegister virtualRegister
= node
->virtualRegister();
1827 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1829 switch (info
.registerFormat()) {
1830 case DataFormatStorage
:
1831 RELEASE_ASSERT_NOT_REACHED();
// Booleans/cells can never convert via this path: kill the speculation.
1833 case DataFormatBoolean
:
1834 case DataFormatCell
:
1835 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
1836 return GeneratedOperandTypeUnknown
;
1838 case DataFormatNone
:
1839 case DataFormatJSCell
:
1841 case DataFormatJSBoolean
:
1842 case DataFormatJSDouble
:
1843 return GeneratedOperandJSValue
;
1845 case DataFormatJSInt32
:
1846 case DataFormatInt32
:
1847 return GeneratedOperandInteger
;
1850 RELEASE_ASSERT_NOT_REACHED();
1851 return GeneratedOperandTypeUnknown
;
// ValueToInt32: converts the child to int32, specializing on its use kind
// (Int52, DoubleRep, or a generic JSValue whose current materialization is probed
// via checkGeneratedTypeForToInt32). The GeneratedOperandJSValue case contains
// both the 64-bit (NaN-boxed, single GPR) and 32-bit (tag/payload pair) variants.
// NOTE(review): lossy extraction — embedded integers are original line numbers;
// the #if USE(JSVALUE64)/#else/#endif lines separating the two variants, several
// case labels (Int52RepUse near 1859, NumberUse/NotCellUse near 1881-1884),
// typeCheck( openers, `return;`/`break;` statements and closing braces are
// missing. Tokens preserved verbatim; consult the upstream file before editing.
1855 void SpeculativeJIT::compileValueToInt32(Node
* node
)
1857 switch (node
->child1().useKind()) {
// Int52 (64-bit only): truncate by zero-extending the low 32 bits.
1860 SpeculateStrictInt52Operand
op1(this, node
->child1());
1861 GPRTemporary
result(this, Reuse
, op1
);
1862 GPRReg op1GPR
= op1
.gpr();
1863 GPRReg resultGPR
= result
.gpr();
1864 m_jit
.zeroExtend32ToPtr(op1GPR
, resultGPR
);
1865 int32Result(resultGPR
, node
, DataFormatInt32
);
1868 #endif // USE(JSVALUE64)
// DoubleRep: hardware truncate, slow-path call to toInt32 when truncation fails.
1870 case DoubleRepUse
: {
1871 GPRTemporary
result(this);
1872 SpeculateDoubleOperand
op1(this, node
->child1());
1873 FPRReg fpr
= op1
.fpr();
1874 GPRReg gpr
= result
.gpr();
1875 JITCompiler::Jump notTruncatedToInteger
= m_jit
.branchTruncateDoubleToInt32(fpr
, gpr
, JITCompiler::BranchIfTruncateFailed
);
1877 addSlowPathGenerator(slowPathCall(notTruncatedToInteger
, this, toInt32
, gpr
, fpr
));
1879 int32Result(gpr
, node
);
1885 switch (checkGeneratedTypeForToInt32(node
->child1().node())) {
// Already an int32 in a register: just move it.
1886 case GeneratedOperandInteger
: {
1887 SpeculateInt32Operand
op1(this, node
->child1(), ManualOperandSpeculation
);
1888 GPRTemporary
result(this, Reuse
, op1
);
1889 m_jit
.move(op1
.gpr(), result
.gpr());
1890 int32Result(result
.gpr(), node
, op1
.format());
// Generic JSValue. First the 64-bit variant (NaN-boxing in one GPR):
1893 case GeneratedOperandJSValue
: {
1894 GPRTemporary
result(this);
1896 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
1898 GPRReg gpr
= op1
.gpr();
1899 GPRReg resultGpr
= result
.gpr();
1900 FPRTemporary
tempFpr(this);
1901 FPRReg fpr
= tempFpr
.fpr();
1903 JITCompiler::Jump isInteger
= m_jit
.branch64(MacroAssembler::AboveOrEqual
, gpr
, GPRInfo::tagTypeNumberRegister
);
1904 JITCompiler::JumpList converted
;
// NumberUse speculates number; otherwise booleans convert (true→1, else 0).
1906 if (node
->child1().useKind() == NumberUse
) {
1908 JSValueRegs(gpr
), node
->child1(), SpecBytecodeNumber
,
1910 MacroAssembler::Zero
, gpr
, GPRInfo::tagTypeNumberRegister
));
1912 JITCompiler::Jump isNumber
= m_jit
.branchTest64(MacroAssembler::NonZero
, gpr
, GPRInfo::tagTypeNumberRegister
);
1915 JSValueRegs(gpr
), node
->child1(), ~SpecCell
, branchIsCell(JSValueRegs(gpr
)));
1917 // It's not a cell: so true turns into 1 and all else turns into 0.
1918 m_jit
.compare64(JITCompiler::Equal
, gpr
, TrustedImm32(ValueTrue
), resultGpr
);
1919 converted
.append(m_jit
.jump());
1921 isNumber
.link(&m_jit
);
1924 // First, if we get here we have a double encoded as a JSValue
1925 m_jit
.move(gpr
, resultGpr
);
1926 unboxDouble(resultGpr
, fpr
);
1928 silentSpillAllRegisters(resultGpr
);
1929 callOperation(toInt32
, resultGpr
, fpr
);
1930 silentFillAllRegisters(resultGpr
);
1932 converted
.append(m_jit
.jump());
1934 isInteger
.link(&m_jit
);
1935 m_jit
.zeroExtend32ToPtr(gpr
, resultGpr
);
1937 converted
.link(&m_jit
);
// 32-bit variant of GeneratedOperandJSValue (tag/payload register pair):
1939 Node
* childNode
= node
->child1().node();
1940 VirtualRegister virtualRegister
= childNode
->virtualRegister();
1941 GenerationInfo
& info
= generationInfoFromVirtualRegister(virtualRegister
);
1943 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
1945 GPRReg payloadGPR
= op1
.payloadGPR();
1946 GPRReg resultGpr
= result
.gpr();
1948 JITCompiler::JumpList converted
;
1950 if (info
.registerFormat() == DataFormatJSInt32
)
1951 m_jit
.move(payloadGPR
, resultGpr
);
1953 GPRReg tagGPR
= op1
.tagGPR();
1954 FPRTemporary
tempFpr(this);
1955 FPRReg fpr
= tempFpr
.fpr();
1956 FPRTemporary
scratch(this);
1958 JITCompiler::Jump isInteger
= m_jit
.branch32(MacroAssembler::Equal
, tagGPR
, TrustedImm32(JSValue::Int32Tag
));
1960 if (node
->child1().useKind() == NumberUse
) {
1962 op1
.jsValueRegs(), node
->child1(), SpecBytecodeNumber
,
1964 MacroAssembler::AboveOrEqual
, tagGPR
,
1965 TrustedImm32(JSValue::LowestTag
)));
1967 JITCompiler::Jump isNumber
= m_jit
.branch32(MacroAssembler::Below
, tagGPR
, TrustedImm32(JSValue::LowestTag
));
1970 op1
.jsValueRegs(), node
->child1(), ~SpecCell
,
1971 branchIsCell(op1
.jsValueRegs()));
1973 // It's not a cell: so true turns into 1 and all else turns into 0.
1974 JITCompiler::Jump isBoolean
= m_jit
.branch32(JITCompiler::Equal
, tagGPR
, TrustedImm32(JSValue::BooleanTag
));
1975 m_jit
.move(TrustedImm32(0), resultGpr
);
1976 converted
.append(m_jit
.jump());
1978 isBoolean
.link(&m_jit
);
1979 m_jit
.move(payloadGPR
, resultGpr
);
1980 converted
.append(m_jit
.jump());
1982 isNumber
.link(&m_jit
);
1985 unboxDouble(tagGPR
, payloadGPR
, fpr
, scratch
.fpr());
1987 silentSpillAllRegisters(resultGpr
);
1988 callOperation(toInt32
, resultGpr
, fpr
);
1989 silentFillAllRegisters(resultGpr
);
1991 converted
.append(m_jit
.jump());
1993 isInteger
.link(&m_jit
);
1994 m_jit
.move(payloadGPR
, resultGpr
);
1996 converted
.link(&m_jit
);
1999 int32Result(resultGpr
, node
);
// Unknown means speculation was terminated above; compilation is already failing.
2002 case GeneratedOperandTypeUnknown
:
2003 RELEASE_ASSERT(!m_compileOkay
);
2006 RELEASE_ASSERT_NOT_REACHED();
2011 ASSERT(!m_compileOkay
);
2016 void SpeculativeJIT::compileUInt32ToNumber(Node
* node
)
2018 if (doesOverflow(node
->arithMode())) {
2019 // We know that this sometimes produces doubles. So produce a double every
2020 // time. This at least allows subsequent code to not have weird conditionals.
2022 SpeculateInt32Operand
op1(this, node
->child1());
2023 FPRTemporary
result(this);
2025 GPRReg inputGPR
= op1
.gpr();
2026 FPRReg outputFPR
= result
.fpr();
2028 m_jit
.convertInt32ToDouble(inputGPR
, outputFPR
);
2030 JITCompiler::Jump positive
= m_jit
.branch32(MacroAssembler::GreaterThanOrEqual
, inputGPR
, TrustedImm32(0));
2031 m_jit
.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32
), outputFPR
);
2032 positive
.link(&m_jit
);
2034 doubleResult(outputFPR
, node
);
2038 RELEASE_ASSERT(node
->arithMode() == Arith::CheckOverflow
);
2040 SpeculateInt32Operand
op1(this, node
->child1());
2041 GPRTemporary
result(this);
2043 m_jit
.move(op1
.gpr(), result
.gpr());
2045 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, result
.gpr(), TrustedImm32(0)));
2047 int32Result(result
.gpr(), node
, op1
.format());
2050 void SpeculativeJIT::compileDoubleAsInt32(Node
* node
)
2052 SpeculateDoubleOperand
op1(this, node
->child1());
2053 FPRTemporary
scratch(this);
2054 GPRTemporary
result(this);
2056 FPRReg valueFPR
= op1
.fpr();
2057 FPRReg scratchFPR
= scratch
.fpr();
2058 GPRReg resultGPR
= result
.gpr();
2060 JITCompiler::JumpList failureCases
;
2061 RELEASE_ASSERT(shouldCheckOverflow(node
->arithMode()));
2062 m_jit
.branchConvertDoubleToInt32(
2063 valueFPR
, resultGPR
, failureCases
, scratchFPR
,
2064 shouldCheckNegativeZero(node
->arithMode()));
2065 speculationCheck(Overflow
, JSValueRegs(), 0, failureCases
);
2067 int32Result(resultGPR
, node
);
2070 void SpeculativeJIT::compileDoubleRep(Node
* node
)
2072 switch (node
->child1().useKind()) {
2074 ASSERT(!isNumberConstant(node
->child1().node())); // This should have been constant folded.
2076 if (isInt32Speculation(m_state
.forNode(node
->child1()).m_type
)) {
2077 SpeculateInt32Operand
op1(this, node
->child1(), ManualOperandSpeculation
);
2078 FPRTemporary
result(this);
2079 m_jit
.convertInt32ToDouble(op1
.gpr(), result
.fpr());
2080 doubleResult(result
.fpr(), node
);
2084 JSValueOperand
op1(this, node
->child1(), ManualOperandSpeculation
);
2085 FPRTemporary
result(this);
2088 GPRTemporary
temp(this);
2090 GPRReg op1GPR
= op1
.gpr();
2091 GPRReg tempGPR
= temp
.gpr();
2092 FPRReg resultFPR
= result
.fpr();
2094 JITCompiler::Jump isInteger
= m_jit
.branch64(
2095 MacroAssembler::AboveOrEqual
, op1GPR
, GPRInfo::tagTypeNumberRegister
);
2097 if (needsTypeCheck(node
->child1(), SpecBytecodeNumber
)) {
2099 JSValueRegs(op1GPR
), node
->child1(), SpecBytecodeNumber
,
2100 m_jit
.branchTest64(MacroAssembler::Zero
, op1GPR
, GPRInfo::tagTypeNumberRegister
));
2103 m_jit
.move(op1GPR
, tempGPR
);
2104 unboxDouble(tempGPR
, resultFPR
);
2105 JITCompiler::Jump done
= m_jit
.jump();
2107 isInteger
.link(&m_jit
);
2108 m_jit
.convertInt32ToDouble(op1GPR
, resultFPR
);
2110 #else // USE(JSVALUE64) -> this is the 32_64 case
2111 FPRTemporary
temp(this);
2113 GPRReg op1TagGPR
= op1
.tagGPR();
2114 GPRReg op1PayloadGPR
= op1
.payloadGPR();
2115 FPRReg tempFPR
= temp
.fpr();
2116 FPRReg resultFPR
= result
.fpr();
2118 JITCompiler::Jump isInteger
= m_jit
.branch32(
2119 MacroAssembler::Equal
, op1TagGPR
, TrustedImm32(JSValue::Int32Tag
));
2121 if (needsTypeCheck(node
->child1(), SpecBytecodeNumber
)) {
2123 JSValueRegs(op1TagGPR
, op1PayloadGPR
), node
->child1(), SpecBytecodeNumber
,
2124 m_jit
.branch32(MacroAssembler::AboveOrEqual
, op1TagGPR
, TrustedImm32(JSValue::LowestTag
)));
2127 unboxDouble(op1TagGPR
, op1PayloadGPR
, resultFPR
, tempFPR
);
2128 JITCompiler::Jump done
= m_jit
.jump();
2130 isInteger
.link(&m_jit
);
2131 m_jit
.convertInt32ToDouble(op1PayloadGPR
, resultFPR
);
2133 #endif // USE(JSVALUE64)
2135 doubleResult(resultFPR
, node
);
2141 SpeculateStrictInt52Operand
value(this, node
->child1());
2142 FPRTemporary
result(this);
2144 GPRReg valueGPR
= value
.gpr();
2145 FPRReg resultFPR
= result
.fpr();
2147 m_jit
.convertInt64ToDouble(valueGPR
, resultFPR
);
2149 doubleResult(resultFPR
, node
);
2152 #endif // USE(JSVALUE64)
2155 RELEASE_ASSERT_NOT_REACHED();
2160 void SpeculativeJIT::compileValueRep(Node
* node
)
2162 switch (node
->child1().useKind()) {
2163 case DoubleRepUse
: {
2164 SpeculateDoubleOperand
value(this, node
->child1());
2165 JSValueRegsTemporary
result(this);
2167 FPRReg valueFPR
= value
.fpr();
2168 JSValueRegs resultRegs
= result
.regs();
2170 // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2171 // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2172 // subject to a prior SetLocal, filtering the value would imply that the corresponding
2173 // local was purified.
2174 if (needsTypeCheck(node
->child1(), ~SpecDoubleImpureNaN
))
2175 m_jit
.purifyNaN(valueFPR
);
2178 // boxDouble() on X86 clobbers the source, so we need to copy.
2179 // FIXME: Don't do that! https://bugs.webkit.org/show_bug.cgi?id=131690
2180 FPRTemporary
temp(this);
2181 m_jit
.moveDouble(valueFPR
, temp
.fpr());
2182 valueFPR
= temp
.fpr();
2185 boxDouble(valueFPR
, resultRegs
);
2187 jsValueResult(resultRegs
, node
);
2193 SpeculateStrictInt52Operand
value(this, node
->child1());
2194 GPRTemporary
result(this);
2196 GPRReg valueGPR
= value
.gpr();
2197 GPRReg resultGPR
= result
.gpr();
2199 boxInt52(valueGPR
, resultGPR
, DataFormatStrictInt52
);
2201 jsValueResult(resultGPR
, node
);
2204 #endif // USE(JSVALUE64)
2207 RELEASE_ASSERT_NOT_REACHED();
// Clamps a double to the typed-array byte range [0, 255], rounding half up
// by adding 0.5 before the range check; NaN maps to 0 (NaN + 0.5 is NaN and
// fails the `> 0` test). Callers truncate the returned value to an integer.
// NOTE(review): the body was missing from the mangled source and was
// reconstructed from the upstream implementation — verify.
static double clampDoubleToByte(double d)
{
    d += 0.5;
    if (!(d > 0))
        d = 0;
    else if (d > 255)
        d = 255;
    return d;
}
2222 static void compileClampIntegerToByte(JITCompiler
& jit
, GPRReg result
)
2224 MacroAssembler::Jump inBounds
= jit
.branch32(MacroAssembler::BelowOrEqual
, result
, JITCompiler::TrustedImm32(0xff));
2225 MacroAssembler::Jump tooBig
= jit
.branch32(MacroAssembler::GreaterThan
, result
, JITCompiler::TrustedImm32(0xff));
2226 jit
.xorPtr(result
, result
);
2227 MacroAssembler::Jump clamped
= jit
.jump();
2229 jit
.move(JITCompiler::TrustedImm32(255), result
);
2231 inBounds
.link(&jit
);
2234 static void compileClampDoubleToByte(JITCompiler
& jit
, GPRReg result
, FPRReg source
, FPRReg scratch
)
2236 // Unordered compare so we pick up NaN
2237 static const double zero
= 0;
2238 static const double byteMax
= 255;
2239 static const double half
= 0.5;
2240 jit
.loadDouble(MacroAssembler::TrustedImmPtr(&zero
), scratch
);
2241 MacroAssembler::Jump tooSmall
= jit
.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered
, source
, scratch
);
2242 jit
.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax
), scratch
);
2243 MacroAssembler::Jump tooBig
= jit
.branchDouble(MacroAssembler::DoubleGreaterThan
, source
, scratch
);
2245 jit
.loadDouble(MacroAssembler::TrustedImmPtr(&half
), scratch
);
2246 // FIXME: This should probably just use a floating point round!
2247 // https://bugs.webkit.org/show_bug.cgi?id=72054
2248 jit
.addDouble(source
, scratch
);
2249 jit
.truncateDoubleToInt32(scratch
, result
);
2250 MacroAssembler::Jump truncatedInt
= jit
.jump();
2252 tooSmall
.link(&jit
);
2253 jit
.xorPtr(result
, result
);
2254 MacroAssembler::Jump zeroed
= jit
.jump();
2257 jit
.move(JITCompiler::TrustedImm32(255), result
);
2259 truncatedInt
.link(&jit
);
2264 JITCompiler::Jump
SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node
* node
, GPRReg baseGPR
, GPRReg indexGPR
)
2266 if (node
->op() == PutByValAlias
)
2267 return JITCompiler::Jump();
2268 if (JSArrayBufferView
* view
= m_jit
.graph().tryGetFoldableViewForChild1(node
)) {
2269 uint32_t length
= view
->length();
2270 Node
* indexNode
= m_jit
.graph().child(node
, 1).node();
2271 if (m_jit
.graph().isInt32Constant(indexNode
) && static_cast<uint32_t>(m_jit
.graph().valueOfInt32Constant(indexNode
)) < length
)
2272 return JITCompiler::Jump();
2273 return m_jit
.branch32(
2274 MacroAssembler::AboveOrEqual
, indexGPR
, MacroAssembler::Imm32(length
));
2276 return m_jit
.branch32(
2277 MacroAssembler::AboveOrEqual
, indexGPR
,
2278 MacroAssembler::Address(baseGPR
, JSArrayBufferView::offsetOfLength()));
2281 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node
* node
, GPRReg baseGPR
, GPRReg indexGPR
)
2283 JITCompiler::Jump jump
= jumpForTypedArrayOutOfBounds(node
, baseGPR
, indexGPR
);
2286 speculationCheck(OutOfBounds
, JSValueRegs(), 0, jump
);
2289 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node
* node
, TypedArrayType type
)
2291 ASSERT(isInt(type
));
2293 SpeculateCellOperand
base(this, node
->child1());
2294 SpeculateStrictInt32Operand
property(this, node
->child2());
2295 StorageOperand
storage(this, node
->child3());
2297 GPRReg baseReg
= base
.gpr();
2298 GPRReg propertyReg
= property
.gpr();
2299 GPRReg storageReg
= storage
.gpr();
2301 GPRTemporary
result(this);
2302 GPRReg resultReg
= result
.gpr();
2304 ASSERT(node
->arrayMode().alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
2306 emitTypedArrayBoundsCheck(node
, baseReg
, propertyReg
);
2307 switch (elementSize(type
)) {
2310 m_jit
.load8Signed(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesOne
), resultReg
);
2312 m_jit
.load8(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesOne
), resultReg
);
2316 m_jit
.load16Signed(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesTwo
), resultReg
);
2318 m_jit
.load16(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesTwo
), resultReg
);
2321 m_jit
.load32(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesFour
), resultReg
);
2326 if (elementSize(type
) < 4 || isSigned(type
)) {
2327 int32Result(resultReg
, node
);
2331 ASSERT(elementSize(type
) == 4 && !isSigned(type
));
2332 if (node
->shouldSpeculateInt32()) {
2333 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, resultReg
, TrustedImm32(0)));
2334 int32Result(resultReg
, node
);
2339 if (node
->shouldSpeculateMachineInt()) {
2340 m_jit
.zeroExtend32ToPtr(resultReg
, resultReg
);
2341 strictInt52Result(resultReg
, node
);
2346 FPRTemporary
fresult(this);
2347 m_jit
.convertInt32ToDouble(resultReg
, fresult
.fpr());
2348 JITCompiler::Jump positive
= m_jit
.branch32(MacroAssembler::GreaterThanOrEqual
, resultReg
, TrustedImm32(0));
2349 m_jit
.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32
), fresult
.fpr());
2350 positive
.link(&m_jit
);
2351 doubleResult(fresult
.fpr(), node
);
2354 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base
, GPRReg property
, Node
* node
, TypedArrayType type
)
2356 ASSERT(isInt(type
));
2358 StorageOperand
storage(this, m_jit
.graph().varArgChild(node
, 3));
2359 GPRReg storageReg
= storage
.gpr();
2361 Edge valueUse
= m_jit
.graph().varArgChild(node
, 2);
2364 GPRReg valueGPR
= InvalidGPRReg
;
2366 if (valueUse
->isConstant()) {
2367 JSValue jsValue
= valueOfJSConstant(valueUse
.node());
2368 if (!jsValue
.isNumber()) {
2369 terminateSpeculativeExecution(Uncountable
, JSValueRegs(), 0);
2373 double d
= jsValue
.asNumber();
2374 if (isClamped(type
)) {
2375 ASSERT(elementSize(type
) == 1);
2376 d
= clampDoubleToByte(d
);
2378 GPRTemporary
scratch(this);
2379 GPRReg scratchReg
= scratch
.gpr();
2380 m_jit
.move(Imm32(toInt32(d
)), scratchReg
);
2381 value
.adopt(scratch
);
2382 valueGPR
= scratchReg
;
2384 switch (valueUse
.useKind()) {
2386 SpeculateInt32Operand
valueOp(this, valueUse
);
2387 GPRTemporary
scratch(this);
2388 GPRReg scratchReg
= scratch
.gpr();
2389 m_jit
.move(valueOp
.gpr(), scratchReg
);
2390 if (isClamped(type
)) {
2391 ASSERT(elementSize(type
) == 1);
2392 compileClampIntegerToByte(m_jit
, scratchReg
);
2394 value
.adopt(scratch
);
2395 valueGPR
= scratchReg
;
2401 SpeculateStrictInt52Operand
valueOp(this, valueUse
);
2402 GPRTemporary
scratch(this);
2403 GPRReg scratchReg
= scratch
.gpr();
2404 m_jit
.move(valueOp
.gpr(), scratchReg
);
2405 if (isClamped(type
)) {
2406 ASSERT(elementSize(type
) == 1);
2407 MacroAssembler::Jump inBounds
= m_jit
.branch64(
2408 MacroAssembler::BelowOrEqual
, scratchReg
, JITCompiler::TrustedImm64(0xff));
2409 MacroAssembler::Jump tooBig
= m_jit
.branch64(
2410 MacroAssembler::GreaterThan
, scratchReg
, JITCompiler::TrustedImm64(0xff));
2411 m_jit
.move(TrustedImm32(0), scratchReg
);
2412 MacroAssembler::Jump clamped
= m_jit
.jump();
2413 tooBig
.link(&m_jit
);
2414 m_jit
.move(JITCompiler::TrustedImm32(255), scratchReg
);
2415 clamped
.link(&m_jit
);
2416 inBounds
.link(&m_jit
);
2418 value
.adopt(scratch
);
2419 valueGPR
= scratchReg
;
2422 #endif // USE(JSVALUE64)
2424 case DoubleRepUse
: {
2425 if (isClamped(type
)) {
2426 ASSERT(elementSize(type
) == 1);
2427 SpeculateDoubleOperand
valueOp(this, valueUse
);
2428 GPRTemporary
result(this);
2429 FPRTemporary
floatScratch(this);
2430 FPRReg fpr
= valueOp
.fpr();
2431 GPRReg gpr
= result
.gpr();
2432 compileClampDoubleToByte(m_jit
, gpr
, fpr
, floatScratch
.fpr());
2433 value
.adopt(result
);
2436 SpeculateDoubleOperand
valueOp(this, valueUse
);
2437 GPRTemporary
result(this);
2438 FPRReg fpr
= valueOp
.fpr();
2439 GPRReg gpr
= result
.gpr();
2440 MacroAssembler::Jump notNaN
= m_jit
.branchDouble(MacroAssembler::DoubleEqual
, fpr
, fpr
);
2441 m_jit
.xorPtr(gpr
, gpr
);
2442 MacroAssembler::Jump fixed
= m_jit
.jump();
2443 notNaN
.link(&m_jit
);
2445 MacroAssembler::Jump failed
= m_jit
.branchTruncateDoubleToInt32(
2446 fpr
, gpr
, MacroAssembler::BranchIfTruncateFailed
);
2448 addSlowPathGenerator(slowPathCall(failed
, this, toInt32
, gpr
, fpr
));
2451 value
.adopt(result
);
2458 RELEASE_ASSERT_NOT_REACHED();
2463 ASSERT_UNUSED(valueGPR
, valueGPR
!= property
);
2464 ASSERT(valueGPR
!= base
);
2465 ASSERT(valueGPR
!= storageReg
);
2466 MacroAssembler::Jump outOfBounds
= jumpForTypedArrayOutOfBounds(node
, base
, property
);
2467 if (node
->arrayMode().isInBounds() && outOfBounds
.isSet()) {
2468 speculationCheck(OutOfBounds
, JSValueSource(), 0, outOfBounds
);
2469 outOfBounds
= MacroAssembler::Jump();
2472 switch (elementSize(type
)) {
2474 m_jit
.store8(value
.gpr(), MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesOne
));
2477 m_jit
.store16(value
.gpr(), MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesTwo
));
2480 m_jit
.store32(value
.gpr(), MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesFour
));
2485 if (outOfBounds
.isSet())
2486 outOfBounds
.link(&m_jit
);
2490 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node
* node
, TypedArrayType type
)
2492 ASSERT(isFloat(type
));
2494 SpeculateCellOperand
base(this, node
->child1());
2495 SpeculateStrictInt32Operand
property(this, node
->child2());
2496 StorageOperand
storage(this, node
->child3());
2498 GPRReg baseReg
= base
.gpr();
2499 GPRReg propertyReg
= property
.gpr();
2500 GPRReg storageReg
= storage
.gpr();
2502 ASSERT(node
->arrayMode().alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
2504 FPRTemporary
result(this);
2505 FPRReg resultReg
= result
.fpr();
2506 emitTypedArrayBoundsCheck(node
, baseReg
, propertyReg
);
2507 switch (elementSize(type
)) {
2509 m_jit
.loadFloat(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesFour
), resultReg
);
2510 m_jit
.convertFloatToDouble(resultReg
, resultReg
);
2513 m_jit
.loadDouble(MacroAssembler::BaseIndex(storageReg
, propertyReg
, MacroAssembler::TimesEight
), resultReg
);
2517 RELEASE_ASSERT_NOT_REACHED();
2520 doubleResult(resultReg
, node
);
2523 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base
, GPRReg property
, Node
* node
, TypedArrayType type
)
2525 ASSERT(isFloat(type
));
2527 StorageOperand
storage(this, m_jit
.graph().varArgChild(node
, 3));
2528 GPRReg storageReg
= storage
.gpr();
2530 Edge baseUse
= m_jit
.graph().varArgChild(node
, 0);
2531 Edge valueUse
= m_jit
.graph().varArgChild(node
, 2);
2533 SpeculateDoubleOperand
valueOp(this, valueUse
);
2534 FPRTemporary
scratch(this);
2535 FPRReg valueFPR
= valueOp
.fpr();
2536 FPRReg scratchFPR
= scratch
.fpr();
2538 ASSERT_UNUSED(baseUse
, node
->arrayMode().alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(baseUse
)));
2540 MacroAssembler::Jump outOfBounds
= jumpForTypedArrayOutOfBounds(node
, base
, property
);
2541 if (node
->arrayMode().isInBounds() && outOfBounds
.isSet()) {
2542 speculationCheck(OutOfBounds
, JSValueSource(), 0, outOfBounds
);
2543 outOfBounds
= MacroAssembler::Jump();
2546 switch (elementSize(type
)) {
2548 m_jit
.moveDouble(valueFPR
, scratchFPR
);
2549 m_jit
.convertDoubleToFloat(valueFPR
, scratchFPR
);
2550 m_jit
.storeFloat(scratchFPR
, MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesFour
));
2554 m_jit
.storeDouble(valueFPR
, MacroAssembler::BaseIndex(storageReg
, property
, MacroAssembler::TimesEight
));
2557 RELEASE_ASSERT_NOT_REACHED();
2559 if (outOfBounds
.isSet())
2560 outOfBounds
.link(&m_jit
);
2564 void SpeculativeJIT::compileInstanceOfForObject(Node
*, GPRReg valueReg
, GPRReg prototypeReg
, GPRReg scratchReg
, GPRReg scratch2Reg
)
2566 // Check that prototype is an object.
2567 speculationCheck(BadType
, JSValueRegs(), 0, m_jit
.branchIfCellNotObject(prototypeReg
));
2569 // Initialize scratchReg with the value being checked.
2570 m_jit
.move(valueReg
, scratchReg
);
2572 // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
2573 MacroAssembler::Label
loop(&m_jit
);
2574 m_jit
.emitLoadStructure(scratchReg
, scratchReg
, scratch2Reg
);
2575 m_jit
.loadPtr(MacroAssembler::Address(scratchReg
, Structure::prototypeOffset() + CellPayloadOffset
), scratchReg
);
2576 MacroAssembler::Jump isInstance
= m_jit
.branchPtr(MacroAssembler::Equal
, scratchReg
, prototypeReg
);
2578 branchIsCell(JSValueRegs(scratchReg
)).linkTo(loop
, &m_jit
);
2580 m_jit
.branchTestPtr(MacroAssembler::NonZero
, scratchReg
).linkTo(loop
, &m_jit
);
2583 // No match - result is false.
2585 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg
);
2587 m_jit
.move(MacroAssembler::TrustedImm32(0), scratchReg
);
2589 MacroAssembler::Jump putResult
= m_jit
.jump();
2591 isInstance
.link(&m_jit
);
2593 m_jit
.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg
);
2595 m_jit
.move(MacroAssembler::TrustedImm32(1), scratchReg
);
2598 putResult
.link(&m_jit
);
2601 void SpeculativeJIT::compileInstanceOf(Node
* node
)
2603 if (node
->child1().useKind() == UntypedUse
) {
2604 // It might not be a cell. Speculate less aggressively.
2605 // Or: it might only be used once (i.e. by us), so we get zero benefit
2606 // from speculating any more aggressively than we absolutely need to.
2608 JSValueOperand
value(this, node
->child1());
2609 SpeculateCellOperand
prototype(this, node
->child2());
2610 GPRTemporary
scratch(this);
2611 GPRTemporary
scratch2(this);
2613 GPRReg prototypeReg
= prototype
.gpr();
2614 GPRReg scratchReg
= scratch
.gpr();
2615 GPRReg scratch2Reg
= scratch2
.gpr();
2617 MacroAssembler::Jump isCell
= branchIsCell(value
.jsValueRegs());
2618 GPRReg valueReg
= value
.jsValueRegs().payloadGPR();
2619 moveFalseTo(scratchReg
);
2621 MacroAssembler::Jump done
= m_jit
.jump();
2623 isCell
.link(&m_jit
);
2625 compileInstanceOfForObject(node
, valueReg
, prototypeReg
, scratchReg
, scratch2Reg
);
2629 blessedBooleanResult(scratchReg
, node
);
2633 SpeculateCellOperand
value(this, node
->child1());
2634 SpeculateCellOperand
prototype(this, node
->child2());
2636 GPRTemporary
scratch(this);
2637 GPRTemporary
scratch2(this);
2639 GPRReg valueReg
= value
.gpr();
2640 GPRReg prototypeReg
= prototype
.gpr();
2641 GPRReg scratchReg
= scratch
.gpr();
2642 GPRReg scratch2Reg
= scratch2
.gpr();
2644 compileInstanceOfForObject(node
, valueReg
, prototypeReg
, scratchReg
, scratch2Reg
);
2646 blessedBooleanResult(scratchReg
, node
);
2649 void SpeculativeJIT::compileAdd(Node
* node
)
2651 switch (node
->binaryUseKind()) {
2653 ASSERT(!shouldCheckNegativeZero(node
->arithMode()));
2655 if (isInt32Constant(node
->child1().node())) {
2656 int32_t imm1
= valueOfInt32Constant(node
->child1().node());
2657 SpeculateInt32Operand
op2(this, node
->child2());
2658 GPRTemporary
result(this);
2660 if (!shouldCheckOverflow(node
->arithMode())) {
2661 m_jit
.move(op2
.gpr(), result
.gpr());
2662 m_jit
.add32(Imm32(imm1
), result
.gpr());
2664 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchAdd32(MacroAssembler::Overflow
, op2
.gpr(), Imm32(imm1
), result
.gpr()));
2666 int32Result(result
.gpr(), node
);
2670 if (isInt32Constant(node
->child2().node())) {
2671 SpeculateInt32Operand
op1(this, node
->child1());
2672 int32_t imm2
= valueOfInt32Constant(node
->child2().node());
2673 GPRTemporary
result(this);
2675 if (!shouldCheckOverflow(node
->arithMode())) {
2676 m_jit
.move(op1
.gpr(), result
.gpr());
2677 m_jit
.add32(Imm32(imm2
), result
.gpr());
2679 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchAdd32(MacroAssembler::Overflow
, op1
.gpr(), Imm32(imm2
), result
.gpr()));
2681 int32Result(result
.gpr(), node
);
2685 SpeculateInt32Operand
op1(this, node
->child1());
2686 SpeculateInt32Operand
op2(this, node
->child2());
2687 GPRTemporary
result(this, Reuse
, op1
, op2
);
2689 GPRReg gpr1
= op1
.gpr();
2690 GPRReg gpr2
= op2
.gpr();
2691 GPRReg gprResult
= result
.gpr();
2693 if (!shouldCheckOverflow(node
->arithMode())) {
2694 if (gpr1
== gprResult
)
2695 m_jit
.add32(gpr2
, gprResult
);
2697 m_jit
.move(gpr2
, gprResult
);
2698 m_jit
.add32(gpr1
, gprResult
);
2701 MacroAssembler::Jump check
= m_jit
.branchAdd32(MacroAssembler::Overflow
, gpr1
, gpr2
, gprResult
);
2703 if (gpr1
== gprResult
)
2704 speculationCheck(Overflow
, JSValueRegs(), 0, check
, SpeculationRecovery(SpeculativeAdd
, gprResult
, gpr2
));
2705 else if (gpr2
== gprResult
)
2706 speculationCheck(Overflow
, JSValueRegs(), 0, check
, SpeculationRecovery(SpeculativeAdd
, gprResult
, gpr1
));
2708 speculationCheck(Overflow
, JSValueRegs(), 0, check
);
2711 int32Result(gprResult
, node
);
2717 ASSERT(shouldCheckOverflow(node
->arithMode()));
2718 ASSERT(!shouldCheckNegativeZero(node
->arithMode()));
2720 // Will we need an overflow check? If we can prove that neither input can be
2721 // Int52 then the overflow check will not be necessary.
2722 if (!m_state
.forNode(node
->child1()).couldBeType(SpecInt52
)
2723 && !m_state
.forNode(node
->child2()).couldBeType(SpecInt52
)) {
2724 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
2725 SpeculateWhicheverInt52Operand
op2(this, node
->child2(), op1
);
2726 GPRTemporary
result(this, Reuse
, op1
);
2727 m_jit
.move(op1
.gpr(), result
.gpr());
2728 m_jit
.add64(op2
.gpr(), result
.gpr());
2729 int52Result(result
.gpr(), node
, op1
.format());
2733 SpeculateInt52Operand
op1(this, node
->child1());
2734 SpeculateInt52Operand
op2(this, node
->child2());
2735 GPRTemporary
result(this);
2736 m_jit
.move(op1
.gpr(), result
.gpr());
2738 Int52Overflow
, JSValueRegs(), 0,
2739 m_jit
.branchAdd64(MacroAssembler::Overflow
, op2
.gpr(), result
.gpr()));
2740 int52Result(result
.gpr(), node
);
2743 #endif // USE(JSVALUE64)
2745 case DoubleRepUse
: {
2746 SpeculateDoubleOperand
op1(this, node
->child1());
2747 SpeculateDoubleOperand
op2(this, node
->child2());
2748 FPRTemporary
result(this, op1
, op2
);
2750 FPRReg reg1
= op1
.fpr();
2751 FPRReg reg2
= op2
.fpr();
2752 m_jit
.addDouble(reg1
, reg2
, result
.fpr());
2754 doubleResult(result
.fpr(), node
);
2759 RELEASE_ASSERT_NOT_REACHED();
2764 void SpeculativeJIT::compileMakeRope(Node
* node
)
2766 ASSERT(node
->child1().useKind() == KnownStringUse
);
2767 ASSERT(node
->child2().useKind() == KnownStringUse
);
2768 ASSERT(!node
->child3() || node
->child3().useKind() == KnownStringUse
);
2770 SpeculateCellOperand
op1(this, node
->child1());
2771 SpeculateCellOperand
op2(this, node
->child2());
2772 SpeculateCellOperand
op3(this, node
->child3());
2773 GPRTemporary
result(this);
2774 GPRTemporary
allocator(this);
2775 GPRTemporary
scratch(this);
2779 opGPRs
[0] = op1
.gpr();
2780 opGPRs
[1] = op2
.gpr();
2781 if (node
->child3()) {
2782 opGPRs
[2] = op3
.gpr();
2785 opGPRs
[2] = InvalidGPRReg
;
2788 GPRReg resultGPR
= result
.gpr();
2789 GPRReg allocatorGPR
= allocator
.gpr();
2790 GPRReg scratchGPR
= scratch
.gpr();
2792 JITCompiler::JumpList slowPath
;
2793 MarkedAllocator
& markedAllocator
= m_jit
.vm()->heap
.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString
));
2794 m_jit
.move(TrustedImmPtr(&markedAllocator
), allocatorGPR
);
2795 emitAllocateJSCell(resultGPR
, allocatorGPR
, TrustedImmPtr(m_jit
.vm()->stringStructure
.get()), scratchGPR
, slowPath
);
2797 m_jit
.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR
, JSString::offsetOfValue()));
2798 for (unsigned i
= 0; i
< numOpGPRs
; ++i
)
2799 m_jit
.storePtr(opGPRs
[i
], JITCompiler::Address(resultGPR
, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier
<JSString
>) * i
));
2800 for (unsigned i
= numOpGPRs
; i
< JSRopeString::s_maxInternalRopeLength
; ++i
)
2801 m_jit
.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR
, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier
<JSString
>) * i
));
2802 m_jit
.load32(JITCompiler::Address(opGPRs
[0], JSString::offsetOfFlags()), scratchGPR
);
2803 m_jit
.load32(JITCompiler::Address(opGPRs
[0], JSString::offsetOfLength()), allocatorGPR
);
2804 if (!ASSERT_DISABLED
) {
2805 JITCompiler::Jump ok
= m_jit
.branch32(
2806 JITCompiler::GreaterThanOrEqual
, allocatorGPR
, TrustedImm32(0));
2807 m_jit
.abortWithReason(DFGNegativeStringLength
);
2810 for (unsigned i
= 1; i
< numOpGPRs
; ++i
) {
2811 m_jit
.and32(JITCompiler::Address(opGPRs
[i
], JSString::offsetOfFlags()), scratchGPR
);
2813 Uncountable
, JSValueSource(), nullptr,
2815 JITCompiler::Overflow
,
2816 JITCompiler::Address(opGPRs
[i
], JSString::offsetOfLength()), allocatorGPR
));
2818 m_jit
.and32(JITCompiler::TrustedImm32(JSString::Is8Bit
), scratchGPR
);
2819 m_jit
.store32(scratchGPR
, JITCompiler::Address(resultGPR
, JSString::offsetOfFlags()));
2820 if (!ASSERT_DISABLED
) {
2821 JITCompiler::Jump ok
= m_jit
.branch32(
2822 JITCompiler::GreaterThanOrEqual
, allocatorGPR
, TrustedImm32(0));
2823 m_jit
.abortWithReason(DFGNegativeStringLength
);
2826 m_jit
.store32(allocatorGPR
, JITCompiler::Address(resultGPR
, JSString::offsetOfLength()));
2828 switch (numOpGPRs
) {
2830 addSlowPathGenerator(slowPathCall(
2831 slowPath
, this, operationMakeRope2
, resultGPR
, opGPRs
[0], opGPRs
[1]));
2834 addSlowPathGenerator(slowPathCall(
2835 slowPath
, this, operationMakeRope3
, resultGPR
, opGPRs
[0], opGPRs
[1], opGPRs
[2]));
2838 RELEASE_ASSERT_NOT_REACHED();
2842 cellResult(resultGPR
, node
);
2845 void SpeculativeJIT::compileArithSub(Node
* node
)
2847 switch (node
->binaryUseKind()) {
2849 ASSERT(!shouldCheckNegativeZero(node
->arithMode()));
2851 if (isNumberConstant(node
->child2().node())) {
2852 SpeculateInt32Operand
op1(this, node
->child1());
2853 int32_t imm2
= valueOfInt32Constant(node
->child2().node());
2854 GPRTemporary
result(this);
2856 if (!shouldCheckOverflow(node
->arithMode())) {
2857 m_jit
.move(op1
.gpr(), result
.gpr());
2858 m_jit
.sub32(Imm32(imm2
), result
.gpr());
2860 GPRTemporary
scratch(this);
2861 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchSub32(MacroAssembler::Overflow
, op1
.gpr(), Imm32(imm2
), result
.gpr(), scratch
.gpr()));
2864 int32Result(result
.gpr(), node
);
2868 if (isNumberConstant(node
->child1().node())) {
2869 int32_t imm1
= valueOfInt32Constant(node
->child1().node());
2870 SpeculateInt32Operand
op2(this, node
->child2());
2871 GPRTemporary
result(this);
2873 m_jit
.move(Imm32(imm1
), result
.gpr());
2874 if (!shouldCheckOverflow(node
->arithMode()))
2875 m_jit
.sub32(op2
.gpr(), result
.gpr());
2877 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchSub32(MacroAssembler::Overflow
, op2
.gpr(), result
.gpr()));
2879 int32Result(result
.gpr(), node
);
2883 SpeculateInt32Operand
op1(this, node
->child1());
2884 SpeculateInt32Operand
op2(this, node
->child2());
2885 GPRTemporary
result(this);
2887 if (!shouldCheckOverflow(node
->arithMode())) {
2888 m_jit
.move(op1
.gpr(), result
.gpr());
2889 m_jit
.sub32(op2
.gpr(), result
.gpr());
2891 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchSub32(MacroAssembler::Overflow
, op1
.gpr(), op2
.gpr(), result
.gpr()));
2893 int32Result(result
.gpr(), node
);
2899 ASSERT(shouldCheckOverflow(node
->arithMode()));
2900 ASSERT(!shouldCheckNegativeZero(node
->arithMode()));
2902 // Will we need an overflow check? If we can prove that neither input can be
2903 // Int52 then the overflow check will not be necessary.
2904 if (!m_state
.forNode(node
->child1()).couldBeType(SpecInt52
)
2905 && !m_state
.forNode(node
->child2()).couldBeType(SpecInt52
)) {
2906 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
2907 SpeculateWhicheverInt52Operand
op2(this, node
->child2(), op1
);
2908 GPRTemporary
result(this, Reuse
, op1
);
2909 m_jit
.move(op1
.gpr(), result
.gpr());
2910 m_jit
.sub64(op2
.gpr(), result
.gpr());
2911 int52Result(result
.gpr(), node
, op1
.format());
2915 SpeculateInt52Operand
op1(this, node
->child1());
2916 SpeculateInt52Operand
op2(this, node
->child2());
2917 GPRTemporary
result(this);
2918 m_jit
.move(op1
.gpr(), result
.gpr());
2920 Int52Overflow
, JSValueRegs(), 0,
2921 m_jit
.branchSub64(MacroAssembler::Overflow
, op2
.gpr(), result
.gpr()));
2922 int52Result(result
.gpr(), node
);
2925 #endif // USE(JSVALUE64)
2927 case DoubleRepUse
: {
2928 SpeculateDoubleOperand
op1(this, node
->child1());
2929 SpeculateDoubleOperand
op2(this, node
->child2());
2930 FPRTemporary
result(this, op1
);
2932 FPRReg reg1
= op1
.fpr();
2933 FPRReg reg2
= op2
.fpr();
2934 m_jit
.subDouble(reg1
, reg2
, result
.fpr());
2936 doubleResult(result
.fpr(), node
);
2941 RELEASE_ASSERT_NOT_REACHED();
2946 void SpeculativeJIT::compileArithNegate(Node
* node
)
2948 switch (node
->child1().useKind()) {
2950 SpeculateInt32Operand
op1(this, node
->child1());
2951 GPRTemporary
result(this);
2953 m_jit
.move(op1
.gpr(), result
.gpr());
2955 // Note: there is no notion of being not used as a number, but someone
2956 // caring about negative zero.
2958 if (!shouldCheckOverflow(node
->arithMode()))
2959 m_jit
.neg32(result
.gpr());
2960 else if (!shouldCheckNegativeZero(node
->arithMode()))
2961 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchNeg32(MacroAssembler::Overflow
, result
.gpr()));
2963 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(MacroAssembler::Zero
, result
.gpr(), TrustedImm32(0x7fffffff)));
2964 m_jit
.neg32(result
.gpr());
2967 int32Result(result
.gpr(), node
);
2973 ASSERT(shouldCheckOverflow(node
->arithMode()));
2975 if (!m_state
.forNode(node
->child1()).couldBeType(SpecInt52
)) {
2976 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
2977 GPRTemporary
result(this);
2978 GPRReg op1GPR
= op1
.gpr();
2979 GPRReg resultGPR
= result
.gpr();
2980 m_jit
.move(op1GPR
, resultGPR
);
2981 m_jit
.neg64(resultGPR
);
2982 if (shouldCheckNegativeZero(node
->arithMode())) {
2984 NegativeZero
, JSValueRegs(), 0,
2985 m_jit
.branchTest64(MacroAssembler::Zero
, resultGPR
));
2987 int52Result(resultGPR
, node
, op1
.format());
2991 SpeculateInt52Operand
op1(this, node
->child1());
2992 GPRTemporary
result(this);
2993 GPRReg op1GPR
= op1
.gpr();
2994 GPRReg resultGPR
= result
.gpr();
2995 m_jit
.move(op1GPR
, resultGPR
);
2997 Int52Overflow
, JSValueRegs(), 0,
2998 m_jit
.branchNeg64(MacroAssembler::Overflow
, resultGPR
));
2999 if (shouldCheckNegativeZero(node
->arithMode())) {
3001 NegativeZero
, JSValueRegs(), 0,
3002 m_jit
.branchTest64(MacroAssembler::Zero
, resultGPR
));
3004 int52Result(resultGPR
, node
);
3007 #endif // USE(JSVALUE64)
3009 case DoubleRepUse
: {
3010 SpeculateDoubleOperand
op1(this, node
->child1());
3011 FPRTemporary
result(this);
3013 m_jit
.negateDouble(op1
.fpr(), result
.fpr());
3015 doubleResult(result
.fpr(), node
);
3020 RELEASE_ASSERT_NOT_REACHED();
3024 void SpeculativeJIT::compileArithMul(Node
* node
)
3026 switch (node
->binaryUseKind()) {
3028 SpeculateInt32Operand
op1(this, node
->child1());
3029 SpeculateInt32Operand
op2(this, node
->child2());
3030 GPRTemporary
result(this);
3032 GPRReg reg1
= op1
.gpr();
3033 GPRReg reg2
= op2
.gpr();
3035 // We can perform truncated multiplications if we get to this point, because if the
3036 // fixup phase could not prove that it would be safe, it would have turned us into
3037 // a double multiplication.
3038 if (!shouldCheckOverflow(node
->arithMode())) {
3039 m_jit
.move(reg1
, result
.gpr());
3040 m_jit
.mul32(reg2
, result
.gpr());
3043 Overflow
, JSValueRegs(), 0,
3044 m_jit
.branchMul32(MacroAssembler::Overflow
, reg1
, reg2
, result
.gpr()));
3047 // Check for negative zero, if the users of this node care about such things.
3048 if (shouldCheckNegativeZero(node
->arithMode())) {
3049 MacroAssembler::Jump resultNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, result
.gpr());
3050 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, reg1
, TrustedImm32(0)));
3051 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, reg2
, TrustedImm32(0)));
3052 resultNonZero
.link(&m_jit
);
3055 int32Result(result
.gpr(), node
);
3061 ASSERT(shouldCheckOverflow(node
->arithMode()));
3063 // This is super clever. We want to do an int52 multiplication and check the
3064 // int52 overflow bit. There is no direct hardware support for this, but we do
3065 // have the ability to do an int64 multiplication and check the int64 overflow
3066 // bit. We leverage that. Consider that a, b are int52 numbers inside int64
3067 // registers, with the high 12 bits being sign-extended. We can do:
3071 // This will give us a left-shifted int52 (value is in high 52 bits, low 16
3072 // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
3073 // multiplication overflows is identical to whether the 'a * b' 52-bit
3074 // multiplication overflows.
3076 // In our nomenclature, this is:
3078 // strictInt52(a) * int52(b) => int52
3080 // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
3083 // We don't care which of op1 or op2 serves as the left-shifted operand, so
3084 // we just do whatever is more convenient for op1 and have op2 do the
3085 // opposite. This ensures that we do at most one shift.
3087 SpeculateWhicheverInt52Operand
op1(this, node
->child1());
3088 SpeculateWhicheverInt52Operand
op2(this, node
->child2(), OppositeShift
, op1
);
3089 GPRTemporary
result(this);
3091 GPRReg op1GPR
= op1
.gpr();
3092 GPRReg op2GPR
= op2
.gpr();
3093 GPRReg resultGPR
= result
.gpr();
3095 m_jit
.move(op1GPR
, resultGPR
);
3097 Int52Overflow
, JSValueRegs(), 0,
3098 m_jit
.branchMul64(MacroAssembler::Overflow
, op2GPR
, resultGPR
));
3100 if (shouldCheckNegativeZero(node
->arithMode())) {
3101 MacroAssembler::Jump resultNonZero
= m_jit
.branchTest64(
3102 MacroAssembler::NonZero
, resultGPR
);
3104 NegativeZero
, JSValueRegs(), 0,
3105 m_jit
.branch64(MacroAssembler::LessThan
, op1GPR
, TrustedImm64(0)));
3107 NegativeZero
, JSValueRegs(), 0,
3108 m_jit
.branch64(MacroAssembler::LessThan
, op2GPR
, TrustedImm64(0)));
3109 resultNonZero
.link(&m_jit
);
3112 int52Result(resultGPR
, node
);
3115 #endif // USE(JSVALUE64)
3117 case DoubleRepUse
: {
3118 SpeculateDoubleOperand
op1(this, node
->child1());
3119 SpeculateDoubleOperand
op2(this, node
->child2());
3120 FPRTemporary
result(this, op1
, op2
);
3122 FPRReg reg1
= op1
.fpr();
3123 FPRReg reg2
= op2
.fpr();
3125 m_jit
.mulDouble(reg1
, reg2
, result
.fpr());
3127 doubleResult(result
.fpr(), node
);
3132 RELEASE_ASSERT_NOT_REACHED();
3137 void SpeculativeJIT::compileArithDiv(Node
* node
)
3139 switch (node
->binaryUseKind()) {
3141 #if CPU(X86) || CPU(X86_64)
3142 SpeculateInt32Operand
op1(this, node
->child1());
3143 SpeculateInt32Operand
op2(this, node
->child2());
3144 GPRTemporary
eax(this, X86Registers::eax
);
3145 GPRTemporary
edx(this, X86Registers::edx
);
3146 GPRReg op1GPR
= op1
.gpr();
3147 GPRReg op2GPR
= op2
.gpr();
3151 if (op2GPR
== X86Registers::eax
|| op2GPR
== X86Registers::edx
) {
3152 op2TempGPR
= allocate();
3155 op2TempGPR
= InvalidGPRReg
;
3156 if (op1GPR
== X86Registers::eax
)
3157 temp
= X86Registers::edx
;
3159 temp
= X86Registers::eax
;
3162 ASSERT(temp
!= op1GPR
);
3163 ASSERT(temp
!= op2GPR
);
3165 m_jit
.add32(JITCompiler::TrustedImm32(1), op2GPR
, temp
);
3167 JITCompiler::Jump safeDenominator
= m_jit
.branch32(JITCompiler::Above
, temp
, JITCompiler::TrustedImm32(1));
3169 JITCompiler::JumpList done
;
3170 if (shouldCheckOverflow(node
->arithMode())) {
3171 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, op2GPR
));
3172 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::Equal
, op1GPR
, TrustedImm32(-2147483647-1)));
3174 // This is the case where we convert the result to an int after we're done, and we
3175 // already know that the denominator is either -1 or 0. So, if the denominator is
3176 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3177 // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
3178 // are happy to fall through to a normal division, since we're just dividing
3179 // something by negative 1.
3181 JITCompiler::Jump notZero
= m_jit
.branchTest32(JITCompiler::NonZero
, op2GPR
);
3182 m_jit
.move(TrustedImm32(0), eax
.gpr());
3183 done
.append(m_jit
.jump());
3185 notZero
.link(&m_jit
);
3186 JITCompiler::Jump notNeg2ToThe31
=
3187 m_jit
.branch32(JITCompiler::NotEqual
, op1GPR
, TrustedImm32(-2147483647-1));
3188 m_jit
.zeroExtend32ToPtr(op1GPR
, eax
.gpr());
3189 done
.append(m_jit
.jump());
3191 notNeg2ToThe31
.link(&m_jit
);
3194 safeDenominator
.link(&m_jit
);
3196 // If the user cares about negative zero, then speculate that we're not about
3197 // to produce negative zero.
3198 if (shouldCheckNegativeZero(node
->arithMode())) {
3199 MacroAssembler::Jump numeratorNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, op1GPR
);
3200 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, op2GPR
, TrustedImm32(0)));
3201 numeratorNonZero
.link(&m_jit
);
3204 if (op2TempGPR
!= InvalidGPRReg
) {
3205 m_jit
.move(op2GPR
, op2TempGPR
);
3206 op2GPR
= op2TempGPR
;
3209 m_jit
.move(op1GPR
, eax
.gpr());
3210 m_jit
.assembler().cdq();
3211 m_jit
.assembler().idivl_r(op2GPR
);
3213 if (op2TempGPR
!= InvalidGPRReg
)
3216 // Check that there was no remainder. If there had been, then we'd be obligated to
3217 // produce a double result instead.
3218 if (shouldCheckOverflow(node
->arithMode()))
3219 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::NonZero
, edx
.gpr()));
3222 int32Result(eax
.gpr(), node
);
3223 #elif CPU(APPLE_ARMV7S) || CPU(ARM64)
3224 SpeculateInt32Operand
op1(this, node
->child1());
3225 SpeculateInt32Operand
op2(this, node
->child2());
3226 GPRReg op1GPR
= op1
.gpr();
3227 GPRReg op2GPR
= op2
.gpr();
3228 GPRTemporary
quotient(this);
3229 GPRTemporary
multiplyAnswer(this);
3231 // If the user cares about negative zero, then speculate that we're not about
3232 // to produce negative zero.
3233 if (shouldCheckNegativeZero(node
->arithMode())) {
3234 MacroAssembler::Jump numeratorNonZero
= m_jit
.branchTest32(MacroAssembler::NonZero
, op1GPR
);
3235 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, op2GPR
, TrustedImm32(0)));
3236 numeratorNonZero
.link(&m_jit
);
3239 m_jit
.assembler().sdiv
<32>(quotient
.gpr(), op1GPR
, op2GPR
);
3241 // Check that there was no remainder. If there had been, then we'd be obligated to
3242 // produce a double result instead.
3243 if (shouldCheckOverflow(node
->arithMode())) {
3244 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchMul32(JITCompiler::Overflow
, quotient
.gpr(), op2GPR
, multiplyAnswer
.gpr()));
3245 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::NotEqual
, multiplyAnswer
.gpr(), op1GPR
));
3248 int32Result(quotient
.gpr(), node
);
3250 RELEASE_ASSERT_NOT_REACHED();
3255 case DoubleRepUse
: {
3256 SpeculateDoubleOperand
op1(this, node
->child1());
3257 SpeculateDoubleOperand
op2(this, node
->child2());
3258 FPRTemporary
result(this, op1
);
3260 FPRReg reg1
= op1
.fpr();
3261 FPRReg reg2
= op2
.fpr();
3262 m_jit
.divDouble(reg1
, reg2
, result
.fpr());
3264 doubleResult(result
.fpr(), node
);
3269 RELEASE_ASSERT_NOT_REACHED();
3274 void SpeculativeJIT::compileArithMod(Node
* node
)
3276 switch (node
->binaryUseKind()) {
3278 // In the fast path, the dividend value could be the final result
3279 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3280 SpeculateStrictInt32Operand
op1(this, node
->child1());
3282 if (isInt32Constant(node
->child2().node())) {
3283 int32_t divisor
= valueOfInt32Constant(node
->child2().node());
3284 if (divisor
> 1 && hasOneBitSet(divisor
)) {
3285 unsigned logarithm
= WTF::fastLog2(divisor
);
3286 GPRReg dividendGPR
= op1
.gpr();
3287 GPRTemporary
result(this);
3288 GPRReg resultGPR
= result
.gpr();
3290 // This is what LLVM generates. It's pretty crazy. Here's my
3291 // attempt at understanding it.
3293 // First, compute either divisor - 1, or 0, depending on whether
3294 // the dividend is negative:
3296 // If dividend < 0: resultGPR = divisor - 1
3297 // If dividend >= 0: resultGPR = 0
3298 m_jit
.move(dividendGPR
, resultGPR
);
3299 m_jit
.rshift32(TrustedImm32(31), resultGPR
);
3300 m_jit
.urshift32(TrustedImm32(32 - logarithm
), resultGPR
);
3302 // Add in the dividend, so that:
3304 // If dividend < 0: resultGPR = dividend + divisor - 1
3305 // If dividend >= 0: resultGPR = dividend
3306 m_jit
.add32(dividendGPR
, resultGPR
);
3308 // Mask so as to only get the *high* bits. This rounds down
3309 // (towards negative infinity) resultGPR to the nearest multiple
3310 // of divisor, so that:
3312 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3313 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3315 // Note that this can be simplified to:
3317 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3318 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3320 // Note that if the dividend is negative, resultGPR will also be negative.
3321 // Regardless of the sign of dividend, resultGPR will be rounded towards
3322 // zero, because of how things are conditionalized.
3323 m_jit
.and32(TrustedImm32(-divisor
), resultGPR
);
3325 // Subtract resultGPR from dividendGPR, which yields the remainder:
3327 // resultGPR = dividendGPR - resultGPR
3328 m_jit
.neg32(resultGPR
);
3329 m_jit
.add32(dividendGPR
, resultGPR
);
3331 if (shouldCheckNegativeZero(node
->arithMode())) {
3332 // Check that we're not about to create negative zero.
3333 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, dividendGPR
, TrustedImm32(0));
3334 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, resultGPR
));
3335 numeratorPositive
.link(&m_jit
);
3338 int32Result(resultGPR
, node
);
3343 #if CPU(X86) || CPU(X86_64)
3344 if (isInt32Constant(node
->child2().node())) {
3345 int32_t divisor
= valueOfInt32Constant(node
->child2().node());
3346 if (divisor
&& divisor
!= -1) {
3347 GPRReg op1Gpr
= op1
.gpr();
3349 GPRTemporary
eax(this, X86Registers::eax
);
3350 GPRTemporary
edx(this, X86Registers::edx
);
3351 GPRTemporary
scratch(this);
3352 GPRReg scratchGPR
= scratch
.gpr();
3355 if (op1Gpr
== X86Registers::eax
|| op1Gpr
== X86Registers::edx
) {
3356 op1SaveGPR
= allocate();
3357 ASSERT(op1Gpr
!= op1SaveGPR
);
3358 m_jit
.move(op1Gpr
, op1SaveGPR
);
3360 op1SaveGPR
= op1Gpr
;
3361 ASSERT(op1SaveGPR
!= X86Registers::eax
);
3362 ASSERT(op1SaveGPR
!= X86Registers::edx
);
3364 m_jit
.move(op1Gpr
, eax
.gpr());
3365 m_jit
.move(TrustedImm32(divisor
), scratchGPR
);
3366 m_jit
.assembler().cdq();
3367 m_jit
.assembler().idivl_r(scratchGPR
);
3368 if (shouldCheckNegativeZero(node
->arithMode())) {
3369 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, op1SaveGPR
, TrustedImm32(0));
3370 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, edx
.gpr()));
3371 numeratorPositive
.link(&m_jit
);
3374 if (op1SaveGPR
!= op1Gpr
)
3377 int32Result(edx
.gpr(), node
);
3383 SpeculateInt32Operand
op2(this, node
->child2());
3384 #if CPU(X86) || CPU(X86_64)
3385 GPRTemporary
eax(this, X86Registers::eax
);
3386 GPRTemporary
edx(this, X86Registers::edx
);
3387 GPRReg op1GPR
= op1
.gpr();
3388 GPRReg op2GPR
= op2
.gpr();
3394 if (op2GPR
== X86Registers::eax
|| op2GPR
== X86Registers::edx
) {
3395 op2TempGPR
= allocate();
3398 op2TempGPR
= InvalidGPRReg
;
3399 if (op1GPR
== X86Registers::eax
)
3400 temp
= X86Registers::edx
;
3402 temp
= X86Registers::eax
;
3405 if (op1GPR
== X86Registers::eax
|| op1GPR
== X86Registers::edx
) {
3406 op1SaveGPR
= allocate();
3407 ASSERT(op1GPR
!= op1SaveGPR
);
3408 m_jit
.move(op1GPR
, op1SaveGPR
);
3410 op1SaveGPR
= op1GPR
;
3412 ASSERT(temp
!= op1GPR
);
3413 ASSERT(temp
!= op2GPR
);
3414 ASSERT(op1SaveGPR
!= X86Registers::eax
);
3415 ASSERT(op1SaveGPR
!= X86Registers::edx
);
3417 m_jit
.add32(JITCompiler::TrustedImm32(1), op2GPR
, temp
);
3419 JITCompiler::Jump safeDenominator
= m_jit
.branch32(JITCompiler::Above
, temp
, JITCompiler::TrustedImm32(1));
3421 JITCompiler::JumpList done
;
3423 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3424 // separate case for that. But it probably doesn't matter so much.
3425 if (shouldCheckOverflow(node
->arithMode())) {
3426 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, op2GPR
));
3427 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branch32(JITCompiler::Equal
, op1GPR
, TrustedImm32(-2147483647-1)));
3429 // This is the case where we convert the result to an int after we're done, and we
3430 // already know that the denominator is either -1 or 0. So, if the denominator is
3431 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3432 // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3433 // happy to fall through to a normal division, since we're just dividing something
3436 JITCompiler::Jump notZero
= m_jit
.branchTest32(JITCompiler::NonZero
, op2GPR
);
3437 m_jit
.move(TrustedImm32(0), edx
.gpr());
3438 done
.append(m_jit
.jump());
3440 notZero
.link(&m_jit
);
3441 JITCompiler::Jump notNeg2ToThe31
=
3442 m_jit
.branch32(JITCompiler::NotEqual
, op1GPR
, TrustedImm32(-2147483647-1));
3443 m_jit
.move(TrustedImm32(0), edx
.gpr());
3444 done
.append(m_jit
.jump());
3446 notNeg2ToThe31
.link(&m_jit
);
3449 safeDenominator
.link(&m_jit
);
3451 if (op2TempGPR
!= InvalidGPRReg
) {
3452 m_jit
.move(op2GPR
, op2TempGPR
);
3453 op2GPR
= op2TempGPR
;
3456 m_jit
.move(op1GPR
, eax
.gpr());
3457 m_jit
.assembler().cdq();
3458 m_jit
.assembler().idivl_r(op2GPR
);
3460 if (op2TempGPR
!= InvalidGPRReg
)
3463 // Check that we're not about to create negative zero.
3464 if (shouldCheckNegativeZero(node
->arithMode())) {
3465 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, op1SaveGPR
, TrustedImm32(0));
3466 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, edx
.gpr()));
3467 numeratorPositive
.link(&m_jit
);
3470 if (op1SaveGPR
!= op1GPR
)
3474 int32Result(edx
.gpr(), node
);
3476 #elif CPU(ARM64) || CPU(APPLE_ARMV7S)
3477 GPRTemporary
temp(this);
3478 GPRTemporary
quotientThenRemainder(this);
3479 GPRTemporary
multiplyAnswer(this);
3480 GPRReg dividendGPR
= op1
.gpr();
3481 GPRReg divisorGPR
= op2
.gpr();
3482 GPRReg quotientThenRemainderGPR
= quotientThenRemainder
.gpr();
3483 GPRReg multiplyAnswerGPR
= multiplyAnswer
.gpr();
3485 JITCompiler::JumpList done
;
3487 if (shouldCheckOverflow(node
->arithMode()))
3488 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, divisorGPR
));
3490 JITCompiler::Jump denominatorNotZero
= m_jit
.branchTest32(JITCompiler::NonZero
, divisorGPR
);
3491 m_jit
.move(divisorGPR
, quotientThenRemainderGPR
);
3492 done
.append(m_jit
.jump());
3493 denominatorNotZero
.link(&m_jit
);
3496 m_jit
.assembler().sdiv
<32>(quotientThenRemainderGPR
, dividendGPR
, divisorGPR
);
3497 // FIXME: It seems like there are cases where we don't need this? What if we have
3498 // arithMode() == Arith::Unchecked?
3499 // https://bugs.webkit.org/show_bug.cgi?id=126444
3500 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchMul32(JITCompiler::Overflow
, quotientThenRemainderGPR
, divisorGPR
, multiplyAnswerGPR
));
3501 #if CPU(APPLE_ARMV7S)
3502 m_jit
.assembler().sub(quotientThenRemainderGPR
, dividendGPR
, multiplyAnswerGPR
);
3504 m_jit
.assembler().sub
<32>(quotientThenRemainderGPR
, dividendGPR
, multiplyAnswerGPR
);
3507 // If the user cares about negative zero, then speculate that we're not about
3508 // to produce negative zero.
3509 if (shouldCheckNegativeZero(node
->arithMode())) {
3510 // Check that we're not about to create negative zero.
3511 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, dividendGPR
, TrustedImm32(0));
3512 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, quotientThenRemainderGPR
));
3513 numeratorPositive
.link(&m_jit
);
3518 int32Result(quotientThenRemainderGPR
, node
);
3519 #else // not architecture that can do integer division
3520 RELEASE_ASSERT_NOT_REACHED();
3525 case DoubleRepUse
: {
3526 SpeculateDoubleOperand
op1(this, node
->child1());
3527 SpeculateDoubleOperand
op2(this, node
->child2());
3529 FPRReg op1FPR
= op1
.fpr();
3530 FPRReg op2FPR
= op2
.fpr();
3534 FPRResult
result(this);
3536 callOperation(fmodAsDFGOperation
, result
.fpr(), op1FPR
, op2FPR
);
3538 doubleResult(result
.fpr(), node
);
3543 RELEASE_ASSERT_NOT_REACHED();
3548 // Returns true if the compare is fused with a subsequent branch.
3549 bool SpeculativeJIT::compare(Node
* node
, MacroAssembler::RelationalCondition condition
, MacroAssembler::DoubleCondition doubleCondition
, S_JITOperation_EJJ operation
)
3551 if (compilePeepHoleBranch(node
, condition
, doubleCondition
, operation
))
3554 if (node
->isBinaryUseKind(Int32Use
)) {
3555 compileInt32Compare(node
, condition
);
3560 if (node
->isBinaryUseKind(Int52RepUse
)) {
3561 compileInt52Compare(node
, condition
);
3564 #endif // USE(JSVALUE64)
3566 if (node
->isBinaryUseKind(DoubleRepUse
)) {
3567 compileDoubleCompare(node
, doubleCondition
);
3571 if (node
->op() == CompareEq
) {
3572 if (node
->isBinaryUseKind(StringUse
)) {
3573 compileStringEquality(node
);
3577 if (node
->isBinaryUseKind(BooleanUse
)) {
3578 compileBooleanCompare(node
, condition
);
3582 if (node
->isBinaryUseKind(StringIdentUse
)) {
3583 compileStringIdentEquality(node
);
3587 if (node
->isBinaryUseKind(ObjectUse
)) {
3588 compileObjectEquality(node
);
3592 if (node
->isBinaryUseKind(ObjectUse
, ObjectOrOtherUse
)) {
3593 compileObjectToObjectOrOtherEquality(node
->child1(), node
->child2());
3597 if (node
->isBinaryUseKind(ObjectOrOtherUse
, ObjectUse
)) {
3598 compileObjectToObjectOrOtherEquality(node
->child2(), node
->child1());
3603 nonSpeculativeNonPeepholeCompare(node
, condition
, operation
);
3607 bool SpeculativeJIT::compileStrictEq(Node
* node
)
3609 if (node
->isBinaryUseKind(BooleanUse
)) {
3610 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3611 if (branchIndexInBlock
!= UINT_MAX
) {
3612 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
3613 compilePeepHoleBooleanBranch(node
, branchNode
, MacroAssembler::Equal
);
3614 use(node
->child1());
3615 use(node
->child2());
3616 m_indexInBlock
= branchIndexInBlock
;
3617 m_currentNode
= branchNode
;
3620 compileBooleanCompare(node
, MacroAssembler::Equal
);
3624 if (node
->isBinaryUseKind(Int32Use
)) {
3625 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3626 if (branchIndexInBlock
!= UINT_MAX
) {
3627 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
3628 compilePeepHoleInt32Branch(node
, branchNode
, MacroAssembler::Equal
);
3629 use(node
->child1());
3630 use(node
->child2());
3631 m_indexInBlock
= branchIndexInBlock
;
3632 m_currentNode
= branchNode
;
3635 compileInt32Compare(node
, MacroAssembler::Equal
);
3640 if (node
->isBinaryUseKind(Int52RepUse
)) {
3641 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3642 if (branchIndexInBlock
!= UINT_MAX
) {
3643 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
3644 compilePeepHoleInt52Branch(node
, branchNode
, MacroAssembler::Equal
);
3645 use(node
->child1());
3646 use(node
->child2());
3647 m_indexInBlock
= branchIndexInBlock
;
3648 m_currentNode
= branchNode
;
3651 compileInt52Compare(node
, MacroAssembler::Equal
);
3654 #endif // USE(JSVALUE64)
3656 if (node
->isBinaryUseKind(DoubleRepUse
)) {
3657 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3658 if (branchIndexInBlock
!= UINT_MAX
) {
3659 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
3660 compilePeepHoleDoubleBranch(node
, branchNode
, MacroAssembler::DoubleEqual
);
3661 use(node
->child1());
3662 use(node
->child2());
3663 m_indexInBlock
= branchIndexInBlock
;
3664 m_currentNode
= branchNode
;
3667 compileDoubleCompare(node
, MacroAssembler::DoubleEqual
);
3671 if (node
->isBinaryUseKind(StringUse
)) {
3672 compileStringEquality(node
);
3676 if (node
->isBinaryUseKind(StringIdentUse
)) {
3677 compileStringIdentEquality(node
);
3681 if (node
->isBinaryUseKind(ObjectUse
)) {
3682 unsigned branchIndexInBlock
= detectPeepHoleBranch();
3683 if (branchIndexInBlock
!= UINT_MAX
) {
3684 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
3685 compilePeepHoleObjectEquality(node
, branchNode
);
3686 use(node
->child1());
3687 use(node
->child2());
3688 m_indexInBlock
= branchIndexInBlock
;
3689 m_currentNode
= branchNode
;
3692 compileObjectEquality(node
);
3696 if (node
->isBinaryUseKind(MiscUse
, UntypedUse
)
3697 || node
->isBinaryUseKind(UntypedUse
, MiscUse
)) {
3698 compileMiscStrictEq(node
);
3702 if (node
->isBinaryUseKind(StringIdentUse
, NotStringVarUse
)) {
3703 compileStringIdentToNotStringVarEquality(node
, node
->child1(), node
->child2());
3707 if (node
->isBinaryUseKind(NotStringVarUse
, StringIdentUse
)) {
3708 compileStringIdentToNotStringVarEquality(node
, node
->child2(), node
->child1());
3712 if (node
->isBinaryUseKind(StringUse
, UntypedUse
)) {
3713 compileStringToUntypedEquality(node
, node
->child1(), node
->child2());
3717 if (node
->isBinaryUseKind(UntypedUse
, StringUse
)) {
3718 compileStringToUntypedEquality(node
, node
->child2(), node
->child1());
3722 RELEASE_ASSERT(node
->isBinaryUseKind(UntypedUse
));
3723 return nonSpeculativeStrictEq(node
);
3726 void SpeculativeJIT::compileBooleanCompare(Node
* node
, MacroAssembler::RelationalCondition condition
)
3728 SpeculateBooleanOperand
op1(this, node
->child1());
3729 SpeculateBooleanOperand
op2(this, node
->child2());
3730 GPRTemporary
result(this);
3732 m_jit
.compare32(condition
, op1
.gpr(), op2
.gpr(), result
.gpr());
3734 unblessedBooleanResult(result
.gpr(), node
);
3737 void SpeculativeJIT::compileStringEquality(
3738 Node
* node
, GPRReg leftGPR
, GPRReg rightGPR
, GPRReg lengthGPR
, GPRReg leftTempGPR
,
3739 GPRReg rightTempGPR
, GPRReg leftTemp2GPR
, GPRReg rightTemp2GPR
,
3740 JITCompiler::JumpList fastTrue
, JITCompiler::JumpList fastFalse
)
3742 JITCompiler::JumpList trueCase
;
3743 JITCompiler::JumpList falseCase
;
3744 JITCompiler::JumpList slowCase
;
3746 trueCase
.append(fastTrue
);
3747 falseCase
.append(fastFalse
);
3749 m_jit
.load32(MacroAssembler::Address(leftGPR
, JSString::offsetOfLength()), lengthGPR
);
3751 falseCase
.append(m_jit
.branch32(
3752 MacroAssembler::NotEqual
,
3753 MacroAssembler::Address(rightGPR
, JSString::offsetOfLength()),
3756 trueCase
.append(m_jit
.branchTest32(MacroAssembler::Zero
, lengthGPR
));
3758 m_jit
.loadPtr(MacroAssembler::Address(leftGPR
, JSString::offsetOfValue()), leftTempGPR
);
3759 m_jit
.loadPtr(MacroAssembler::Address(rightGPR
, JSString::offsetOfValue()), rightTempGPR
);
3761 slowCase
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, leftTempGPR
));
3762 slowCase
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, rightTempGPR
));
3764 slowCase
.append(m_jit
.branchTest32(
3765 MacroAssembler::Zero
,
3766 MacroAssembler::Address(leftTempGPR
, StringImpl::flagsOffset()),
3767 TrustedImm32(StringImpl::flagIs8Bit())));
3768 slowCase
.append(m_jit
.branchTest32(
3769 MacroAssembler::Zero
,
3770 MacroAssembler::Address(rightTempGPR
, StringImpl::flagsOffset()),
3771 TrustedImm32(StringImpl::flagIs8Bit())));
3773 m_jit
.loadPtr(MacroAssembler::Address(leftTempGPR
, StringImpl::dataOffset()), leftTempGPR
);
3774 m_jit
.loadPtr(MacroAssembler::Address(rightTempGPR
, StringImpl::dataOffset()), rightTempGPR
);
3776 MacroAssembler::Label loop
= m_jit
.label();
3778 m_jit
.sub32(TrustedImm32(1), lengthGPR
);
3780 // This isn't going to generate the best code on x86. But that's OK, it's still better
3781 // than not inlining.
3782 m_jit
.load8(MacroAssembler::BaseIndex(leftTempGPR
, lengthGPR
, MacroAssembler::TimesOne
), leftTemp2GPR
);
3783 m_jit
.load8(MacroAssembler::BaseIndex(rightTempGPR
, lengthGPR
, MacroAssembler::TimesOne
), rightTemp2GPR
);
3784 falseCase
.append(m_jit
.branch32(MacroAssembler::NotEqual
, leftTemp2GPR
, rightTemp2GPR
));
3786 m_jit
.branchTest32(MacroAssembler::NonZero
, lengthGPR
).linkTo(loop
, &m_jit
);
3788 trueCase
.link(&m_jit
);
3789 moveTrueTo(leftTempGPR
);
3791 JITCompiler::Jump done
= m_jit
.jump();
3793 falseCase
.link(&m_jit
);
3794 moveFalseTo(leftTempGPR
);
3797 addSlowPathGenerator(
3799 slowCase
, this, operationCompareStringEq
, leftTempGPR
, leftGPR
, rightGPR
));
3801 blessedBooleanResult(leftTempGPR
, node
);
3804 void SpeculativeJIT::compileStringEquality(Node
* node
)
3806 SpeculateCellOperand
left(this, node
->child1());
3807 SpeculateCellOperand
right(this, node
->child2());
3808 GPRTemporary
length(this);
3809 GPRTemporary
leftTemp(this);
3810 GPRTemporary
rightTemp(this);
3811 GPRTemporary
leftTemp2(this, Reuse
, left
);
3812 GPRTemporary
rightTemp2(this, Reuse
, right
);
3814 GPRReg leftGPR
= left
.gpr();
3815 GPRReg rightGPR
= right
.gpr();
3816 GPRReg lengthGPR
= length
.gpr();
3817 GPRReg leftTempGPR
= leftTemp
.gpr();
3818 GPRReg rightTempGPR
= rightTemp
.gpr();
3819 GPRReg leftTemp2GPR
= leftTemp2
.gpr();
3820 GPRReg rightTemp2GPR
= rightTemp2
.gpr();
3822 speculateString(node
->child1(), leftGPR
);
3824 // It's safe to branch around the type check below, since proving that the values are
3825 // equal does indeed prove that the right value is a string.
3826 JITCompiler::Jump fastTrue
= m_jit
.branchPtr(MacroAssembler::Equal
, leftGPR
, rightGPR
);
3828 speculateString(node
->child2(), rightGPR
);
3830 compileStringEquality(
3831 node
, leftGPR
, rightGPR
, lengthGPR
, leftTempGPR
, rightTempGPR
, leftTemp2GPR
,
3832 rightTemp2GPR
, fastTrue
, JITCompiler::Jump());
3835 void SpeculativeJIT::compileStringToUntypedEquality(Node
* node
, Edge stringEdge
, Edge untypedEdge
)
3837 SpeculateCellOperand
left(this, stringEdge
);
3838 JSValueOperand
right(this, untypedEdge
, ManualOperandSpeculation
);
3839 GPRTemporary
length(this);
3840 GPRTemporary
leftTemp(this);
3841 GPRTemporary
rightTemp(this);
3842 GPRTemporary
leftTemp2(this, Reuse
, left
);
3843 GPRTemporary
rightTemp2(this);
3845 GPRReg leftGPR
= left
.gpr();
3846 JSValueRegs rightRegs
= right
.jsValueRegs();
3847 GPRReg lengthGPR
= length
.gpr();
3848 GPRReg leftTempGPR
= leftTemp
.gpr();
3849 GPRReg rightTempGPR
= rightTemp
.gpr();
3850 GPRReg leftTemp2GPR
= leftTemp2
.gpr();
3851 GPRReg rightTemp2GPR
= rightTemp2
.gpr();
3853 speculateString(stringEdge
, leftGPR
);
3855 JITCompiler::JumpList fastTrue
;
3856 JITCompiler::JumpList fastFalse
;
3858 fastFalse
.append(branchNotCell(rightRegs
));
3860 // It's safe to branch around the type check below, since proving that the values are
3861 // equal does indeed prove that the right value is a string.
3862 fastTrue
.append(m_jit
.branchPtr(
3863 MacroAssembler::Equal
, leftGPR
, rightRegs
.payloadGPR()));
3865 fastFalse
.append(m_jit
.branchStructurePtr(
3866 MacroAssembler::NotEqual
,
3867 MacroAssembler::Address(rightRegs
.payloadGPR(), JSCell::structureIDOffset()),
3868 m_jit
.vm()->stringStructure
.get()));
3870 compileStringEquality(
3871 node
, leftGPR
, rightRegs
.payloadGPR(), lengthGPR
, leftTempGPR
, rightTempGPR
, leftTemp2GPR
,
3872 rightTemp2GPR
, fastTrue
, fastFalse
);
3875 void SpeculativeJIT::compileStringIdentEquality(Node
* node
)
3877 SpeculateCellOperand
left(this, node
->child1());
3878 SpeculateCellOperand
right(this, node
->child2());
3879 GPRTemporary
leftTemp(this);
3880 GPRTemporary
rightTemp(this);
3882 GPRReg leftGPR
= left
.gpr();
3883 GPRReg rightGPR
= right
.gpr();
3884 GPRReg leftTempGPR
= leftTemp
.gpr();
3885 GPRReg rightTempGPR
= rightTemp
.gpr();
3887 speculateString(node
->child1(), leftGPR
);
3888 speculateString(node
->child2(), rightGPR
);
3890 speculateStringIdentAndLoadStorage(node
->child1(), leftGPR
, leftTempGPR
);
3891 speculateStringIdentAndLoadStorage(node
->child2(), rightGPR
, rightTempGPR
);
3893 m_jit
.comparePtr(MacroAssembler::Equal
, leftTempGPR
, rightTempGPR
, leftTempGPR
);
3895 unblessedBooleanResult(leftTempGPR
, node
);
3898 void SpeculativeJIT::compileStringIdentToNotStringVarEquality(
3899 Node
* node
, Edge stringEdge
, Edge notStringVarEdge
)
3901 SpeculateCellOperand
left(this, stringEdge
);
3902 JSValueOperand
right(this, notStringVarEdge
, ManualOperandSpeculation
);
3903 GPRTemporary
leftTemp(this);
3904 GPRTemporary
rightTemp(this);
3905 GPRReg leftTempGPR
= leftTemp
.gpr();
3906 GPRReg rightTempGPR
= rightTemp
.gpr();
3907 GPRReg leftGPR
= left
.gpr();
3908 JSValueRegs rightRegs
= right
.jsValueRegs();
3910 speculateString(stringEdge
, leftGPR
);
3911 speculateStringIdentAndLoadStorage(stringEdge
, leftGPR
, leftTempGPR
);
3913 moveFalseTo(rightTempGPR
);
3914 JITCompiler::JumpList notString
;
3915 notString
.append(branchNotCell(rightRegs
));
3916 notString
.append(m_jit
.branchStructurePtr(
3917 MacroAssembler::NotEqual
,
3918 MacroAssembler::Address(rightRegs
.payloadGPR(), JSCell::structureIDOffset()),
3919 m_jit
.vm()->stringStructure
.get()));
3921 speculateStringIdentAndLoadStorage(notStringVarEdge
, rightRegs
.payloadGPR(), rightTempGPR
);
3923 m_jit
.comparePtr(MacroAssembler::Equal
, leftTempGPR
, rightTempGPR
, rightTempGPR
);
3924 notString
.link(&m_jit
);
3926 unblessedBooleanResult(rightTempGPR
, node
);
3929 void SpeculativeJIT::compileStringZeroLength(Node
* node
)
3931 SpeculateCellOperand
str(this, node
->child1());
3932 GPRReg strGPR
= str
.gpr();
3934 // Make sure that this is a string.
3935 speculateString(node
->child1(), strGPR
);
3937 GPRTemporary
eq(this);
3938 GPRReg eqGPR
= eq
.gpr();
3940 // Fetch the length field from the string object.
3941 m_jit
.test32(MacroAssembler::Zero
, MacroAssembler::Address(strGPR
, JSString::offsetOfLength()), MacroAssembler::TrustedImm32(-1), eqGPR
);
3943 unblessedBooleanResult(eqGPR
, node
);
3946 void SpeculativeJIT::compileConstantStoragePointer(Node
* node
)
3948 GPRTemporary
storage(this);
3949 GPRReg storageGPR
= storage
.gpr();
3950 m_jit
.move(TrustedImmPtr(node
->storagePointer()), storageGPR
);
3951 storageResult(storageGPR
, node
);
3954 void SpeculativeJIT::compileGetIndexedPropertyStorage(Node
* node
)
3956 SpeculateCellOperand
base(this, node
->child1());
3957 GPRReg baseReg
= base
.gpr();
3959 GPRTemporary
storage(this);
3960 GPRReg storageReg
= storage
.gpr();
3962 switch (node
->arrayMode().type()) {
3964 m_jit
.loadPtr(MacroAssembler::Address(baseReg
, JSString::offsetOfValue()), storageReg
);
3966 addSlowPathGenerator(
3968 m_jit
.branchTest32(MacroAssembler::Zero
, storageReg
),
3969 this, operationResolveRope
, storageReg
, baseReg
));
3971 m_jit
.loadPtr(MacroAssembler::Address(storageReg
, StringImpl::dataOffset()), storageReg
);
3975 ASSERT(isTypedView(node
->arrayMode().typedArrayType()));
3977 MacroAssembler::Address(baseReg
, JSArrayBufferView::offsetOfVector()),
3982 storageResult(storageReg
, node
);
3985 void SpeculativeJIT::compileGetTypedArrayByteOffset(Node
* node
)
3987 SpeculateCellOperand
base(this, node
->child1());
3988 GPRTemporary
vector(this);
3989 GPRTemporary
data(this);
3991 GPRReg baseGPR
= base
.gpr();
3992 GPRReg vectorGPR
= vector
.gpr();
3993 GPRReg dataGPR
= data
.gpr();
3995 JITCompiler::Jump emptyByteOffset
= m_jit
.branch32(
3996 MacroAssembler::NotEqual
,
3997 MacroAssembler::Address(baseGPR
, JSArrayBufferView::offsetOfMode()),
3998 TrustedImm32(WastefulTypedArray
));
4000 m_jit
.loadPtr(MacroAssembler::Address(baseGPR
, JSObject::butterflyOffset()), dataGPR
);
4001 m_jit
.loadPtr(MacroAssembler::Address(baseGPR
, JSArrayBufferView::offsetOfVector()), vectorGPR
);
4002 m_jit
.loadPtr(MacroAssembler::Address(dataGPR
, Butterfly::offsetOfArrayBuffer()), dataGPR
);
4003 m_jit
.loadPtr(MacroAssembler::Address(dataGPR
, ArrayBuffer::offsetOfData()), dataGPR
);
4004 m_jit
.subPtr(dataGPR
, vectorGPR
);
4006 JITCompiler::Jump done
= m_jit
.jump();
4008 emptyByteOffset
.link(&m_jit
);
4009 m_jit
.move(TrustedImmPtr(0), vectorGPR
);
4013 int32Result(vectorGPR
, node
);
4016 void SpeculativeJIT::compileGetByValOnArguments(Node
* node
)
4018 SpeculateCellOperand
base(this, node
->child1());
4019 SpeculateStrictInt32Operand
property(this, node
->child2());
4020 GPRTemporary
result(this);
4021 #if USE(JSVALUE32_64)
4022 GPRTemporary
resultTag(this);
4024 GPRTemporary
scratch(this);
4026 GPRReg baseReg
= base
.gpr();
4027 GPRReg propertyReg
= property
.gpr();
4028 GPRReg resultReg
= result
.gpr();
4029 #if USE(JSVALUE32_64)
4030 GPRReg resultTagReg
= resultTag
.gpr();
4032 GPRReg scratchReg
= scratch
.gpr();
4037 ASSERT(ArrayMode(Array::Arguments
).alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
4039 // Two really lame checks.
4041 Uncountable
, JSValueSource(), 0,
4043 MacroAssembler::AboveOrEqual
, propertyReg
,
4044 MacroAssembler::Address(baseReg
, Arguments::offsetOfNumArguments())));
4046 Uncountable
, JSValueSource(), 0,
4047 m_jit
.branchTestPtr(
4048 MacroAssembler::NonZero
,
4049 MacroAssembler::Address(
4050 baseReg
, Arguments::offsetOfSlowArgumentData())));
4052 m_jit
.move(propertyReg
, resultReg
);
4053 m_jit
.signExtend32ToPtr(resultReg
, resultReg
);
4055 MacroAssembler::Address(baseReg
, Arguments::offsetOfRegisters()),
4058 #if USE(JSVALUE32_64)
4060 MacroAssembler::BaseIndex(
4061 scratchReg
, resultReg
, MacroAssembler::TimesEight
,
4062 CallFrame::thisArgumentOffset() * sizeof(Register
) + sizeof(Register
) +
4063 OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)),
4066 MacroAssembler::BaseIndex(
4067 scratchReg
, resultReg
, MacroAssembler::TimesEight
,
4068 CallFrame::thisArgumentOffset() * sizeof(Register
) + sizeof(Register
) +
4069 OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)),
4071 jsValueResult(resultTagReg
, resultReg
, node
);
4074 MacroAssembler::BaseIndex(
4075 scratchReg
, resultReg
, MacroAssembler::TimesEight
,
4076 CallFrame::thisArgumentOffset() * sizeof(Register
) + sizeof(Register
)),
4078 jsValueResult(resultReg
, node
);
4082 void SpeculativeJIT::compileGetArgumentsLength(Node
* node
)
4084 SpeculateCellOperand
base(this, node
->child1());
4085 GPRTemporary
result(this, Reuse
, base
);
4087 GPRReg baseReg
= base
.gpr();
4088 GPRReg resultReg
= result
.gpr();
4093 ASSERT(ArrayMode(Array::Arguments
).alreadyChecked(m_jit
.graph(), node
, m_state
.forNode(node
->child1())));
4096 Uncountable
, JSValueSource(), 0,
4098 MacroAssembler::NonZero
,
4099 MacroAssembler::Address(baseReg
, Arguments::offsetOfOverrodeLength())));
4102 MacroAssembler::Address(baseReg
, Arguments::offsetOfNumArguments()),
4104 int32Result(resultReg
, node
);
4107 void SpeculativeJIT::compileGetArrayLength(Node
* node
)
4109 switch (node
->arrayMode().type()) {
4112 case Array::Contiguous
: {
4113 StorageOperand
storage(this, node
->child2());
4114 GPRTemporary
result(this, Reuse
, storage
);
4115 GPRReg storageReg
= storage
.gpr();
4116 GPRReg resultReg
= result
.gpr();
4117 m_jit
.load32(MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()), resultReg
);
4119 int32Result(resultReg
, node
);
4122 case Array::ArrayStorage
:
4123 case Array::SlowPutArrayStorage
: {
4124 StorageOperand
storage(this, node
->child2());
4125 GPRTemporary
result(this, Reuse
, storage
);
4126 GPRReg storageReg
= storage
.gpr();
4127 GPRReg resultReg
= result
.gpr();
4128 m_jit
.load32(MacroAssembler::Address(storageReg
, Butterfly::offsetOfPublicLength()), resultReg
);
4130 speculationCheck(Uncountable
, JSValueRegs(), 0, m_jit
.branch32(MacroAssembler::LessThan
, resultReg
, MacroAssembler::TrustedImm32(0)));
4132 int32Result(resultReg
, node
);
4135 case Array::String
: {
4136 SpeculateCellOperand
base(this, node
->child1());
4137 GPRTemporary
result(this, Reuse
, base
);
4138 GPRReg baseGPR
= base
.gpr();
4139 GPRReg resultGPR
= result
.gpr();
4140 m_jit
.load32(MacroAssembler::Address(baseGPR
, JSString::offsetOfLength()), resultGPR
);
4141 int32Result(resultGPR
, node
);
4144 case Array::Arguments
: {
4145 compileGetArgumentsLength(node
);
4149 ASSERT(isTypedView(node
->arrayMode().typedArrayType()));
4150 SpeculateCellOperand
base(this, node
->child1());
4151 GPRTemporary
result(this, Reuse
, base
);
4152 GPRReg baseGPR
= base
.gpr();
4153 GPRReg resultGPR
= result
.gpr();
4154 m_jit
.load32(MacroAssembler::Address(baseGPR
, JSArrayBufferView::offsetOfLength()), resultGPR
);
4155 int32Result(resultGPR
, node
);
4160 void SpeculativeJIT::compileNewFunctionNoCheck(Node
* node
)
4162 GPRResult
result(this);
4163 GPRReg resultGPR
= result
.gpr();
4166 operationNewFunctionNoCheck
, resultGPR
, m_jit
.codeBlock()->functionDecl(node
->functionDeclIndex()));
4167 cellResult(resultGPR
, node
);
4170 void SpeculativeJIT::compileNewFunctionExpression(Node
* node
)
4172 GPRResult
result(this);
4173 GPRReg resultGPR
= result
.gpr();
4176 operationNewFunctionNoCheck
,
4178 m_jit
.codeBlock()->functionExpr(node
->functionExprIndex()));
4179 cellResult(resultGPR
, node
);
4182 bool SpeculativeJIT::compileRegExpExec(Node
* node
)
4184 unsigned branchIndexInBlock
= detectPeepHoleBranch();
4185 if (branchIndexInBlock
== UINT_MAX
)
4187 Node
* branchNode
= m_block
->at(branchIndexInBlock
);
4188 ASSERT(node
->adjustedRefCount() == 1);
4190 BasicBlock
* taken
= branchNode
->branchData()->taken
.block
;
4191 BasicBlock
* notTaken
= branchNode
->branchData()->notTaken
.block
;
4193 bool invert
= false;
4194 if (taken
== nextBlock()) {
4196 BasicBlock
* tmp
= taken
;
4201 SpeculateCellOperand
base(this, node
->child1());
4202 SpeculateCellOperand
argument(this, node
->child2());
4203 GPRReg baseGPR
= base
.gpr();
4204 GPRReg argumentGPR
= argument
.gpr();
4207 GPRResult
result(this);
4208 callOperation(operationRegExpTest
, result
.gpr(), baseGPR
, argumentGPR
);
4210 branchTest32(invert
? JITCompiler::Zero
: JITCompiler::NonZero
, result
.gpr(), taken
);
4213 use(node
->child1());
4214 use(node
->child2());
4215 m_indexInBlock
= branchIndexInBlock
;
4216 m_currentNode
= branchNode
;
4221 void SpeculativeJIT::compileAllocatePropertyStorage(Node
* node
)
4223 if (node
->structureTransitionData().previousStructure
->couldHaveIndexingHeader()) {
4224 SpeculateCellOperand
base(this, node
->child1());
4226 GPRReg baseGPR
= base
.gpr();
4230 GPRResult
result(this);
4231 callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity
, result
.gpr(), baseGPR
);
4233 storageResult(result
.gpr(), node
);
4237 SpeculateCellOperand
base(this, node
->child1());
4238 GPRTemporary
scratch1(this);
4240 GPRReg baseGPR
= base
.gpr();
4241 GPRReg scratchGPR1
= scratch1
.gpr();
4243 ASSERT(!node
->structureTransitionData().previousStructure
->outOfLineCapacity());
4244 ASSERT(initialOutOfLineCapacity
== node
->structureTransitionData().newStructure
->outOfLineCapacity());
4246 JITCompiler::Jump slowPath
=
4247 emitAllocateBasicStorage(
4248 TrustedImm32(initialOutOfLineCapacity
* sizeof(JSValue
)), scratchGPR1
);
4250 m_jit
.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader
)), scratchGPR1
);
4252 addSlowPathGenerator(
4253 slowPathCall(slowPath
, this, operationAllocatePropertyStorageWithInitialCapacity
, scratchGPR1
));
4255 m_jit
.storePtr(scratchGPR1
, JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()));
4257 storageResult(scratchGPR1
, node
);
4260 void SpeculativeJIT::compileReallocatePropertyStorage(Node
* node
)
4262 size_t oldSize
= node
->structureTransitionData().previousStructure
->outOfLineCapacity() * sizeof(JSValue
);
4263 size_t newSize
= oldSize
* outOfLineGrowthFactor
;
4264 ASSERT(newSize
== node
->structureTransitionData().newStructure
->outOfLineCapacity() * sizeof(JSValue
));
4266 if (node
->structureTransitionData().previousStructure
->couldHaveIndexingHeader()) {
4267 SpeculateCellOperand
base(this, node
->child1());
4269 GPRReg baseGPR
= base
.gpr();
4273 GPRResult
result(this);
4274 callOperation(operationReallocateButterflyToGrowPropertyStorage
, result
.gpr(), baseGPR
, newSize
/ sizeof(JSValue
));
4276 storageResult(result
.gpr(), node
);
4280 SpeculateCellOperand
base(this, node
->child1());
4281 StorageOperand
oldStorage(this, node
->child2());
4282 GPRTemporary
scratch1(this);
4283 GPRTemporary
scratch2(this);
4285 GPRReg baseGPR
= base
.gpr();
4286 GPRReg oldStorageGPR
= oldStorage
.gpr();
4287 GPRReg scratchGPR1
= scratch1
.gpr();
4288 GPRReg scratchGPR2
= scratch2
.gpr();
4290 JITCompiler::Jump slowPath
=
4291 emitAllocateBasicStorage(TrustedImm32(newSize
), scratchGPR1
);
4293 m_jit
.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader
)), scratchGPR1
);
4295 addSlowPathGenerator(
4296 slowPathCall(slowPath
, this, operationAllocatePropertyStorage
, scratchGPR1
, newSize
/ sizeof(JSValue
)));
4298 // We have scratchGPR1 = new storage, scratchGPR2 = scratch
4299 for (ptrdiff_t offset
= 0; offset
< static_cast<ptrdiff_t>(oldSize
); offset
+= sizeof(void*)) {
4300 m_jit
.loadPtr(JITCompiler::Address(oldStorageGPR
, -(offset
+ sizeof(JSValue
) + sizeof(void*))), scratchGPR2
);
4301 m_jit
.storePtr(scratchGPR2
, JITCompiler::Address(scratchGPR1
, -(offset
+ sizeof(JSValue
) + sizeof(void*))));
4303 m_jit
.storePtr(scratchGPR1
, JITCompiler::Address(baseGPR
, JSObject::butterflyOffset()));
4305 storageResult(scratchGPR1
, node
);
4308 GPRReg
SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary
& temporary
, ArrayMode arrayMode
)
4310 if (!putByValWillNeedExtraRegister(arrayMode
))
4311 return InvalidGPRReg
;
4313 GPRTemporary
realTemporary(this);
4314 temporary
.adopt(realTemporary
);
4315 return temporary
.gpr();
4318 void SpeculativeJIT::compileToStringOnCell(Node
* node
)
4320 SpeculateCellOperand
op1(this, node
->child1());
4321 GPRReg op1GPR
= op1
.gpr();
4323 switch (node
->child1().useKind()) {
4324 case StringObjectUse
: {
4325 GPRTemporary
result(this);
4326 GPRReg resultGPR
= result
.gpr();
4328 speculateStringObject(node
->child1(), op1GPR
);
4329 m_interpreter
.filter(node
->child1(), SpecStringObject
);
4331 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, JSWrapperObject::internalValueCellOffset()), resultGPR
);
4332 cellResult(resultGPR
, node
);
4336 case StringOrStringObjectUse
: {
4337 GPRTemporary
result(this);
4338 GPRReg resultGPR
= result
.gpr();
4340 m_jit
.load32(JITCompiler::Address(op1GPR
, JSCell::structureIDOffset()), resultGPR
);
4341 JITCompiler::Jump isString
= m_jit
.branchStructurePtr(
4344 m_jit
.vm()->stringStructure
.get());
4346 speculateStringObjectForStructure(node
->child1(), resultGPR
);
4348 m_jit
.loadPtr(JITCompiler::Address(op1GPR
, JSWrapperObject::internalValueCellOffset()), resultGPR
);
4350 JITCompiler::Jump done
= m_jit
.jump();
4351 isString
.link(&m_jit
);
4352 m_jit
.move(op1GPR
, resultGPR
);
4355 m_interpreter
.filter(node
->child1(), SpecString
| SpecStringObject
);
4357 cellResult(resultGPR
, node
);
4362 GPRResult
result(this);
4363 GPRReg resultGPR
= result
.gpr();
4365 // We flush registers instead of silent spill/fill because in this mode we
4366 // believe that most likely the input is not a string, and we need to take
4369 JITCompiler::Jump done
;
4370 if (node
->child1()->prediction() & SpecString
) {
4371 JITCompiler::Jump needCall
= m_jit
.branchStructurePtr(
4372 JITCompiler::NotEqual
,
4373 JITCompiler::Address(op1GPR
, JSCell::structureIDOffset()),
4374 m_jit
.vm()->stringStructure
.get());
4375 m_jit
.move(op1GPR
, resultGPR
);
4376 done
= m_jit
.jump();
4377 needCall
.link(&m_jit
);
4379 callOperation(operationToStringOnCell
, resultGPR
, op1GPR
);
4382 cellResult(resultGPR
, node
);
4387 RELEASE_ASSERT_NOT_REACHED();
4391 void SpeculativeJIT::compileNewStringObject(Node
* node
)
4393 SpeculateCellOperand
operand(this, node
->child1());
4395 GPRTemporary
result(this);
4396 GPRTemporary
scratch1(this);
4397 GPRTemporary
scratch2(this);
4399 GPRReg operandGPR
= operand
.gpr();
4400 GPRReg resultGPR
= result
.gpr();
4401 GPRReg scratch1GPR
= scratch1
.gpr();
4402 GPRReg scratch2GPR
= scratch2
.gpr();
4404 JITCompiler::JumpList slowPath
;
4406 emitAllocateJSObject
<StringObject
>(
4407 resultGPR
, TrustedImmPtr(node
->structure()), TrustedImmPtr(0), scratch1GPR
, scratch2GPR
,
4411 TrustedImmPtr(StringObject::info()),
4412 JITCompiler::Address(resultGPR
, JSDestructibleObject::classInfoOffset()));
4415 operandGPR
, JITCompiler::Address(resultGPR
, JSWrapperObject::internalValueOffset()));
4418 TrustedImm32(JSValue::CellTag
),
4419 JITCompiler::Address(resultGPR
, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
4422 JITCompiler::Address(resultGPR
, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
4425 addSlowPathGenerator(slowPathCall(
4426 slowPath
, this, operationNewStringObject
, resultGPR
, operandGPR
, node
->structure()));
4428 cellResult(resultGPR
, node
);
4431 void SpeculativeJIT::compileNewTypedArray(Node
* node
)
4433 JSGlobalObject
* globalObject
= m_jit
.graph().globalObjectFor(node
->origin
.semantic
);
4434 TypedArrayType type
= node
->typedArrayType();
4435 Structure
* structure
= globalObject
->typedArrayStructure(type
);
4437 SpeculateInt32Operand
size(this, node
->child1());
4438 GPRReg sizeGPR
= size
.gpr();
4440 GPRTemporary
result(this);
4441 GPRTemporary
storage(this);
4442 GPRTemporary
scratch(this);
4443 GPRTemporary
scratch2(this);
4444 GPRReg resultGPR
= result
.gpr();
4445 GPRReg storageGPR
= storage
.gpr();
4446 GPRReg scratchGPR
= scratch
.gpr();
4447 GPRReg scratchGPR2
= scratch2
.gpr();
4449 JITCompiler::JumpList slowCases
;
4451 slowCases
.append(m_jit
.branch32(
4452 MacroAssembler::Above
, sizeGPR
, TrustedImm32(JSArrayBufferView::fastSizeLimit
)));
4453 slowCases
.append(m_jit
.branchTest32(MacroAssembler::Zero
, sizeGPR
));
4455 m_jit
.move(sizeGPR
, scratchGPR
);
4456 m_jit
.lshift32(TrustedImm32(logElementSize(type
)), scratchGPR
);
4457 if (elementSize(type
) < 8) {
4458 m_jit
.add32(TrustedImm32(7), scratchGPR
);
4459 m_jit
.and32(TrustedImm32(~7), scratchGPR
);
4462 emitAllocateBasicStorage(scratchGPR
, storageGPR
));
4464 m_jit
.subPtr(scratchGPR
, storageGPR
);
4466 emitAllocateJSObject
<JSArrayBufferView
>(
4467 resultGPR
, TrustedImmPtr(structure
), TrustedImmPtr(0), scratchGPR
, scratchGPR2
,
4472 MacroAssembler::Address(resultGPR
, JSArrayBufferView::offsetOfVector()));
4475 MacroAssembler::Address(resultGPR
, JSArrayBufferView::offsetOfLength()));
4477 TrustedImm32(FastTypedArray
),
4478 MacroAssembler::Address(resultGPR
, JSArrayBufferView::offsetOfMode()));
4480 #if USE(JSVALUE32_64)
4481 MacroAssembler::Jump done
= m_jit
.branchTest32(MacroAssembler::Zero
, sizeGPR
);
4482 m_jit
.move(sizeGPR
, scratchGPR
);
4483 if (elementSize(type
) != 4) {
4484 if (elementSize(type
) > 4)
4485 m_jit
.lshift32(TrustedImm32(logElementSize(type
) - 2), scratchGPR
);
4487 if (elementSize(type
) > 1)
4488 m_jit
.lshift32(TrustedImm32(logElementSize(type
)), scratchGPR
);
4489 m_jit
.add32(TrustedImm32(3), scratchGPR
);
4490 m_jit
.urshift32(TrustedImm32(2), scratchGPR
);
4493 MacroAssembler::Label loop
= m_jit
.label();
4494 m_jit
.sub32(TrustedImm32(1), scratchGPR
);
4497 MacroAssembler::BaseIndex(storageGPR
, scratchGPR
, MacroAssembler::TimesFour
));
4498 m_jit
.branchTest32(MacroAssembler::NonZero
, scratchGPR
).linkTo(loop
, &m_jit
);
4500 #endif // USE(JSVALUE32_64)
4502 addSlowPathGenerator(slowPathCall(
4503 slowCases
, this, operationNewTypedArrayWithSizeForType(type
),
4504 resultGPR
, structure
, sizeGPR
));
4506 cellResult(resultGPR
, node
);
4509 void SpeculativeJIT::speculateInt32(Edge edge
)
4511 if (!needsTypeCheck(edge
, SpecInt32
))
4514 (SpeculateInt32Operand(this, edge
)).gpr();
4517 void SpeculativeJIT::speculateNumber(Edge edge
)
4519 if (!needsTypeCheck(edge
, SpecBytecodeNumber
))
4522 JSValueOperand
value(this, edge
, ManualOperandSpeculation
);
4524 GPRReg gpr
= value
.gpr();
4526 JSValueRegs(gpr
), edge
, SpecBytecodeNumber
,
4527 m_jit
.branchTest64(MacroAssembler::Zero
, gpr
, GPRInfo::tagTypeNumberRegister
));
4529 GPRReg tagGPR
= value
.tagGPR();
4531 value
.jsValueRegs(), edge
, ~SpecInt32
,
4532 m_jit
.branch32(MacroAssembler::Equal
, tagGPR
, TrustedImm32(JSValue::Int32Tag
)));
4534 value
.jsValueRegs(), edge
, SpecBytecodeNumber
,
4535 m_jit
.branch32(MacroAssembler::AboveOrEqual
, tagGPR
, TrustedImm32(JSValue::LowestTag
)));
4539 void SpeculativeJIT::speculateDoubleReal(Edge edge
)
4541 if (!needsTypeCheck(edge
, SpecDoubleReal
))
4544 SpeculateDoubleOperand
operand(this, edge
);
4545 FPRReg fpr
= operand
.fpr();
4547 JSValueRegs(), edge
, SpecDoubleReal
,
4549 MacroAssembler::DoubleNotEqualOrUnordered
, fpr
, fpr
));
4552 void SpeculativeJIT::speculateBoolean(Edge edge
)
4554 if (!needsTypeCheck(edge
, SpecBoolean
))
4557 (SpeculateBooleanOperand(this, edge
)).gpr();
4560 void SpeculativeJIT::speculateCell(Edge edge
)
4562 if (!needsTypeCheck(edge
, SpecCell
))
4565 (SpeculateCellOperand(this, edge
)).gpr();
4568 void SpeculativeJIT::speculateObject(Edge edge
)
4570 if (!needsTypeCheck(edge
, SpecObject
))
4573 SpeculateCellOperand
operand(this, edge
);
4574 GPRReg gpr
= operand
.gpr();
4576 JSValueSource::unboxedCell(gpr
), edge
, SpecObject
, m_jit
.branchStructurePtr(
4577 MacroAssembler::Equal
,
4578 MacroAssembler::Address(gpr
, JSCell::structureIDOffset()),
4579 m_jit
.vm()->stringStructure
.get()));
4582 void SpeculativeJIT::speculateFinalObject(Edge edge
)
4584 if (!needsTypeCheck(edge
, SpecFinalObject
))
4587 SpeculateCellOperand
operand(this, edge
);
4588 GPRReg gpr
= operand
.gpr();
4590 JSValueSource::unboxedCell(gpr
), edge
, SpecFinalObject
, m_jit
.branch8(
4591 MacroAssembler::NotEqual
,
4592 MacroAssembler::Address(gpr
, JSCell::typeInfoTypeOffset()),
4593 TrustedImm32(FinalObjectType
)));
4596 void SpeculativeJIT::speculateObjectOrOther(Edge edge
)
4598 if (!needsTypeCheck(edge
, SpecObject
| SpecOther
))
4601 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4602 GPRTemporary
temp(this);
4603 GPRReg tempGPR
= temp
.gpr();
4604 MacroAssembler::Jump notCell
= branchNotCell(operand
.jsValueRegs());
4605 GPRReg gpr
= operand
.jsValueRegs().payloadGPR();
4607 operand
.jsValueRegs(), edge
, (~SpecCell
) | SpecObject
, m_jit
.branchStructurePtr(
4608 MacroAssembler::Equal
,
4609 MacroAssembler::Address(gpr
, JSCell::structureIDOffset()),
4610 m_jit
.vm()->stringStructure
.get()));
4611 MacroAssembler::Jump done
= m_jit
.jump();
4612 notCell
.link(&m_jit
);
4613 if (needsTypeCheck(edge
, SpecCell
| SpecOther
)) {
4615 operand
.jsValueRegs(), edge
, SpecCell
| SpecOther
,
4616 branchNotOther(operand
.jsValueRegs(), tempGPR
));
4621 void SpeculativeJIT::speculateString(Edge edge
, GPRReg cell
)
4624 JSValueSource::unboxedCell(cell
), edge
, SpecString
| ~SpecCell
,
4625 m_jit
.branchStructurePtr(
4626 MacroAssembler::NotEqual
,
4627 MacroAssembler::Address(cell
, JSCell::structureIDOffset()),
4628 m_jit
.vm()->stringStructure
.get()));
4631 void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge
, GPRReg string
, GPRReg storage
)
4633 m_jit
.loadPtr(MacroAssembler::Address(string
, JSString::offsetOfValue()), storage
);
4635 if (!needsTypeCheck(edge
, SpecStringIdent
| ~SpecString
))
4639 BadType
, JSValueSource::unboxedCell(string
), edge
,
4640 m_jit
.branchTestPtr(MacroAssembler::Zero
, storage
));
4642 BadType
, JSValueSource::unboxedCell(string
), edge
, m_jit
.branchTest32(
4643 MacroAssembler::Zero
,
4644 MacroAssembler::Address(storage
, StringImpl::flagsOffset()),
4645 MacroAssembler::TrustedImm32(StringImpl::flagIsAtomic())));
4647 m_interpreter
.filter(edge
, SpecStringIdent
| ~SpecString
);
4650 void SpeculativeJIT::speculateStringIdent(Edge edge
, GPRReg string
)
4652 if (!needsTypeCheck(edge
, SpecStringIdent
))
4655 GPRTemporary
temp(this);
4656 speculateStringIdentAndLoadStorage(edge
, string
, temp
.gpr());
4659 void SpeculativeJIT::speculateStringIdent(Edge edge
)
4661 if (!needsTypeCheck(edge
, SpecStringIdent
))
4664 SpeculateCellOperand
operand(this, edge
);
4665 GPRReg gpr
= operand
.gpr();
4666 speculateString(edge
, gpr
);
4667 speculateStringIdent(edge
, gpr
);
4670 void SpeculativeJIT::speculateString(Edge edge
)
4672 if (!needsTypeCheck(edge
, SpecString
))
4675 SpeculateCellOperand
operand(this, edge
);
4676 speculateString(edge
, operand
.gpr());
4679 void SpeculativeJIT::speculateStringObject(Edge edge
, GPRReg gpr
)
4681 speculateStringObjectForStructure(edge
, JITCompiler::Address(gpr
, JSCell::structureIDOffset()));
4684 void SpeculativeJIT::speculateStringObject(Edge edge
)
4686 if (!needsTypeCheck(edge
, SpecStringObject
))
4689 SpeculateCellOperand
operand(this, edge
);
4690 GPRReg gpr
= operand
.gpr();
4691 if (!needsTypeCheck(edge
, SpecStringObject
))
4694 speculateStringObject(edge
, gpr
);
4695 m_interpreter
.filter(edge
, SpecStringObject
);
4698 void SpeculativeJIT::speculateStringOrStringObject(Edge edge
)
4700 if (!needsTypeCheck(edge
, SpecString
| SpecStringObject
))
4703 SpeculateCellOperand
operand(this, edge
);
4704 GPRReg gpr
= operand
.gpr();
4705 if (!needsTypeCheck(edge
, SpecString
| SpecStringObject
))
4708 GPRTemporary
structureID(this);
4709 GPRReg structureIDGPR
= structureID
.gpr();
4711 m_jit
.load32(JITCompiler::Address(gpr
, JSCell::structureIDOffset()), structureIDGPR
);
4712 JITCompiler::Jump isString
= m_jit
.branchStructurePtr(
4715 m_jit
.vm()->stringStructure
.get());
4717 speculateStringObjectForStructure(edge
, structureIDGPR
);
4719 isString
.link(&m_jit
);
4721 m_interpreter
.filter(edge
, SpecString
| SpecStringObject
);
4724 void SpeculativeJIT::speculateNotStringVar(Edge edge
)
4726 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4727 GPRTemporary
temp(this);
4728 GPRReg tempGPR
= temp
.gpr();
4730 JITCompiler::Jump notCell
= branchNotCell(operand
.jsValueRegs());
4731 GPRReg cell
= operand
.jsValueRegs().payloadGPR();
4733 JITCompiler::Jump notString
= m_jit
.branchStructurePtr(
4734 MacroAssembler::NotEqual
,
4735 MacroAssembler::Address(cell
, JSCell::structureIDOffset()),
4736 m_jit
.vm()->stringStructure
.get());
4738 speculateStringIdentAndLoadStorage(edge
, cell
, tempGPR
);
4740 notString
.link(&m_jit
);
4741 notCell
.link(&m_jit
);
4744 void SpeculativeJIT::speculateNotCell(Edge edge
)
4746 if (!needsTypeCheck(edge
, ~SpecCell
))
4749 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4750 typeCheck(operand
.jsValueRegs(), edge
, ~SpecCell
, branchIsCell(operand
.jsValueRegs()));
4753 void SpeculativeJIT::speculateOther(Edge edge
)
4755 if (!needsTypeCheck(edge
, SpecOther
))
4758 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4759 GPRTemporary
temp(this);
4760 GPRReg tempGPR
= temp
.gpr();
4762 operand
.jsValueRegs(), edge
, SpecOther
,
4763 branchNotOther(operand
.jsValueRegs(), tempGPR
));
4766 void SpeculativeJIT::speculateMisc(Edge edge
, JSValueRegs regs
)
4770 regs
, edge
, SpecMisc
,
4771 m_jit
.branch64(MacroAssembler::Above
, regs
.gpr(), MacroAssembler::TrustedImm64(TagBitTypeOther
| TagBitBool
| TagBitUndefined
)));
4774 regs
, edge
, ~SpecInt32
,
4775 m_jit
.branch32(MacroAssembler::Equal
, regs
.tagGPR(), MacroAssembler::TrustedImm32(JSValue::Int32Tag
)));
4777 regs
, edge
, SpecMisc
,
4778 m_jit
.branch32(MacroAssembler::Below
, regs
.tagGPR(), MacroAssembler::TrustedImm32(JSValue::UndefinedTag
)));
4782 void SpeculativeJIT::speculateMisc(Edge edge
)
4784 if (!needsTypeCheck(edge
, SpecMisc
))
4787 JSValueOperand
operand(this, edge
, ManualOperandSpeculation
);
4788 speculateMisc(edge
, operand
.jsValueRegs());
4791 void SpeculativeJIT::speculate(Node
*, Edge edge
)
4793 switch (edge
.useKind()) {
4797 ASSERT(!needsTypeCheck(edge
, SpecInt32
));
4800 ASSERT(!needsTypeCheck(edge
, SpecFullDouble
));
4803 ASSERT(!needsTypeCheck(edge
, SpecMachineInt
));
4806 ASSERT(!needsTypeCheck(edge
, SpecCell
));
4808 case KnownStringUse
:
4809 ASSERT(!needsTypeCheck(edge
, SpecString
));
4812 speculateInt32(edge
);
4815 speculateNumber(edge
);
4817 case DoubleRepRealUse
:
4818 speculateDoubleReal(edge
);
4822 speculateMachineInt(edge
);
4824 case DoubleRepMachineIntUse
:
4825 speculateDoubleRepMachineInt(edge
);
4829 speculateBoolean(edge
);
4832 speculateCell(edge
);
4835 speculateObject(edge
);
4837 case FinalObjectUse
:
4838 speculateFinalObject(edge
);
4840 case ObjectOrOtherUse
:
4841 speculateObjectOrOther(edge
);
4843 case StringIdentUse
:
4844 speculateStringIdent(edge
);
4847 speculateString(edge
);
4849 case StringObjectUse
:
4850 speculateStringObject(edge
);
4852 case StringOrStringObjectUse
:
4853 speculateStringOrStringObject(edge
);
4855 case NotStringVarUse
:
4856 speculateNotStringVar(edge
);
4859 speculateNotCell(edge
);
4862 speculateOther(edge
);
4865 speculateMisc(edge
);
4868 RELEASE_ASSERT_NOT_REACHED();
4873 void SpeculativeJIT::emitSwitchIntJump(
4874 SwitchData
* data
, GPRReg value
, GPRReg scratch
)
4876 SimpleJumpTable
& table
= m_jit
.codeBlock()->switchJumpTable(data
->switchTableIndex
);
4877 table
.ensureCTITable();
4878 m_jit
.sub32(Imm32(table
.min
), value
);
4880 m_jit
.branch32(JITCompiler::AboveOrEqual
, value
, Imm32(table
.ctiOffsets
.size())),
4881 data
->fallThrough
.block
);
4882 m_jit
.move(TrustedImmPtr(table
.ctiOffsets
.begin()), scratch
);
4883 m_jit
.loadPtr(JITCompiler::BaseIndex(scratch
, value
, JITCompiler::timesPtr()), scratch
);
4884 m_jit
.jump(scratch
);
4885 data
->didUseJumpTable
= true;
4888 void SpeculativeJIT::emitSwitchImm(Node
* node
, SwitchData
* data
)
4890 switch (node
->child1().useKind()) {
4892 SpeculateInt32Operand
value(this, node
->child1());
4893 GPRTemporary
temp(this);
4894 emitSwitchIntJump(data
, value
.gpr(), temp
.gpr());
4900 JSValueOperand
value(this, node
->child1());
4901 GPRTemporary
temp(this);
4902 JSValueRegs valueRegs
= value
.jsValueRegs();
4903 GPRReg scratch
= temp
.gpr();
4908 JITCompiler::Jump notInt
= m_jit
.branch64(
4909 JITCompiler::Below
, valueRegs
.gpr(), GPRInfo::tagTypeNumberRegister
);
4910 emitSwitchIntJump(data
, valueRegs
.gpr(), scratch
);
4911 notInt
.link(&m_jit
);
4914 JITCompiler::Zero
, valueRegs
.gpr(), GPRInfo::tagTypeNumberRegister
),
4915 data
->fallThrough
.block
);
4916 silentSpillAllRegisters(scratch
);
4917 callOperation(operationFindSwitchImmTargetForDouble
, scratch
, valueRegs
.gpr(), data
->switchTableIndex
);
4918 silentFillAllRegisters(scratch
);
4919 m_jit
.jump(scratch
);
4921 JITCompiler::Jump notInt
= m_jit
.branch32(
4922 JITCompiler::NotEqual
, valueRegs
.tagGPR(), TrustedImm32(JSValue::Int32Tag
));
4923 emitSwitchIntJump(data
, valueRegs
.payloadGPR(), scratch
);
4924 notInt
.link(&m_jit
);
4927 JITCompiler::AboveOrEqual
, valueRegs
.tagGPR(),
4928 TrustedImm32(JSValue::LowestTag
)),
4929 data
->fallThrough
.block
);
4930 silentSpillAllRegisters(scratch
);
4931 callOperation(operationFindSwitchImmTargetForDouble
, scratch
, valueRegs
, data
->switchTableIndex
);
4932 silentFillAllRegisters(scratch
);
4933 m_jit
.jump(scratch
);
4935 noResult(node
, UseChildrenCalledExplicitly
);
4940 RELEASE_ASSERT_NOT_REACHED();
4945 void SpeculativeJIT::emitSwitchCharStringJump(
4946 SwitchData
* data
, GPRReg value
, GPRReg scratch
)
4950 MacroAssembler::NotEqual
,
4951 MacroAssembler::Address(value
, JSString::offsetOfLength()),
4953 data
->fallThrough
.block
);
4955 m_jit
.loadPtr(MacroAssembler::Address(value
, JSString::offsetOfValue()), scratch
);
4957 addSlowPathGenerator(
4959 m_jit
.branchTestPtr(MacroAssembler::Zero
, scratch
),
4960 this, operationResolveRope
, scratch
, value
));
4962 m_jit
.loadPtr(MacroAssembler::Address(scratch
, StringImpl::dataOffset()), value
);
4964 JITCompiler::Jump is8Bit
= m_jit
.branchTest32(
4965 MacroAssembler::NonZero
,
4966 MacroAssembler::Address(scratch
, StringImpl::flagsOffset()),
4967 TrustedImm32(StringImpl::flagIs8Bit()));
4969 m_jit
.load16(MacroAssembler::Address(value
), scratch
);
4971 JITCompiler::Jump ready
= m_jit
.jump();
4973 is8Bit
.link(&m_jit
);
4974 m_jit
.load8(MacroAssembler::Address(value
), scratch
);
4977 emitSwitchIntJump(data
, scratch
, value
);
4980 void SpeculativeJIT::emitSwitchChar(Node
* node
, SwitchData
* data
)
4982 switch (node
->child1().useKind()) {
4984 SpeculateCellOperand
op1(this, node
->child1());
4985 GPRTemporary
temp(this);
4987 GPRReg op1GPR
= op1
.gpr();
4988 GPRReg tempGPR
= temp
.gpr();
4992 speculateString(node
->child1(), op1GPR
);
4993 emitSwitchCharStringJump(data
, op1GPR
, tempGPR
);
4994 noResult(node
, UseChildrenCalledExplicitly
);
4999 JSValueOperand
op1(this, node
->child1());
5000 GPRTemporary
temp(this);
5002 JSValueRegs op1Regs
= op1
.jsValueRegs();
5003 GPRReg tempGPR
= temp
.gpr();
5007 addBranch(branchNotCell(op1Regs
), data
->fallThrough
.block
);
5010 m_jit
.branchStructurePtr(
5011 MacroAssembler::NotEqual
,
5012 MacroAssembler::Address(op1Regs
.payloadGPR(), JSCell::structureIDOffset()),
5013 m_jit
.vm()->stringStructure
.get()),
5014 data
->fallThrough
.block
);
5016 emitSwitchCharStringJump(data
, op1Regs
.payloadGPR(), tempGPR
);
5017 noResult(node
, UseChildrenCalledExplicitly
);
5022 RELEASE_ASSERT_NOT_REACHED();
5027 bool SpeculativeJIT::StringSwitchCase::operator<(
5028 const SpeculativeJIT::StringSwitchCase
& other
) const
5030 unsigned minLength
= std::min(string
->length(), other
.string
->length());
5031 for (unsigned i
= 0; i
< minLength
; ++i
) {
5032 if (string
->at(i
) == other
.string
->at(i
))
5034 return string
->at(i
) < other
.string
->at(i
);
5036 return string
->length() < other
.string
->length();
5041 struct CharacterCase
{
5042 bool operator<(const CharacterCase
& other
) const
5044 return character
< other
.character
;
5052 } // anonymous namespace
5054 void SpeculativeJIT::emitBinarySwitchStringRecurse(
5055 SwitchData
* data
, const Vector
<SpeculativeJIT::StringSwitchCase
>& cases
,
5056 unsigned numChecked
, unsigned begin
, unsigned end
, GPRReg buffer
, GPRReg length
,
5057 GPRReg temp
, unsigned alreadyCheckedLength
, bool checkedExactLength
)
5059 static const bool verbose
= false;
5062 dataLog("We're down to the following cases, alreadyCheckedLength = ", alreadyCheckedLength
, ":\n");
5063 for (unsigned i
= begin
; i
< end
; ++i
) {
5064 dataLog(" ", cases
[i
].string
, "\n");
5069 jump(data
->fallThrough
.block
, ForceJump
);
5073 unsigned minLength
= cases
[begin
].string
->length();
5074 unsigned commonChars
= minLength
;
5075 bool allLengthsEqual
= true;
5076 for (unsigned i
= begin
+ 1; i
< end
; ++i
) {
5077 unsigned myCommonChars
= numChecked
;
5078 for (unsigned j
= numChecked
;
5079 j
< std::min(cases
[begin
].string
->length(), cases
[i
].string
->length());
5081 if (cases
[begin
].string
->at(j
) != cases
[i
].string
->at(j
)) {
5083 dataLog("string(", cases
[i
].string
, ")[", j
, "] != string(", cases
[begin
].string
, ")[", j
, "]\n");
5088 commonChars
= std::min(commonChars
, myCommonChars
);
5089 if (minLength
!= cases
[i
].string
->length())
5090 allLengthsEqual
= false;
5091 minLength
= std::min(minLength
, cases
[i
].string
->length());
5094 if (checkedExactLength
) {
5095 RELEASE_ASSERT(alreadyCheckedLength
== minLength
);
5096 RELEASE_ASSERT(allLengthsEqual
);
5099 RELEASE_ASSERT(minLength
>= commonChars
);
5102 dataLog("length = ", minLength
, ", commonChars = ", commonChars
, ", allLengthsEqual = ", allLengthsEqual
, "\n");
5104 if (!allLengthsEqual
&& alreadyCheckedLength
< minLength
)
5105 branch32(MacroAssembler::Below
, length
, Imm32(minLength
), data
->fallThrough
.block
);
5106 if (allLengthsEqual
&& (alreadyCheckedLength
< minLength
|| !checkedExactLength
))
5107 branch32(MacroAssembler::NotEqual
, length
, Imm32(minLength
), data
->fallThrough
.block
);
5109 for (unsigned i
= numChecked
; i
< commonChars
; ++i
) {
5111 MacroAssembler::NotEqual
, MacroAssembler::Address(buffer
, i
),
5112 TrustedImm32(cases
[begin
].string
->at(i
)), data
->fallThrough
.block
);
5115 if (minLength
== commonChars
) {
5116 // This is the case where one of the cases is a prefix of all of the other cases.
5117 // We've already checked that the input string is a prefix of all of the cases,
5118 // so we just check length to jump to that case.
5120 if (!ASSERT_DISABLED
) {
5121 ASSERT(cases
[begin
].string
->length() == commonChars
);
5122 for (unsigned i
= begin
+ 1; i
< end
; ++i
)
5123 ASSERT(cases
[i
].string
->length() > commonChars
);
5126 if (allLengthsEqual
) {
5127 RELEASE_ASSERT(end
== begin
+ 1);
5128 jump(cases
[begin
].target
, ForceJump
);
5132 branch32(MacroAssembler::Equal
, length
, Imm32(commonChars
), cases
[begin
].target
);
5134 // We've checked if the length is >= minLength, and then we checked if the
5135 // length is == commonChars. We get to this point if it is >= minLength but not
5136 // == commonChars. Hence we know that it now must be > minLength, i.e., that
5137 // it's >= minLength + 1.
5138 emitBinarySwitchStringRecurse(
5139 data
, cases
, commonChars
, begin
+ 1, end
, buffer
, length
, temp
, minLength
+ 1, false);
5143 // At this point we know that the string is longer than commonChars, and we've only
5144 // verified commonChars. Use a binary switch on the next unchecked character, i.e.
5145 // string[commonChars].
5147 RELEASE_ASSERT(end
>= begin
+ 2);
5149 m_jit
.load8(MacroAssembler::Address(buffer
, commonChars
), temp
);
5151 Vector
<CharacterCase
> characterCases
;
5152 CharacterCase currentCase
;
5153 currentCase
.character
= cases
[begin
].string
->at(commonChars
);
5154 currentCase
.begin
= begin
;
5155 currentCase
.end
= begin
+ 1;
5156 for (unsigned i
= begin
+ 1; i
< end
; ++i
) {
5157 if (cases
[i
].string
->at(commonChars
) != currentCase
.character
) {
5159 dataLog("string(", cases
[i
].string
, ")[", commonChars
, "] != string(", cases
[begin
].string
, ")[", commonChars
, "]\n");
5160 currentCase
.end
= i
;
5161 characterCases
.append(currentCase
);
5162 currentCase
.character
= cases
[i
].string
->at(commonChars
);
5163 currentCase
.begin
= i
;
5164 currentCase
.end
= i
+ 1;
5166 currentCase
.end
= i
+ 1;
5168 characterCases
.append(currentCase
);
5170 Vector
<int64_t> characterCaseValues
;
5171 for (unsigned i
= 0; i
< characterCases
.size(); ++i
)
5172 characterCaseValues
.append(characterCases
[i
].character
);
5174 BinarySwitch
binarySwitch(temp
, characterCaseValues
, BinarySwitch::Int32
);
5175 while (binarySwitch
.advance(m_jit
)) {
5176 const CharacterCase
& myCase
= characterCases
[binarySwitch
.caseIndex()];
5177 emitBinarySwitchStringRecurse(
5178 data
, cases
, commonChars
+ 1, myCase
.begin
, myCase
.end
, buffer
, length
,
5179 temp
, minLength
, allLengthsEqual
);
5182 addBranch(binarySwitch
.fallThrough(), data
->fallThrough
.block
);
5185 void SpeculativeJIT::emitSwitchStringOnString(SwitchData
* data
, GPRReg string
)
5187 data
->didUseJumpTable
= true;
5189 bool canDoBinarySwitch
= true;
5190 unsigned totalLength
= 0;
5192 for (unsigned i
= data
->cases
.size(); i
--;) {
5193 StringImpl
* string
= data
->cases
[i
].value
.stringImpl();
5194 if (!string
->is8Bit()) {
5195 canDoBinarySwitch
= false;
5198 if (string
->length() > Options::maximumBinaryStringSwitchCaseLength()) {
5199 canDoBinarySwitch
= false;
5202 totalLength
+= string
->length();
5205 if (!canDoBinarySwitch
|| totalLength
> Options::maximumBinaryStringSwitchTotalLength()) {
5208 operationSwitchString
, string
, data
->switchTableIndex
, string
);
5213 GPRTemporary
length(this);
5214 GPRTemporary
temp(this);
5216 GPRReg lengthGPR
= length
.gpr();
5217 GPRReg tempGPR
= temp
.gpr();
5219 m_jit
.load32(MacroAssembler::Address(string
, JSString::offsetOfLength()), lengthGPR
);
5220 m_jit
.loadPtr(MacroAssembler::Address(string
, JSString::offsetOfValue()), tempGPR
);
5222 MacroAssembler::JumpList slowCases
;
5223 slowCases
.append(m_jit
.branchTestPtr(MacroAssembler::Zero
, tempGPR
));
5224 slowCases
.append(m_jit
.branchTest32(
5225 MacroAssembler::Zero
,
5226 MacroAssembler::Address(tempGPR
, StringImpl::flagsOffset()),
5227 TrustedImm32(StringImpl::flagIs8Bit())));
5229 m_jit
.loadPtr(MacroAssembler::Address(tempGPR
, StringImpl::dataOffset()), string
);
5231 Vector
<StringSwitchCase
> cases
;
5232 for (unsigned i
= 0; i
< data
->cases
.size(); ++i
) {
5234 StringSwitchCase(data
->cases
[i
].value
.stringImpl(), data
->cases
[i
].target
.block
));
5237 std::sort(cases
.begin(), cases
.end());
5239 emitBinarySwitchStringRecurse(
5240 data
, cases
, 0, 0, cases
.size(), string
, lengthGPR
, tempGPR
, 0, false);
5242 slowCases
.link(&m_jit
);
5243 silentSpillAllRegisters(string
);
5244 callOperation(operationSwitchString
, string
, data
->switchTableIndex
, string
);
5245 silentFillAllRegisters(string
);
5249 void SpeculativeJIT::emitSwitchString(Node
* node
, SwitchData
* data
)
5251 switch (node
->child1().useKind()) {
5252 case StringIdentUse
: {
5253 SpeculateCellOperand
op1(this, node
->child1());
5254 GPRTemporary
temp(this);
5256 GPRReg op1GPR
= op1
.gpr();
5257 GPRReg tempGPR
= temp
.gpr();
5259 speculateString(node
->child1(), op1GPR
);
5260 speculateStringIdentAndLoadStorage(node
->child1(), op1GPR
, tempGPR
);
5262 Vector
<int64_t> identifierCaseValues
;
5263 for (unsigned i
= 0; i
< data
->cases
.size(); ++i
) {
5264 identifierCaseValues
.append(
5265 static_cast<int64_t>(bitwise_cast
<intptr_t>(data
->cases
[i
].value
.stringImpl())));
5268 BinarySwitch
binarySwitch(tempGPR
, identifierCaseValues
, BinarySwitch::IntPtr
);
5269 while (binarySwitch
.advance(m_jit
))
5270 jump(data
->cases
[binarySwitch
.caseIndex()].target
.block
, ForceJump
);
5271 addBranch(binarySwitch
.fallThrough(), data
->fallThrough
.block
);
5278 SpeculateCellOperand
op1(this, node
->child1());
5280 GPRReg op1GPR
= op1
.gpr();
5284 speculateString(node
->child1(), op1GPR
);
5285 emitSwitchStringOnString(data
, op1GPR
);
5286 noResult(node
, UseChildrenCalledExplicitly
);
5291 JSValueOperand
op1(this, node
->child1());
5293 JSValueRegs op1Regs
= op1
.jsValueRegs();
5297 addBranch(branchNotCell(op1Regs
), data
->fallThrough
.block
);
5300 m_jit
.branchStructurePtr(
5301 MacroAssembler::NotEqual
,
5302 MacroAssembler::Address(op1Regs
.payloadGPR(), JSCell::structureIDOffset()),
5303 m_jit
.vm()->stringStructure
.get()),
5304 data
->fallThrough
.block
);
5306 emitSwitchStringOnString(data
, op1Regs
.payloadGPR());
5307 noResult(node
, UseChildrenCalledExplicitly
);
5312 RELEASE_ASSERT_NOT_REACHED();
5317 void SpeculativeJIT::emitSwitch(Node
* node
)
5319 SwitchData
* data
= node
->switchData();
5320 switch (data
->kind
) {
5322 emitSwitchImm(node
, data
);
5326 emitSwitchChar(node
, data
);
5329 case SwitchString
: {
5330 emitSwitchString(node
, data
);
5333 RELEASE_ASSERT_NOT_REACHED();
5336 void SpeculativeJIT::addBranch(const MacroAssembler::JumpList
& jump
, BasicBlock
* destination
)
5338 for (unsigned i
= jump
.jumps().size(); i
--;)
5339 addBranch(jump
.jumps()[i
], destination
);
5342 void SpeculativeJIT::linkBranches()
5344 for (size_t i
= 0; i
< m_branches
.size(); ++i
) {
5345 BranchRecord
& branch
= m_branches
[i
];
5346 branch
.jump
.linkTo(m_jit
.blockHeads()[branch
.destination
->index
], &m_jit
);
5351 void SpeculativeJIT::compileStoreBarrier(Node
* node
)
5353 switch (node
->op()) {
5354 case StoreBarrier
: {
5355 SpeculateCellOperand
base(this, node
->child1());
5356 GPRTemporary
scratch1(this);
5357 GPRTemporary
scratch2(this);
5359 writeBarrier(base
.gpr(), scratch1
.gpr(), scratch2
.gpr());
5363 case StoreBarrierWithNullCheck
: {
5364 JSValueOperand
base(this, node
->child1());
5365 GPRTemporary
scratch1(this);
5366 GPRTemporary
scratch2(this);
5369 JITCompiler::Jump isNull
= m_jit
.branchTest64(JITCompiler::Zero
, base
.gpr());
5370 writeBarrier(base
.gpr(), scratch1
.gpr(), scratch2
.gpr());
5372 JITCompiler::Jump isNull
= m_jit
.branch32(JITCompiler::Equal
, base
.tagGPR(), TrustedImm32(JSValue::EmptyValueTag
));
5373 writeBarrier(base
.payloadGPR(), scratch1
.gpr(), scratch2
.gpr());
5375 isNull
.link(&m_jit
);
5380 RELEASE_ASSERT_NOT_REACHED();
5387 void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell
, GPRReg scratch1
, GPRReg scratch2
)
5389 ASSERT(scratch1
!= scratch2
);
5390 WriteBarrierBuffer
* writeBarrierBuffer
= &m_jit
.vm()->heap
.m_writeBarrierBuffer
;
5391 m_jit
.move(TrustedImmPtr(writeBarrierBuffer
), scratch1
);
5392 m_jit
.load32(MacroAssembler::Address(scratch1
, WriteBarrierBuffer::currentIndexOffset()), scratch2
);
5393 JITCompiler::Jump needToFlush
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, scratch2
, MacroAssembler::Address(scratch1
, WriteBarrierBuffer::capacityOffset()));
5395 m_jit
.add32(TrustedImm32(1), scratch2
);
5396 m_jit
.store32(scratch2
, MacroAssembler::Address(scratch1
, WriteBarrierBuffer::currentIndexOffset()));
5398 m_jit
.loadPtr(MacroAssembler::Address(scratch1
, WriteBarrierBuffer::bufferOffset()), scratch1
);
5399 // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
5400 m_jit
.storePtr(cell
, MacroAssembler::BaseIndex(scratch1
, scratch2
, MacroAssembler::ScalePtr
, static_cast<int32_t>(-sizeof(void*))));
5402 JITCompiler::Jump done
= m_jit
.jump();
5403 needToFlush
.link(&m_jit
);
5405 silentSpillAllRegisters(InvalidGPRReg
);
5406 callOperation(operationFlushWriteBarrierBuffer
, cell
);
5407 silentFillAllRegisters(InvalidGPRReg
);
5412 void SpeculativeJIT::storeToWriteBarrierBuffer(JSCell
* cell
, GPRReg scratch1
, GPRReg scratch2
)
5414 ASSERT(scratch1
!= scratch2
);
5415 WriteBarrierBuffer
* writeBarrierBuffer
= &m_jit
.vm()->heap
.m_writeBarrierBuffer
;
5416 m_jit
.move(TrustedImmPtr(writeBarrierBuffer
), scratch1
);
5417 m_jit
.load32(MacroAssembler::Address(scratch1
, WriteBarrierBuffer::currentIndexOffset()), scratch2
);
5418 JITCompiler::Jump needToFlush
= m_jit
.branch32(MacroAssembler::AboveOrEqual
, scratch2
, MacroAssembler::Address(scratch1
, WriteBarrierBuffer::capacityOffset()));
5420 m_jit
.add32(TrustedImm32(1), scratch2
);
5421 m_jit
.store32(scratch2
, MacroAssembler::Address(scratch1
, WriteBarrierBuffer::currentIndexOffset()));
5423 m_jit
.loadPtr(MacroAssembler::Address(scratch1
, WriteBarrierBuffer::bufferOffset()), scratch1
);
5424 // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
5425 m_jit
.storePtr(TrustedImmPtr(cell
), MacroAssembler::BaseIndex(scratch1
, scratch2
, MacroAssembler::ScalePtr
, static_cast<int32_t>(-sizeof(void*))));
5427 JITCompiler::Jump done
= m_jit
.jump();
5428 needToFlush
.link(&m_jit
);
5431 silentSpillAllRegisters(InvalidGPRReg
);
5432 callOperation(operationFlushWriteBarrierBuffer
, cell
);
5433 silentFillAllRegisters(InvalidGPRReg
);
5438 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR
, JSCell
* value
, GPRReg scratch1
, GPRReg scratch2
)
5440 if (Heap::isMarked(value
))
5443 JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered
= m_jit
.checkMarkByte(ownerGPR
);
5444 storeToWriteBarrierBuffer(ownerGPR
, scratch1
, scratch2
);
5445 ownerNotMarkedOrAlreadyRemembered
.link(&m_jit
);
5448 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR
, GPRReg scratch1
, GPRReg scratch2
)
5450 JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered
= m_jit
.checkMarkByte(ownerGPR
);
5451 storeToWriteBarrierBuffer(ownerGPR
, scratch1
, scratch2
);
5452 ownerNotMarkedOrAlreadyRemembered
.link(&m_jit
);
5455 void SpeculativeJIT::compileStoreBarrier(Node
* node
)
5457 DFG_NODE_DO_TO_CHILDREN(m_jit
.graph(), node
, speculate
);
5460 #endif // ENABLE(GGC)
5462 } } // namespace JSC::DFG