/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

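// Allocates a JSArray together with its butterfly inline. The fast path carves
// the storage out of the copied space, writes the public and vector lengths
// directly, and (for double arrays) fills the uninitialized tail with PNaN
// holes; any allocation failure branches to a slow path that calls
// operationNewArrayWithSize instead.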
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
        structure, numElements));
}

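// Materializes the argument count for the given (possibly inlined) call frame.
// For a non-varargs inline frame the count is a compile-time constant;
// otherwise it is loaded from the frame's ArgumentCount slot and adjusted to
// exclude |this| when requested.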
void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister;
        if (!inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

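// OSR exit fuzzing: every speculation check bumps a global counter, and once
// the counter reaches the configured threshold (Options::fireOSRExitFuzzAt /
// fireOSRExitFuzzAtOrAfter) the check is forced to fail. The intent is to
// exercise exit paths deterministically that would otherwise fire only rarely.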
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}

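// Each speculationCheck() records an OSR exit: the failing jump(s) are
// appended to the current exit info and an OSRExit entry is pushed onto the
// JIT code, keyed to the current position in the variable event stream so the
// exit compiler can later reconstruct the bytecode state at this point.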
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;

    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }

    result.merge(RegisterSet::specialRegisters());

    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTF::move(slowPathGenerator));
}

void SpeculativeJIT::runSlowPathGenerators()
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

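// "Silent" spill/fill plans describe how to save a live register across a call
// and restore it afterwards without disturbing the recorded generation info.
// Note that boxed Int52 values are kept left-shifted by
// JSValue::int52ShiftAmount; the Load64ShiftInt52Left/Right fill actions
// convert between the strict and shifted representations on refill.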
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

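// Emits the branch(es) taken when the object's indexing type does not match
// the ArrayMode we speculated on. tempGPR holds the indexing type byte; it is
// masked and compared against the expected shape (including the IsArray bit
// when the mode requires a JSArray).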
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

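// Emits the type check that a CheckArray node requires: for the JS array
// shapes this tests the indexing type byte via jumpSlowForUnwantedArrayMode(),
// while arguments objects and typed arrays are checked by JSType instead. If
// abstract interpretation already proved the check, nothing is emitted.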
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

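// 'in' with a constant atomic string key compiles to a patchable jump plus a
// StructureStubInfo, so the inline-cache repatching machinery can later splice
// in a structure check at the jump; everything else funnels into the generic
// operationGenericIn call.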
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTF::move(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);
    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

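// The compilePeepHole*Branch() helpers fuse a compare node with the Branch
// that immediately follows it, emitting a single compare-and-branch. When the
// taken block is the fall-through successor, the condition is inverted and
// the targets swapped so the common case falls through.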
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isBooleanConstant()) {
        bool imm = node->child1()->asBoolean();
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (node->child2()->isBooleanConstant()) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = node->child2()->asBoolean();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateInt32Operand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so there can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}

void SpeculativeJIT::noticeOSRBirth(Node* node)
{
    if (!node->hasVirtualRegister())
        return;

    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    info.noticeOSRBirth(*m_stream, node, virtualRegister);
}

void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);

    Node* child = node->child1().node();
    noticeOSRBirth(child);

    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
}

void SpeculativeJIT::bail(AbortReason reason)
{
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}

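// Code generation proceeds block by block, node by node. The abstract
// interpreter (m_interpreter) runs alongside code generation so each node is
// compiled against the most precise type information the CFA proved; if the
// abstract state becomes contradictory, we bail out of the compile.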
void SpeculativeJIT::compileCurrentBlock()
{
    ASSERT(m_compileOkay);

    if (!m_block)
        return;

    ASSERT(m_block->isReachable);

    m_jit.blockHeads()[m_block->index] = m_jit.label();

    if (!m_block->intersectionOfCFAHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.abortWithReason(DFGUnreachableBasicBlock);
        return;
    }

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();
    m_jit.jitAssertTagsInPlace();
    m_jit.jitAssertArgumentCountSane();

    m_state.reset();
    m_state.beginBasicBlock(m_block);

    for (size_t i = m_block->variablesAtHead.size(); i--;) {
        int operand = m_block->variablesAtHead.operandForIndex(i);
        Node* node = m_block->variablesAtHead[i];
        if (!node)
            continue; // No need to record dead SetLocal's.

        VariableAccessData* variable = node->variableAccessData();
        DataFormat format;
        if (!node->refCount())
            continue; // No need to record dead SetLocal's.
        format = dataFormatFor(variable->flushFormat());
        m_stream->appendAndLog(
            VariableEvent::setLocal(
                VirtualRegister(operand),
                variable->machineLocal(),
                format));
    }

    m_codeOriginForExitTarget = CodeOrigin();
    m_codeOriginForExitProfile = CodeOrigin();

    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);

        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail(DFGBailedAtTopOfBlock);
            return;
        }

        if (ASSERT_DISABLED)
            m_canExit = true; // Essentially disable the assertions.
        else
            m_canExit = mayExit(m_jit.graph(), m_currentNode);

        m_interpreter.startExecuting();
        m_jit.setForNode(m_currentNode);
        m_codeOriginForExitTarget = m_currentNode->origin.forExit;
        m_codeOriginForExitProfile = m_currentNode->origin.semantic;
        m_lastGeneratedNode = m_currentNode->op();

        ASSERT(m_currentNode->shouldGenerate());

        if (verboseCompilationEnabled()) {
            dataLogF(
                "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                (int)m_currentNode->index(),
                m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
            dataLog("\n");
        }

        compile(m_currentNode);

        if (belongsInMinifiedGraph(m_currentNode->op()))
            m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.clearRegisterAllocationOffsets();
#endif

        if (!m_compileOkay) {
            bail(DFGBailedAtEndOfNode);
            return;
        }

        // Make sure that the abstract state is rematerialized for the next node.
        m_interpreter.executeEffects(m_indexInBlock);
    }

    // Perform the most basic verification that children have been used correctly.
    if (!ASSERT_DISABLED) {
        for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
            GenerationInfo& info = m_generationInfo[index];
            RELEASE_ASSERT(!info.alive());
        }
    }
}

// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    m_codeOriginForExitTarget = CodeOrigin(0);
    m_codeOriginForExitProfile = CodeOrigin(0);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        if (!node) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }

        ASSERT(node->op() == SetArgument);
        ASSERT(node->shouldGenerate());

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();

        if (format == FlushedJSValue)
            continue;

        VirtualRegister virtualRegister = variableAccessData->local();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));

#if USE(JSVALUE64)
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
            break;
        }
        case FlushedBoolean: {
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }

    m_isCheckingArgumentTypes = false;
}

bool SpeculativeJIT::compile()
{
    checkArgumentTypes();

    ASSERT(!m_currentNode);
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        m_jit.setForBlockIndex(blockIndex);
        m_block = m_jit.graph().block(blockIndex);
        compileCurrentBlock();
    }
    linkBranches();
    return true;
}

void SpeculativeJIT::createOSREntries()
{
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().block(blockIndex);
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;

        // Currently we don't have OSR entry trampolines. We could add them here if needed.
        m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
    }
}

void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
{
    unsigned osrEntryIndex = 0;
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().block(blockIndex);
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;
        m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
    }
    ASSERT(osrEntryIndex == m_osrEntryHeads.size());

    if (verboseCompilationEnabled()) {
        DumpContext dumpContext;
        dataLog("OSR Entries:\n");
        for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
            dataLog("    ", inContext(entryData, &dumpContext), "\n");
        if (!dumpContext.isEmpty())
            dumpContext.dump(WTF::dataFile());
    }
}

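// PutByVal on a double array: the incoming value is type-checked to be a real
// number (the DoubleNotEqualOrUnordered branch rejects NaN, since arbitrary
// NaN bit patterns could be confused with boxed JSValues in the double-array
// representation), then stored as a raw double; out-of-bounds stores go to a
// slow path that may grow the array.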
void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
{
    Edge child3 = m_jit.graph().varArgChild(node, 2);
    Edge child4 = m_jit.graph().varArgChild(node, 3);

    ArrayMode arrayMode = node->arrayMode();

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();

    SpeculateDoubleOperand value(this, child3);

    FPRReg valueReg = value.fpr();

    DFG_TYPE_CHECK(
        JSValueRegs(), child3, SpecFullRealNumber,
        m_jit.branchDouble(
            MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));

    if (!m_compileOkay)
        return;

    StorageOperand storage(this, child4);
    GPRReg storageReg = storage.gpr();

    if (node->op() == PutByValAlias) {
        // Store the value to the array.
        GPRReg propertyReg = property.gpr();
        FPRReg valueReg = value.fpr();
        m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));

        noResult(m_currentNode);
        return;
    }

    GPRTemporary temporary;
    GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);

    MacroAssembler::Jump slowCase;

    if (arrayMode.isInBounds()) {
        speculationCheck(
            OutOfBounds, JSValueRegs(), 0,
            m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
    } else {
        MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));

        slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));

        if (!arrayMode.isOutOfBounds())
            speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);

        m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
        m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));

        inBounds.link(&m_jit);
    }

    m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));

    base.use();
    property.use();
    value.use();
    storage.use();

    if (arrayMode.isOutOfBounds()) {
        addSlowPathGenerator(
            slowPathCall(
                slowCase, this,
                m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
                NoResult, baseReg, propertyReg, valueReg));
    }

    noResult(m_currentNode, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    int32Result(scratchReg, m_currentNode);
}

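// String[i]: loads the character via the 8-bit or 16-bit path and returns one
// of the VM's precomputed single-character strings. Characters >= 0x100 and,
// when allowed, out-of-bounds indices are handled on slow paths; the
// out-of-bounds case can return undefined cheaply only while the String
// prototype chain watchpoints below hold.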
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);

    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
    m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);

    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));

    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif

        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
        if (globalObject->stringPrototypeChainIsSane()) {
            // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
            // loads return a trivial value". Something like SaneChainOutOfBounds. This should
            // speculate that we don't take negative out-of-bounds, or better yet, it should rely
            // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
            // indexed properties either.
            // https://bugs.webkit.org/show_bug.cgi?id=144668
            m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
            m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());

#if USE(JSVALUE64)
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
#else
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
                baseReg, propertyReg));
#endif
        } else {
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    resultTagReg, scratchReg, baseReg, propertyReg));
#endif
        }

#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}

void SpeculativeJIT::compileFromCharCode(Node* node)
{
    SpeculateStrictInt32Operand property(this, node->child1());
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();

    JITCompiler::JumpList slowCases;
    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);

    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
    cellResult(scratchReg, m_currentNode);
}
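
// Inspect the current register format of the operand to decide which ValueToInt32
// strategy applies: already an int32, a boxed JSValue that needs conversion, or a
// format (cell or boolean) that proves the speculation has already failed.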
GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
{
    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    case DataFormatBoolean:
    case DataFormatCell:
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return GeneratedOperandTypeUnknown;

    case DataFormatNone:
    case DataFormatJSCell:
    case DataFormatJS:
    case DataFormatJSBoolean:
    case DataFormatJSDouble:
        return GeneratedOperandJSValue;

    case DataFormatJSInt32:
    case DataFormatInt32:
        return GeneratedOperandInteger;

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return GeneratedOperandTypeUnknown;
    }
}
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
        int32Result(resultGPR, node, DataFormatInt32);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        GPRTemporary result(this);
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
        JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);

        addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));

        int32Result(gpr, node);
        return;
    }

    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            int32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);

                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));

                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());

                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();

            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                if (node->child1().useKind() == NumberUse) {
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));

                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), ~SpecCell,
                        m_jit.branchIfCell(op1.jsValueRegs()));

                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());

                    isBoolean.link(&m_jit);
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());

                    isNumber.link(&m_jit);
                }

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            int32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (doesOverflow(node->arithMode())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.

        SpeculateInt32Operand op1(this, node->child1());
        FPRTemporary result(this);

        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();

        m_jit.convertInt32ToDouble(inputGPR, outputFPR);
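
        // The operand is really a uint32, but convertInt32ToDouble treated it as
        // signed. If the sign bit was set, the double is exactly 2^32 too small, so
        // add 2^32 back. E.g. 0xFFFFFFFF (4294967295) converts to -1.0, and
        // -1.0 + 2^32 is 4294967295.0.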
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);

        doubleResult(outputFPR, node);
        return;
    }

    RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);

    SpeculateInt32Operand op1(this, node->child1());
    GPRTemporary result(this);

    m_jit.move(op1.gpr(), result.gpr());

    speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));

    int32Result(result.gpr(), node, op1.format());
}
void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);

    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList failureCases;
    RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
    m_jit.branchConvertDoubleToInt32(
        valueFPR, resultGPR, failureCases, scratchFPR,
        shouldCheckNegativeZero(node->arithMode()));
    speculationCheck(Overflow, JSValueRegs(), 0, failureCases);

    int32Result(resultGPR, node);
}
void SpeculativeJIT::compileDoubleRep(Node* node)
{
    switch (node->child1().useKind()) {
    case RealNumberUse: {
        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

        JSValueRegs op1Regs = op1.jsValueRegs();
        FPRReg resultFPR = result.fpr();

#if USE(JSVALUE64)
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.move(op1Regs.gpr(), tempGPR);
        m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
#else
        FPRTemporary temp(this);
        FPRReg tempFPR = temp.fpr();
        unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
#endif

        JITCompiler::Jump done = m_jit.branchDouble(
            JITCompiler::DoubleEqual, resultFPR, resultFPR);

        DFG_TYPE_CHECK(
            op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
        m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);

        done.link(&m_jit);

        doubleResult(resultFPR, node);
        return;
    }

    case NotCellUse:
    case NumberUse: {
        ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.

        SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
        if (isInt32Speculation(possibleTypes)) {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            FPRTemporary result(this);
            m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
            doubleResult(result.fpr(), node);
            return;
        }

        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

#if USE(JSVALUE64)
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;

        JITCompiler::Jump isInteger = m_jit.branch64(
            MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
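
        // For NotCellUse we implement the ToNumber semantics of the remaining
        // non-cell values inline: null and false convert to 0, true converts to 1,
        // and undefined converts to NaN.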
        if (node->child1().useKind() == NotCellUse) {
            JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
            JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));

            static const double zero = 0;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);

            JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
            done.append(isNull);

            DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
                m_jit.branchTest64(JITCompiler::NonZero, op1GPR, TrustedImm32(static_cast<int32_t>(~1))));

            JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
            static const double one = 1;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
            done.append(isFalse);

            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }

        m_jit.move(op1GPR, tempGPR);
        unboxDouble(tempGPR, resultFPR);
        done.append(m_jit.jump());

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1GPR, resultFPR);
        done.link(&m_jit);
#else // USE(JSVALUE64) -> this is the 32_64 case
        FPRTemporary temp(this);

        GPRReg op1TagGPR = op1.tagGPR();
        GPRReg op1PayloadGPR = op1.payloadGPR();
        FPRReg tempFPR = temp.fpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;

        JITCompiler::Jump isInteger = m_jit.branch32(
            MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));

        if (node->child1().useKind() == NotCellUse) {
            JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
            JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));

            static const double zero = 0;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);

            JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
            done.append(isNull);

            DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));

            JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
            static const double one = 1;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
            done.append(isFalse);

            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }

        unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
        done.append(m_jit.jump());

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
        done.link(&m_jit);
#endif // USE(JSVALUE64)

        doubleResult(resultFPR, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        FPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        FPRReg resultFPR = result.fpr();

        m_jit.convertInt64ToDouble(valueGPR, resultFPR);

        doubleResult(resultFPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
void SpeculativeJIT::compileValueRep(Node* node)
{
    switch (node->child1().useKind()) {
    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        JSValueRegsTemporary result(this);

        FPRReg valueFPR = value.fpr();
        JSValueRegs resultRegs = result.regs();

        // It's very tempting to in-place filter the value to indicate that it's not impure NaN
        // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
        // subject to a prior SetLocal, filtering the value would imply that the corresponding
        // local was purified.
        if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
            m_jit.purifyNaN(valueFPR);

        boxDouble(valueFPR, resultRegs);

        jsValueResult(resultRegs, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();

        boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);

        jsValueResult(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
static double clampDoubleToByte(double d)
{
    d += 0.5;
    if (!(d > 0))
        d = 0;
    else if (d > 255)
        d = 255;
    return d;
}
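
// The +0.5 bias plus the caller's later toInt32 truncation implements round-half-up:
// clampDoubleToByte(200.7) returns 201.2, which truncates to 201. The !(d > 0) test
// is written so that NaN also clamps to 0. compileClampIntegerToByte below is the
// in-place integer analogue.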
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    jit.xorPtr(result, result);
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    clamped.link(&jit);
    inBounds.link(&jit);
}
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);
    MacroAssembler::Jump truncatedInt = jit.jump();

    tooSmall.link(&jit);
    jit.xorPtr(result, result);
    MacroAssembler::Jump zeroed = jit.jump();

    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);

    truncatedInt.link(&jit);
    zeroed.link(&jit);
}
JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
{
    if (node->op() == PutByValAlias)
        return JITCompiler::Jump();
    JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
        m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
    if (view) {
        uint32_t length = view->length();
        Node* indexNode = m_jit.graph().child(node, 1).node();
        if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
            return JITCompiler::Jump();
        return m_jit.branch32(
            MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
    }
    return m_jit.branch32(
        MacroAssembler::AboveOrEqual, indexGPR,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
}
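
// Emits the bounds check as a speculation check. jumpForTypedArrayOutOfBounds
// returns an unset jump when no check is needed (PutByValAlias, or a constant
// index proven in-bounds against a foldable view), so handle that case first.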
void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
{
    JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
    if (!jump.isSet())
        return;
    speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
}
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));

    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (isSigned(type))
            m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    if (elementSize(type) < 4 || isSigned(type)) {
        int32Result(resultReg, node);
        return;
    }

    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32()) {
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
        int32Result(resultReg, node);
        return;
    }

#if USE(JSVALUE64)
    if (node->shouldSpeculateMachineInt()) {
        m_jit.zeroExtend32ToPtr(resultReg, resultReg);
        strictInt52Result(resultReg, node);
        return;
    }
#endif

    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
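
// PutByVal into an int typed array. The value can arrive as a number constant, an
// Int32, an Int52, or a DoubleRep; each path materializes it as an int32 in a
// scratch GPR, clamping to [0, 255] first when the target array is clamped.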
void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));

    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();

    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    GPRTemporary value;
    GPRReg valueGPR = InvalidGPRReg;

    if (valueUse->isConstant()) {
        JSValue jsValue = valueUse->asJSValue();
        if (!jsValue.isNumber()) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        double d = jsValue.asNumber();
        if (isClamped(type)) {
            ASSERT(elementSize(type) == 1);
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }

#if USE(JSVALUE64)
        case Int52RepUse: {
            SpeculateStrictInt52Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                MacroAssembler::Jump inBounds = m_jit.branch64(
                    MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
                MacroAssembler::Jump tooBig = m_jit.branch64(
                    MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
                m_jit.move(TrustedImm32(0), scratchReg);
                MacroAssembler::Jump clamped = m_jit.jump();
                tooBig.link(&m_jit);
                m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
                clamped.link(&m_jit);
                inBounds.link(&m_jit);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
#endif // USE(JSVALUE64)

        case DoubleRepUse: {
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);

                MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
                    fpr, gpr, MacroAssembler::BranchIfTruncateFailed);

                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));

                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));

    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 4:
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8:
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    doubleResult(resultReg, node);
}
void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));

    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();

    Edge baseUse = m_jit.graph().varArgChild(node, 0);
    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    SpeculateDoubleOperand valueOp(this, valueUse);
    FPRTemporary scratch(this);
    FPRReg valueFPR = valueOp.fpr();
    FPRReg scratchFPR = scratch.fpr();

    ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));

    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 4: {
        m_jit.moveDouble(valueFPR, scratchFPR);
        m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
        m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    }
    case 8:
        m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
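
// The fast path of instanceof: after checking that the prototype operand is an
// object, walk the value's prototype chain, structure by structure, until we
// either hit the prototype (result true) or fall off the chain (result false).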
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
{
    // Check that prototype is an object.
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));

    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);

    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
#if USE(JSVALUE64)
    m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
#else
    m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif

    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();

    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif

    putResult.link(&m_jit);
}
void SpeculativeJIT::compileInstanceOf(Node* node)
{
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.

        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);
        GPRTemporary scratch2(this);

        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();
        GPRReg scratch2Reg = scratch2.gpr();

        MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
        GPRReg valueReg = value.jsValueRegs().payloadGPR();
        moveFalseTo(scratchReg);

        MacroAssembler::Jump done = m_jit.jump();

        isCell.link(&m_jit);

        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

        done.link(&m_jit);

        blessedBooleanResult(scratchReg, node);
        return;
    }

    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);

    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();
    GPRReg scratch2Reg = scratch2.gpr();

    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

    blessedBooleanResult(scratchReg, node);
}
void SpeculativeJIT::compileAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (!shouldCheckOverflow(node->arithMode())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);

            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }

        int32Result(gprResult, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.add64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
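
// MakeRope: inline-allocates a JSRopeString and stores its 2 or 3 fibers. The
// rope's flags are the AND of the fibers' flags (so it is 8-bit only if every
// fiber is), and its length is the checked sum of the fiber lengths; allocation
// failure or length overflow takes a slow path.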
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);

    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();

    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);

    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    if (!ASSERT_DISABLED) {
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        speculationCheck(
            Uncountable, JSValueSource(), nullptr,
            m_jit.branchAdd32(
                JITCompiler::Overflow,
                JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
    }
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    if (!ASSERT_DISABLED) {
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));

    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    cellResult(resultGPR, node);
}
void SpeculativeJIT::compileArithClz32(Node* node)
{
    ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
    SpeculateInt32Operand value(this, node->child1());
    GPRTemporary result(this, Reuse, value);
    GPRReg valueReg = value.gpr();
    GPRReg resultReg = result.gpr();
    m_jit.countLeadingZeros32(valueReg, resultReg);
    int32Result(resultReg, node);
}
void SpeculativeJIT::compileArithSub(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.sub32(Imm32(imm2), result.gpr());
            } else {
                GPRTemporary scratch(this);
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
            }

            int32Result(result.gpr(), node);
            return;
        }

        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            m_jit.move(Imm32(imm1), result.gpr());
            if (!shouldCheckOverflow(node->arithMode()))
                m_jit.sub32(op2.gpr(), result.gpr());
            else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub32(op2.gpr(), result.gpr());
        } else
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.subDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);

        m_jit.move(op1.gpr(), result.gpr());

        // Note: there is no notion of being not used as a number, but someone
        // caring about negative zero.

        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.neg32(result.gpr());
        else if (!shouldCheckNegativeZero(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));

        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            GPRTemporary result(this);
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();
            m_jit.move(op1GPR, resultGPR);
            m_jit.neg64(resultGPR);
            if (shouldCheckNegativeZero(node->arithMode())) {
                speculationCheck(
                    NegativeZero, JSValueRegs(), 0,
                    m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
            }
            int52Result(resultGPR, node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        GPRTemporary result(this);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
        if (shouldCheckNegativeZero(node->arithMode())) {
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
        }
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);

        m_jit.negateDouble(op1.fpr(), result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();

        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }

        // Check for negative zero, if the users of this node care about such things.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));

        // This is super clever. We want to do an int52 multiplication and check the
        // int52 overflow bit. There is no direct hardware support for this, but we do
        // have the ability to do an int64 multiplication and check the int64 overflow
        // bit. We leverage that. Consider that a, b are int52 numbers inside int64
        // registers, with the high 12 bits being sign-extended. We can do:
        //
        //     (a * (b << 12))
        //
        // This will give us a left-shifted int52 (value is in high 52 bits, low 12
        // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
        // multiplication overflows is identical to whether the 'a * b' 52-bit
        // multiplication overflows.
        //
        // In our nomenclature, this is:
        //
        //     strictInt52(a) * int52(b) => int52
        //
        // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
        // bits.
        //
        // We don't care which of op1 or op2 serves as the left-shifted operand, so
        // we just do whatever is more convenient for op1 and have op2 do the
        // opposite. This ensures that we do at most one shift.
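        //
        // Worked example: a = 3 held unshifted (strictInt52), b = 5 held as 5 << 12
        // (int52). The int64 product is (3 * 5) << 12 = 15 << 12, which is exactly
        // the int52 representation of 15, and the CPU's int64 overflow flag doubles
        // as the int52 overflow check.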
        SpeculateWhicheverInt52Operand op1(this, node->child1());
        SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
        GPRTemporary result(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();

        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));

        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
                MacroAssembler::NonZero, resultGPR);
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
            resultNonZero.link(&m_jit);
        }

        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();

        m_jit.mulDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
void SpeculativeJIT::compileArithDiv(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
#if CPU(X86) || CPU(X86_64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();

        GPRReg op2TempGPR;
        GPRReg temp;
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }

        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);
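
        // Unsigned trick: (op2 + 1) > 1 as an unsigned comparison holds iff op2 is
        // neither 0 nor -1, the only two denominators needing special handling below.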
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);

        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));

        JITCompiler::JumpList done;
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
            // are happy to fall through to a normal division, since we're just dividing
            // something by negative 1.

            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), eax.gpr());
            done.append(m_jit.jump());

            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
            done.append(m_jit.jump());

            notNeg2ToThe31.link(&m_jit);
        }

        safeDenominator.link(&m_jit);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }

        m_jit.move(op1GPR, eax.gpr());
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);

        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));

        done.link(&m_jit);
        int32Result(eax.gpr(), node);
#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRTemporary quotient(this);
        GPRTemporary multiplyAnswer(this);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
        }

        int32Result(quotient.gpr(), node);
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
        return;
    }

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.divDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
void SpeculativeJIT::compileArithMod(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        // In the fast path, the dividend value could be the final result
        // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
        SpeculateStrictInt32Operand op1(this, node->child1());

        if (node->child2()->isInt32Constant()) {
            int32_t divisor = node->child2()->asInt32();
            if (divisor > 1 && hasOneBitSet(divisor)) {
                unsigned logarithm = WTF::fastLog2(divisor);
                GPRReg dividendGPR = op1.gpr();
                GPRTemporary result(this);
                GPRReg resultGPR = result.gpr();

                // This is what LLVM generates. It's pretty crazy. Here's my
                // attempt at understanding it.

                // First, compute either divisor - 1, or 0, depending on whether
                // the dividend is negative:
                //
                // If dividend < 0:  resultGPR = divisor - 1
                // If dividend >= 0: resultGPR = 0
                m_jit.move(dividendGPR, resultGPR);
                m_jit.rshift32(TrustedImm32(31), resultGPR);
                m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);

                // Add in the dividend, so that:
                //
                // If dividend < 0:  resultGPR = dividend + divisor - 1
                // If dividend >= 0: resultGPR = dividend
                m_jit.add32(dividendGPR, resultGPR);

                // Mask so as to only get the *high* bits. This rounds down
                // (towards negative infinity) resultGPR to the nearest multiple
                // of divisor, so that:
                //
                // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
                // If dividend >= 0: resultGPR = floor(dividend / divisor)
                //
                // Note that this can be simplified to:
                //
                // If dividend < 0:  resultGPR = ceil(dividend / divisor)
                // If dividend >= 0: resultGPR = floor(dividend / divisor)
                //
                // Note that if the dividend is negative, resultGPR will also be negative.
                // Regardless of the sign of dividend, resultGPR will be rounded towards
                // zero, because of how things are conditionalized.
                m_jit.and32(TrustedImm32(-divisor), resultGPR);

                // Subtract resultGPR from dividendGPR, which yields the remainder:
                //
                // resultGPR = dividendGPR - resultGPR
                m_jit.neg32(resultGPR);
                m_jit.add32(dividendGPR, resultGPR);
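
                // Worked example with divisor = 8 (logarithm = 3): for dividend = -13,
                // the shifts produce 7, -13 + 7 = -6, -6 & -8 = -8, and the remainder
                // is -13 - (-8) = -5, matching JavaScript's -13 % 8. For dividend = 13,
                // the shifts produce 0 and the remainder is 13 - 8 = 5.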
3472 if (shouldCheckNegativeZero(node
->arithMode())) {
3473 // Check that we're not about to create negative zero.
3474 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, dividendGPR
, TrustedImm32(0));
3475 speculationCheck(NegativeZero
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, resultGPR
));
3476 numeratorPositive
.link(&m_jit
);
3479 int32Result(resultGPR
, node
);
3484 #if CPU(X86) || CPU(X86_64)
3485 if (node
->child2()->isInt32Constant()) {
3486 int32_t divisor
= node
->child2()->asInt32();
3487 if (divisor
&& divisor
!= -1) {
3488 GPRReg op1Gpr
= op1
.gpr();
3490 GPRTemporary
eax(this, X86Registers::eax
);
3491 GPRTemporary
edx(this, X86Registers::edx
);
3492 GPRTemporary
scratch(this);
3493 GPRReg scratchGPR
= scratch
.gpr();
3496 if (op1Gpr
== X86Registers::eax
|| op1Gpr
== X86Registers::edx
) {
3497 op1SaveGPR
= allocate();
3498 ASSERT(op1Gpr
!= op1SaveGPR
);
3499 m_jit
.move(op1Gpr
, op1SaveGPR
);
3501 op1SaveGPR
= op1Gpr
;
3502 ASSERT(op1SaveGPR
!= X86Registers::eax
);
3503 ASSERT(op1SaveGPR
!= X86Registers::edx
);
3505 m_jit
.move(op1Gpr
, eax
.gpr());
3506 m_jit
.move(TrustedImm32(divisor
), scratchGPR
);
3507 m_jit
.assembler().cdq();
3508 m_jit
.assembler().idivl_r(scratchGPR
);
3509 if (shouldCheckNegativeZero(node
->arithMode())) {
3510 JITCompiler::Jump numeratorPositive
= m_jit
.branch32(JITCompiler::GreaterThanOrEqual
, op1SaveGPR
, TrustedImm32(0));
3511 speculationCheck(Overflow
, JSValueRegs(), 0, m_jit
.branchTest32(JITCompiler::Zero
, edx
.gpr()));
3512 numeratorPositive
.link(&m_jit
);
3515 if (op1SaveGPR
!= op1Gpr
)
3518 int32Result(edx
.gpr(), node
);
3524 SpeculateInt32Operand
op2(this, node
->child2());
3525 #if CPU(X86) || CPU(X86_64)
3526 GPRTemporary
eax(this, X86Registers::eax
);
3527 GPRTemporary
edx(this, X86Registers::edx
);
3528 GPRReg op1GPR
= op1
.gpr();
3529 GPRReg op2GPR
= op2
.gpr();
3535 if (op2GPR
== X86Registers::eax
|| op2GPR
== X86Registers::edx
) {
3536 op2TempGPR
= allocate();
3539 op2TempGPR
= InvalidGPRReg
;
3540 if (op1GPR
== X86Registers::eax
)
3541 temp
= X86Registers::edx
;
3543 temp
= X86Registers::eax
;
3546 if (op1GPR
== X86Registers::eax
|| op1GPR
== X86Registers::edx
) {
3547 op1SaveGPR
= allocate();
3548 ASSERT(op1GPR
!= op1SaveGPR
);
3549 m_jit
.move(op1GPR
, op1SaveGPR
);
3551 op1SaveGPR
= op1GPR
;
3553 ASSERT(temp
!= op1GPR
);
3554 ASSERT(temp
!= op2GPR
);
3555 ASSERT(op1SaveGPR
!= X86Registers::eax
);
3556 ASSERT(op1SaveGPR
!= X86Registers::edx
);
3558 m_jit
.add32(JITCompiler::TrustedImm32(1), op2GPR
, temp
);
        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));

        JITCompiler::JumpList done;

        // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
        // separate case for that. But it probably doesn't matter so much.
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
            // happy to fall through to a normal division, since we're just dividing something
            // by negative 1.

            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), edx.gpr());
            done.append(m_jit.jump());

            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.move(TrustedImm32(0), edx.gpr());
            done.append(m_jit.jump());

            notNeg2ToThe31.link(&m_jit);
        }

        safeDenominator.link(&m_jit);

        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }

        m_jit.move(op1GPR, eax.gpr());
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);

        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that we're not about to create negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
            numeratorPositive.link(&m_jit);
        }

        if (op1SaveGPR != op1GPR)
            unlock(op1SaveGPR);

        done.link(&m_jit);
        int32Result(edx.gpr(), node);
#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
        GPRTemporary temp(this);
        GPRTemporary quotientThenRemainder(this);
        GPRTemporary multiplyAnswer(this);
        GPRReg dividendGPR = op1.gpr();
        GPRReg divisorGPR = op2.gpr();
        GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
        GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();

        JITCompiler::JumpList done;

        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
        else {
            JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
            m_jit.move(divisorGPR, quotientThenRemainderGPR);
            done.append(m_jit.jump());
            denominatorNotZero.link(&m_jit);
        }

        m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
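        // These architectures have no integer remainder instruction, so the remainder is
        // recovered below as dividend - quotient * divisor. For example, -13 sdiv 8
        // truncates to -1, and -13 - (-1 * 8) = -5, matching the x86 idiv path above.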
        // FIXME: It seems like there are cases where we don't need this? What if we have
        // arithMode() == Arith::Unchecked?
        // https://bugs.webkit.org/show_bug.cgi?id=126444
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
#if HAVE(ARM_IDIV_INSTRUCTIONS)
        m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
#else
        m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
#endif

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            // Check that we're not about to create negative zero.
            JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
            numeratorPositive.link(&m_jit);
        }

        done.link(&m_jit);

        int32Result(quotientThenRemainderGPR, node);
#else // not architecture that can do integer division
        RELEASE_ASSERT_NOT_REACHED();
#endif
        return;
    }
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());

        FPRReg op1FPR = op1.fpr();
        FPRReg op2FPR = op2.fpr();

        flushRegisters();

        FPRResult result(this);

        callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
void SpeculativeJIT::compileArithRound(Node* node)
{
    ASSERT(node->child1().useKind() == DoubleRepUse);

    SpeculateDoubleOperand value(this, node->child1());
    FPRReg valueFPR = value.fpr();

    if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
        FPRTemporary oneHalf(this);
        GPRTemporary roundedResultAsInt32(this);
        FPRReg oneHalfFPR = oneHalf.fpr();
        GPRReg resultGPR = roundedResultAsInt32.gpr();

        static const double halfConstant = 0.5;
        m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
        m_jit.addDouble(valueFPR, oneHalfFPR);
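
        // For illustration: rounding 2.6 computes 2.6 + 0.5 = 3.1, and the truncation
        // below yields 3. If the truncation fails (e.g. NaN or a value outside the
        // int32 range), the jump below forces an OSR exit instead of a wrong result.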
        JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
        speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
        int32Result(resultGPR, node);
        return;
    }

    flushRegisters();
    FPRResult roundedResultAsDouble(this);
    FPRReg resultFPR = roundedResultAsDouble.fpr();
    callOperation(jsRound, resultFPR, valueFPR);
    if (producesInteger(node->arithRoundingMode())) {
        GPRTemporary roundedResultAsInt32(this);
        FPRTemporary scratch(this);
        FPRReg scratchFPR = scratch.fpr();
        GPRReg resultGPR = roundedResultAsInt32.gpr();
        JITCompiler::JumpList failureCases;
        m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
        speculationCheck(Overflow, JSValueRegs(), node, failureCases);

        int32Result(resultGPR, node);
    } else
        doubleResult(resultFPR, node);
}
void SpeculativeJIT::compileArithSqrt(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRReg op1FPR = op1.fpr();

    if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
        flushRegisters();
        FPRResult result(this);
        callOperation(sqrt, result.fpr(), op1FPR);
        doubleResult(result.fpr(), node);
    } else {
        FPRTemporary result(this, op1);
        m_jit.sqrtDouble(op1.fpr(), result.fpr());
        doubleResult(result.fpr(), node);
    }
}
// For small positive integers, it is worth doing a tiny inline loop to exponentiate the base.
// Every register is clobbered by this helper.
static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
{
    MacroAssembler::JumpList skipFastPath;
    skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
    skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));

    static const double oneConstant = 1.0;
    assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);

    MacroAssembler::Label startLoop(assembler.label());
    MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
    assembler.mulDouble(xOperand, result);
    exponentIsEven.link(&assembler);
    assembler.mulDouble(xOperand, xOperand);
    assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
    assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);
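
    // This loop is exponentiation by squaring: each iteration multiplies the accumulator
    // by the current base when the low exponent bit is set, then squares the base and
    // shifts the exponent right. For y = 5 (binary 101) the accumulator picks up x and
    // then x^4, giving x^5 in three iterations instead of five multiplications.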
    MacroAssembler::Jump skipSlowPath = assembler.jump();
    skipFastPath.link(&assembler);

    return skipSlowPath;
}
void SpeculativeJIT::compileArithPow(Node* node)
{
    if (node->child2().useKind() == Int32Use) {
        SpeculateDoubleOperand xOperand(this, node->child1());
        SpeculateInt32Operand yOperand(this, node->child2());
        FPRReg xOperandfpr = xOperand.fpr();
        GPRReg yOperandGpr = yOperand.gpr();
        FPRTemporary yOperandfpr(this);

        flushRegisters();

        FPRResult result(this);
        FPRReg resultFpr = result.fpr();

        FPRTemporary xOperandCopy(this);
        FPRReg xOperandCopyFpr = xOperandCopy.fpr();
        m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);

        GPRTemporary counter(this);
        GPRReg counterGpr = counter.gpr();
        m_jit.move(yOperandGpr, counterGpr);

        MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
        m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
        callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());

        skipFallback.link(&m_jit);
        doubleResult(resultFpr, node);
        return;
    }
    SpeculateDoubleOperand xOperand(this, node->child1());
    SpeculateDoubleOperand yOperand(this, node->child2());
    FPRReg xOperandfpr = xOperand.fpr();
    FPRReg yOperandfpr = yOperand.fpr();

    flushRegisters();

    FPRResult result(this);
    FPRReg resultFpr = result.fpr();

    FPRTemporary xOperandCopy(this);
    FPRReg xOperandCopyFpr = xOperandCopy.fpr();

    FPRTemporary scratch(this);
    FPRReg scratchFpr = scratch.fpr();

    GPRTemporary yOperandInteger(this);
    GPRReg yOperandIntegerGpr = yOperandInteger.gpr();
    MacroAssembler::JumpList failedExponentConversionToInteger;
    m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false);

    m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
    MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr);
    failedExponentConversionToInteger.link(&m_jit);

    callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr);
    skipFallback.link(&m_jit);
    doubleResult(resultFpr, node);
}
void SpeculativeJIT::compileArithLog(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRReg op1FPR = op1.fpr();

    flushRegisters();

    FPRResult result(this);
    callOperation(log, result.fpr(), op1FPR);
    doubleResult(result.fpr(), node);
}
// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
        return true;

    if (node->isBinaryUseKind(Int32Use)) {
        compileInt32Compare(node, condition);
        return false;
    }

#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        compileInt52Compare(node, condition);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        compileDoubleCompare(node, doubleCondition);
        return false;
    }

    if (node->op() == CompareEq) {
        if (node->isBinaryUseKind(StringUse)) {
            compileStringEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(BooleanUse)) {
            compileBooleanCompare(node, condition);
            return false;
        }

        if (node->isBinaryUseKind(StringIdentUse)) {
            compileStringIdentEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(ObjectUse)) {
            compileObjectEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
            return false;
        }

        if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
            return false;
        }
    }

    nonSpeculativeNonPeepholeCompare(node, condition, operation);
    return false;
}
bool SpeculativeJIT::compileStrictEq(Node* node)
{
    if (node->isBinaryUseKind(BooleanUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBooleanCompare(node, MacroAssembler::Equal);
        return false;
    }

    if (node->isBinaryUseKind(Int32Use)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt32Compare(node, MacroAssembler::Equal);
        return false;
    }

#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt52Compare(node, MacroAssembler::Equal);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileDoubleCompare(node, MacroAssembler::DoubleEqual);
        return false;
    }

    if (node->isBinaryUseKind(StringUse)) {
        compileStringEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse)) {
        compileStringIdentEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(ObjectUse, UntypedUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectStrictEquality(node->child1(), node->child2(), branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectStrictEquality(node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectStrictEquality(node->child2(), node->child1(), branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectStrictEquality(node->child2(), node->child1());
        return false;
    }

    if (node->isBinaryUseKind(ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(MiscUse, UntypedUse)
        || node->isBinaryUseKind(UntypedUse, MiscUse)) {
        compileMiscStrictEq(node);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
        return false;
    }

    if (node->isBinaryUseKind(StringUse, UntypedUse)) {
        compileStringToUntypedEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, StringUse)) {
        compileStringToUntypedEquality(node, node->child2(), node->child1());
        return false;
    }

    RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
    return nonSpeculativeStrictEq(node);
}
void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
{
    SpeculateBooleanOperand op1(this, node->child1());
    SpeculateBooleanOperand op2(this, node->child2());
    GPRTemporary result(this);

    m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());

    unblessedBooleanResult(result.gpr(), node);
}
void SpeculativeJIT::compileStringEquality(
    Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
    GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
    JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
{
    JITCompiler::JumpList trueCase;
    JITCompiler::JumpList falseCase;
    JITCompiler::JumpList slowCase;

    trueCase.append(fastTrue);
    falseCase.append(fastFalse);

    m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);

    falseCase.append(m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
        lengthGPR));

    trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));

    m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);

    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));

    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));

    m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);

    MacroAssembler::Label loop = m_jit.label();

    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // This isn't going to generate the best code on x86. But that's OK, it's still better
    // than not inlining.
    m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
    m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
    falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));

    m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
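
    // The loop above walks lengthGPR down from the last character to index 0, comparing
    // one 8-bit character per iteration; any rope (null impl pointer) or non-8-bit string
    // has already been routed to the slow case before this point.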
    trueCase.link(&m_jit);
    moveTrueTo(leftTempGPR);

    JITCompiler::Jump done = m_jit.jump();

    falseCase.link(&m_jit);
    moveFalseTo(leftTempGPR);

    done.link(&m_jit);
    addSlowPathGenerator(
        slowPathCall(
            slowCase, this, operationCompareStringEq, leftTempGPR, leftGPR, rightGPR));

    blessedBooleanResult(leftTempGPR, node);
}
void SpeculativeJIT::compileStringEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this, Reuse, right);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(node->child1(), leftGPR);

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    JITCompiler::Jump fastTrue = m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR);

    speculateString(node->child2(), rightGPR);

    compileStringEquality(
        node, leftGPR, rightGPR, lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, JITCompiler::Jump());
}
void SpeculativeJIT::compileStringToUntypedEquality(Node* node, Edge stringEdge, Edge untypedEdge)
{
    SpeculateCellOperand left(this, stringEdge);
    JSValueOperand right(this, untypedEdge, ManualOperandSpeculation);
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this);

    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(stringEdge, leftGPR);

    JITCompiler::JumpList fastTrue;
    JITCompiler::JumpList fastFalse;

    fastFalse.append(m_jit.branchIfNotCell(rightRegs));

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    fastTrue.append(m_jit.branchPtr(
        MacroAssembler::Equal, leftGPR, rightRegs.payloadGPR()));

    fastFalse.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));

    compileStringEquality(
        node, leftGPR, rightRegs.payloadGPR(), lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, fastFalse);
}
void SpeculativeJIT::compileStringIdentEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();

    speculateString(node->child1(), leftGPR);
    speculateString(node->child2(), rightGPR);

    speculateStringIdentAndLoadStorage(node->child1(), leftGPR, leftTempGPR);
    speculateStringIdentAndLoadStorage(node->child2(), rightGPR, rightTempGPR);

    m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, leftTempGPR);

    unblessedBooleanResult(leftTempGPR, node);
}
void SpeculativeJIT::compileStringIdentToNotStringVarEquality(
    Node* node, Edge stringEdge, Edge notStringVarEdge)
{
    SpeculateCellOperand left(this, stringEdge);
    JSValueOperand right(this, notStringVarEdge, ManualOperandSpeculation);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();

    speculateString(stringEdge, leftGPR);
    speculateStringIdentAndLoadStorage(stringEdge, leftGPR, leftTempGPR);

    moveFalseTo(rightTempGPR);
    JITCompiler::JumpList notString;
    notString.append(m_jit.branchIfNotCell(rightRegs));
    notString.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));

    speculateStringIdentAndLoadStorage(notStringVarEdge, rightRegs.payloadGPR(), rightTempGPR);

    m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, rightTempGPR);
    notString.link(&m_jit);

    unblessedBooleanResult(rightTempGPR, node);
}
void SpeculativeJIT::compileStringZeroLength(Node* node)
{
    SpeculateCellOperand str(this, node->child1());
    GPRReg strGPR = str.gpr();

    // Make sure that this is a string.
    speculateString(node->child1(), strGPR);

    GPRTemporary eq(this);
    GPRReg eqGPR = eq.gpr();

    // Fetch the length field from the string object.
    m_jit.test32(MacroAssembler::Zero, MacroAssembler::Address(strGPR, JSString::offsetOfLength()), MacroAssembler::TrustedImm32(-1), eqGPR);
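    // TrustedImm32(-1) is an all-ones mask, so eqGPR becomes 1 exactly when the length
    // field is zero, which is the boolean this node wants.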
    unblessedBooleanResult(eqGPR, node);
}
void SpeculativeJIT::emitStringBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
{
    SpeculateCellOperand str(this, nodeUse);
    speculateString(nodeUse, str.gpr());
    branchTest32(JITCompiler::NonZero, MacroAssembler::Address(str.gpr(), JSString::offsetOfLength()), taken);
    jump(notTaken);
    noResult(m_currentNode);
}
void SpeculativeJIT::compileConstantStoragePointer(Node* node)
{
    GPRTemporary storage(this);
    GPRReg storageGPR = storage.gpr();
    m_jit.move(TrustedImmPtr(node->storagePointer()), storageGPR);
    storageResult(storageGPR, node);
}
void SpeculativeJIT::compileGetIndexedPropertyStorage(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    GPRTemporary storage(this);
    GPRReg storageReg = storage.gpr();

    switch (node->arrayMode().type()) {
    case Array::String:
        m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);

        addSlowPathGenerator(
            slowPathCall(
                m_jit.branchTest32(MacroAssembler::Zero, storageReg),
                this, operationResolveRope, storageReg, baseReg));

        m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
        break;

    default:
        ASSERT(isTypedView(node->arrayMode().typedArrayType()));
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfVector()),
            storageReg);
        break;
    }

    storageResult(storageReg, node);
}
void SpeculativeJIT::compileGetTypedArrayByteOffset(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRTemporary vector(this);
    GPRTemporary data(this);

    GPRReg baseGPR = base.gpr();
    GPRReg vectorGPR = vector.gpr();
    GPRReg dataGPR = data.gpr();

    JITCompiler::Jump emptyByteOffset = m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(WastefulTypedArray));

    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), dataGPR);
    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), vectorGPR);
    m_jit.loadPtr(MacroAssembler::Address(dataGPR, Butterfly::offsetOfArrayBuffer()), dataGPR);
    m_jit.loadPtr(MacroAssembler::Address(dataGPR, ArrayBuffer::offsetOfData()), dataGPR);
    m_jit.subPtr(dataGPR, vectorGPR);
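
    // At this point vectorGPR = vector - buffer->data(), i.e. the view's byte offset
    // into its backing ArrayBuffer; views that are not in wasteful mode have no such
    // buffer, so they report an offset of zero below.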
    JITCompiler::Jump done = m_jit.jump();

    emptyByteOffset.link(&m_jit);
    m_jit.move(TrustedImmPtr(0), vectorGPR);

    done.link(&m_jit);

    int32Result(vectorGPR, node);
}
void SpeculativeJIT::compileGetByValOnDirectArguments(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    GPRTemporary result(this);
#if USE(JSVALUE32_64)
    GPRTemporary resultTag(this);
#endif

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg resultReg = result.gpr();
#if USE(JSVALUE32_64)
    GPRReg resultTagReg = resultTag.gpr();
    JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg);
#else
    JSValueRegs resultRegs = JSValueRegs(resultReg);
#endif

    if (!m_compileOkay)
        return;

    ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    speculationCheck(
        ExoticObjectMode, JSValueSource(), 0,
        m_jit.branchTestPtr(
            MacroAssembler::NonZero,
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides())));
    speculationCheck(
        ExoticObjectMode, JSValueSource(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg,
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength())));

    m_jit.loadValue(
        MacroAssembler::BaseIndex(
            baseReg, propertyReg, MacroAssembler::TimesEight, DirectArguments::storageOffset()),
        resultRegs);

    jsValueResult(resultRegs, node);
}
void SpeculativeJIT::compileGetByValOnScopedArguments(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    GPRTemporary result(this);
#if USE(JSVALUE32_64)
    GPRTemporary resultTag(this);
#endif
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg resultReg = result.gpr();
#if USE(JSVALUE32_64)
    GPRReg resultTagReg = resultTag.gpr();
    JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg);
#else
    JSValueRegs resultRegs = JSValueRegs(resultReg);
#endif
    GPRReg scratchReg = scratch.gpr();
    GPRReg scratch2Reg = scratch2.gpr();

    if (!m_compileOkay)
        return;

    ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    speculationCheck(
        ExoticObjectMode, JSValueSource(), nullptr,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg,
            MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength())));

    m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTable()), scratchReg);
    m_jit.load32(
        MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfLength()), scratch2Reg);

    MacroAssembler::Jump overflowArgument = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg, scratch2Reg);

    m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfScope()), scratch2Reg);

    m_jit.loadPtr(
        MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfArguments()),
        scratchReg);
    m_jit.load32(
        MacroAssembler::BaseIndex(scratchReg, propertyReg, MacroAssembler::TimesFour),
        scratchReg);

    speculationCheck(
        ExoticObjectMode, JSValueSource(), nullptr,
        m_jit.branch32(
            MacroAssembler::Equal, scratchReg, TrustedImm32(ScopeOffset::invalidOffset)));

    m_jit.loadValue(
        MacroAssembler::BaseIndex(
            scratch2Reg, propertyReg, MacroAssembler::TimesEight,
            JSEnvironmentRecord::offsetOfVariables()),
        resultRegs);

    MacroAssembler::Jump done = m_jit.jump();
    overflowArgument.link(&m_jit);

    m_jit.sub32(propertyReg, scratch2Reg);
    m_jit.neg32(scratch2Reg);

    m_jit.loadValue(
        MacroAssembler::BaseIndex(
            baseReg, scratch2Reg, MacroAssembler::TimesEight,
            ScopedArguments::overflowStorageOffset()),
        resultRegs);
    speculationCheck(ExoticObjectMode, JSValueSource(), nullptr, m_jit.branchIfEmpty(resultRegs));

    done.link(&m_jit);

    jsValueResult(resultRegs, node);
}
void SpeculativeJIT::compileGetScope(Node* node)
{
    SpeculateCellOperand function(this, node->child1());
    GPRTemporary result(this, Reuse, function);
    m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr());
    cellResult(result.gpr(), node);
}
void SpeculativeJIT::compileSkipScope(Node* node)
{
    SpeculateCellOperand scope(this, node->child1());
    GPRTemporary result(this, Reuse, scope);
    m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr());
    cellResult(result.gpr(), node);
}
void SpeculativeJIT::compileGetArrayLength(Node* node)
{
    switch (node->arrayMode().type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        int32Result(resultReg, node);
        break;
    }
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));

        int32Result(resultReg, node);
        break;
    }
    case Array::String: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
        int32Result(resultGPR, node);
        break;
    }
    case Array::DirectArguments: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);

        GPRReg baseReg = base.gpr();
        GPRReg resultReg = result.gpr();

        if (!m_compileOkay)
            return;

        ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

        speculationCheck(
            ExoticObjectMode, JSValueSource(), 0,
            m_jit.branchTestPtr(
                MacroAssembler::NonZero,
                MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides())));

        m_jit.load32(
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength()), resultReg);

        int32Result(resultReg, node);
        break;
    }
    case Array::ScopedArguments: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);

        GPRReg baseReg = base.gpr();
        GPRReg resultReg = result.gpr();

        if (!m_compileOkay)
            return;

        ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

        speculationCheck(
            ExoticObjectMode, JSValueSource(), 0,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(baseReg, ScopedArguments::offsetOfOverrodeThings())));

        m_jit.load32(
            MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength()), resultReg);

        int32Result(resultReg, node);
        break;
    }
    default: {
        ASSERT(isTypedView(node->arrayMode().typedArrayType()));
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()), resultGPR);
        int32Result(resultGPR, node);
        break;
    } }
}
void SpeculativeJIT::compileNewFunction(Node* node)
{
    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    FunctionExecutable* executable = node->castOperand<FunctionExecutable*>();

    if (executable->singletonFunction()->isStillValid()) {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        flushRegisters();

        callOperation(operationNewFunction, resultGPR, scopeGPR, executable);
        cellResult(resultGPR, node);
        return;
    }

    Structure* structure = m_jit.graph().globalObjectFor(
        node->origin.semantic)->functionStructure();

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;
    emitAllocateJSObjectWithKnownSize<JSFunction>(
        resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0),
        scratch1GPR, scratch2GPR, slowPath, JSFunction::allocationSize(0));

    // Don't need memory barriers since we just fast-created the function, so it
    // must be young.
    m_jit.storePtr(
        scopeGPR,
        JITCompiler::Address(resultGPR, JSFunction::offsetOfScopeChain()));
    m_jit.storePtr(
        TrustedImmPtr(executable),
        JITCompiler::Address(resultGPR, JSFunction::offsetOfExecutable()));
    m_jit.storePtr(
        TrustedImmPtr(0),
        JITCompiler::Address(resultGPR, JSFunction::offsetOfRareData()));

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationNewFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable));

    cellResult(resultGPR, node);
}
void SpeculativeJIT::compileForwardVarargs(Node* node)
{
    LoadVarargsData* data = node->loadVarargsData();
    InlineCallFrame* inlineCallFrame = node->child1()->origin.semantic.inlineCallFrame;

    GPRTemporary length(this);
    JSValueRegsTemporary temp(this);
    GPRReg lengthGPR = length.gpr();
    JSValueRegs tempRegs = temp.regs();

    emitGetLength(inlineCallFrame, lengthGPR, /* includeThis = */ true);
    if (data->offset)
        m_jit.sub32(TrustedImm32(data->offset), lengthGPR);

    speculationCheck(
        VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
            MacroAssembler::Above,
            lengthGPR, TrustedImm32(data->limit)));

    m_jit.store32(lengthGPR, JITCompiler::payloadFor(data->machineCount));

    VirtualRegister sourceStart = JITCompiler::argumentsStart(inlineCallFrame) + data->offset;
    VirtualRegister targetStart = data->machineStart;

    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // First have a loop that fills in the undefined slots in case of an arity check failure.
    m_jit.move(TrustedImm32(data->mandatoryMinimum), tempRegs.payloadGPR());
    JITCompiler::Jump done = m_jit.branch32(JITCompiler::BelowOrEqual, tempRegs.payloadGPR(), lengthGPR);

    JITCompiler::Label loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), tempRegs.payloadGPR());
    m_jit.storeTrustedValue(
        jsUndefined(),
        JITCompiler::BaseIndex(
            GPRInfo::callFrameRegister, tempRegs.payloadGPR(), JITCompiler::TimesEight,
            targetStart.offset() * sizeof(EncodedJSValue)));
    m_jit.branch32(JITCompiler::Above, tempRegs.payloadGPR(), lengthGPR).linkTo(loop, &m_jit);
    done.link(&m_jit);
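
    // Both the fill loop above and the copy loop below walk their index register
    // downward, so the count register doubles as the loop cursor and no separate
    // induction variable is needed.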
    // And then fill in the actual argument values.
    done = m_jit.branchTest32(JITCompiler::Zero, lengthGPR);

    loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), lengthGPR);
    m_jit.loadValue(
        JITCompiler::BaseIndex(
            GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
            sourceStart.offset() * sizeof(EncodedJSValue)),
        tempRegs);
    m_jit.storeValue(
        tempRegs,
        JITCompiler::BaseIndex(
            GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
            targetStart.offset() * sizeof(EncodedJSValue)));
    m_jit.branchTest32(JITCompiler::NonZero, lengthGPR).linkTo(loop, &m_jit);

    done.link(&m_jit);

    noResult(node);
}
void SpeculativeJIT::compileCreateActivation(Node* node)
{
    SymbolTable* table = node->castOperand<SymbolTable*>();
    Structure* structure = m_jit.graph().globalObjectFor(
        node->origin.semantic)->activationStructure();

    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    if (table->singletonScope()->isStillValid()) {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        flushRegisters();

        callOperation(operationCreateActivationDirect, resultGPR, structure, scopeGPR, table);
        cellResult(resultGPR, node);
        return;
    }

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;
    emitAllocateJSObjectWithKnownSize<JSLexicalEnvironment>(
        resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
        slowPath, JSLexicalEnvironment::allocationSize(table));

    // Don't need memory barriers since we just fast-created the activation, so the
    // activation must be young.
    m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, JSScope::offsetOfNext()));
    m_jit.storePtr(
        TrustedImmPtr(table),
        JITCompiler::Address(resultGPR, JSLexicalEnvironment::offsetOfSymbolTable()));

    // Must initialize all members to undefined.
    for (unsigned i = 0; i < table->scopeSize(); ++i) {
        m_jit.storeTrustedValue(
            jsUndefined(),
            JITCompiler::Address(
                resultGPR, JSLexicalEnvironment::offsetOfVariable(ScopeOffset(i))));
    }

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table));

    cellResult(resultGPR, node);
}
void SpeculativeJIT::compileCreateDirectArguments(Node* node)
{
    // FIXME: A more effective way of dealing with the argument count and callee is to have
    // them be explicit arguments to this node.
    // https://bugs.webkit.org/show_bug.cgi?id=142207

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRTemporary length;
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    GPRReg lengthGPR = InvalidGPRReg;
    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(scratch1GPR, scratch2GPR);

    unsigned minCapacity = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->numParameters() - 1;

    unsigned knownLength;
    bool lengthIsKnown; // if false, lengthGPR will have the length.
    if (node->origin.semantic.inlineCallFrame
        && !node->origin.semantic.inlineCallFrame->isVarargs()) {
        knownLength = node->origin.semantic.inlineCallFrame->arguments.size() - 1;
        lengthIsKnown = true;
    } else {
        knownLength = UINT_MAX;
        lengthIsKnown = false;

        GPRTemporary realLength(this);
        length.adopt(realLength);
        lengthGPR = length.gpr();

        VirtualRegister argumentCountRegister;
        if (!node->origin.semantic.inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = node->origin.semantic.inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        m_jit.sub32(TrustedImm32(1), lengthGPR);
    }

    Structure* structure =
        m_jit.graph().globalObjectFor(node->origin.semantic)->directArgumentsStructure();

    // Use a different strategy for allocating the object depending on whether we know its
    // size statically.
    JITCompiler::JumpList slowPath;
    if (lengthIsKnown) {
        emitAllocateJSObjectWithKnownSize<DirectArguments>(
            resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
            slowPath, DirectArguments::allocationSize(std::max(knownLength, minCapacity)));

        m_jit.store32(
            TrustedImm32(knownLength),
            JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
    } else {
        JITCompiler::Jump tooFewArguments;
        if (minCapacity) {
            tooFewArguments =
                m_jit.branch32(JITCompiler::Below, lengthGPR, TrustedImm32(minCapacity));
        }
        m_jit.lshift32(lengthGPR, TrustedImm32(3), scratch1GPR);
        m_jit.add32(TrustedImm32(DirectArguments::storageOffset()), scratch1GPR);
        if (minCapacity) {
            JITCompiler::Jump done = m_jit.jump();
            tooFewArguments.link(&m_jit);
            m_jit.move(TrustedImm32(DirectArguments::allocationSize(minCapacity)), scratch1GPR);
            done.link(&m_jit);
        }

        emitAllocateVariableSizedJSObject<DirectArguments>(
            resultGPR, TrustedImmPtr(structure), scratch1GPR, scratch1GPR, scratch2GPR,
            slowPath);

        m_jit.store32(
            lengthGPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
    }

    m_jit.store32(
        TrustedImm32(minCapacity),
        JITCompiler::Address(resultGPR, DirectArguments::offsetOfMinCapacity()));

    m_jit.storePtr(
        TrustedImmPtr(0), JITCompiler::Address(resultGPR, DirectArguments::offsetOfOverrides()));

    if (lengthIsKnown) {
        addSlowPathGenerator(
            slowPathCall(
                slowPath, this, operationCreateDirectArguments, resultGPR, structure,
                knownLength, minCapacity));
    } else {
        auto generator = std::make_unique<CallCreateDirectArgumentsSlowPathGenerator>(
            slowPath, this, resultGPR, structure, lengthGPR, minCapacity);
        addSlowPathGenerator(WTF::move(generator));
    }

    if (node->origin.semantic.inlineCallFrame) {
        if (node->origin.semantic.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(
                    node->origin.semantic.inlineCallFrame->calleeRecovery.virtualRegister()),
                scratch1GPR);
        } else {
            m_jit.move(
                TrustedImmPtr(
                    node->origin.semantic.inlineCallFrame->calleeRecovery.constant().asCell()),
                scratch1GPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratch1GPR);

    // Don't need memory barriers since we just fast-created the arguments object, so it
    // must be young.
    m_jit.storePtr(
        scratch1GPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfCallee()));

    VirtualRegister start = m_jit.argumentsStart(node->origin.semantic);
    if (lengthIsKnown) {
        for (unsigned i = 0; i < std::max(knownLength, minCapacity); ++i) {
            m_jit.loadValue(JITCompiler::addressFor(start + i), valueRegs);
            m_jit.storeValue(
                valueRegs, JITCompiler::Address(resultGPR, DirectArguments::offsetOfSlot(i)));
        }
    } else {
        JITCompiler::Jump done;
        if (minCapacity) {
            JITCompiler::Jump startLoop = m_jit.branch32(
                JITCompiler::AboveOrEqual, lengthGPR, TrustedImm32(minCapacity));
            m_jit.move(TrustedImm32(minCapacity), lengthGPR);
            startLoop.link(&m_jit);
        }
        done = m_jit.branchTest32(MacroAssembler::Zero, lengthGPR);
        JITCompiler::Label loop = m_jit.label();
        m_jit.sub32(TrustedImm32(1), lengthGPR);
        m_jit.loadValue(
            JITCompiler::BaseIndex(
                GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
                start.offset() * static_cast<int>(sizeof(Register))),
            valueRegs);
        m_jit.storeValue(
            valueRegs,
            JITCompiler::BaseIndex(
                resultGPR, lengthGPR, JITCompiler::TimesEight,
                DirectArguments::storageOffset()));
        m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
        done.link(&m_jit);
    }

    cellResult(resultGPR, node);
}
void SpeculativeJIT::compileGetFromArguments(Node* node)
{
    SpeculateCellOperand arguments(this, node->child1());
    JSValueRegsTemporary result(this);

    GPRReg argumentsGPR = arguments.gpr();
    JSValueRegs resultRegs = result.regs();

    m_jit.loadValue(JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())), resultRegs);
    jsValueResult(resultRegs, node);
}
void SpeculativeJIT::compilePutToArguments(Node* node)
{
    SpeculateCellOperand arguments(this, node->child1());
    JSValueOperand value(this, node->child2());

    GPRReg argumentsGPR = arguments.gpr();
    JSValueRegs valueRegs = value.jsValueRegs();

    m_jit.storeValue(valueRegs, JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())));

    noResult(node);
}
void SpeculativeJIT::compileCreateScopedArguments(Node* node)
{
    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    flushRegisters();

    // We set up the arguments ourselves, because we have the whole register file and we can
    // set them up directly into the argument registers. This also means that we don't have to
    // invent a four-argument-register shuffle.

    // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee, 5:scope

    // Do the scopeGPR first, since it might alias an argument register.
    m_jit.setupArgument(5, [&] (GPRReg destGPR) { m_jit.move(scopeGPR, destGPR); });

    // These other things could be done in any order.
    m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
    m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
    m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
    m_jit.setupArgument(
        1, [&] (GPRReg destGPR) {
            m_jit.move(
                TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)->scopedArgumentsStructure()),
                destGPR);
        });
    m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });

    appendCallWithExceptionCheckSetResult(operationCreateScopedArguments, resultGPR);

    cellResult(resultGPR, node);
}
void SpeculativeJIT::compileCreateClonedArguments(Node* node)
{
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    flushRegisters();

    // We set up the arguments ourselves, because we have the whole register file and we can
    // set them up directly into the argument registers.

    // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee
    m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
    m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
    m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
    m_jit.setupArgument(
        1, [&] (GPRReg destGPR) {
            m_jit.move(
                TrustedImmPtr(
                    m_jit.globalObjectFor(node->origin.semantic)->outOfBandArgumentsStructure()),
                destGPR);
        });
    m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });

    appendCallWithExceptionCheckSetResult(operationCreateClonedArguments, resultGPR);

    cellResult(resultGPR, node);
}
void SpeculativeJIT::compileNotifyWrite(Node* node)
{
    WatchpointSet* set = node->watchpointSet();

    JITCompiler::Jump slowCase = m_jit.branch8(
        JITCompiler::NotEqual,
        JITCompiler::AbsoluteAddress(set->addressOfState()),
        TrustedImm32(IsInvalidated));
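
    // Fast path: if the set is already invalidated there is nothing left to notify, so
    // the write proceeds with no call; any other state takes the slow path to fire the
    // watchpoints.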
    addSlowPathGenerator(
        slowPathCall(slowCase, this, operationNotifyWrite, NoResult, set));

    noResult(node);
}
bool SpeculativeJIT::compileRegExpExec(Node* node)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock == UINT_MAX)
        return false;
    Node* branchNode = m_block->at(branchIndexInBlock);
    ASSERT(node->adjustedRefCount() == 1);

    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    bool invert = false;
    if (taken == nextBlock()) {
        invert = true;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand base(this, node->child1());
    SpeculateCellOperand argument(this, node->child2());
    GPRReg baseGPR = base.gpr();
    GPRReg argumentGPR = argument.gpr();

    flushRegisters();
    GPRFlushedCallResult result(this);
    callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);

    branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
    jump(notTaken);

    use(node->child1());
    use(node->child2());
    m_indexInBlock = branchIndexInBlock;
    m_currentNode = branchNode;

    return true;
}
void SpeculativeJIT::compileIsObjectOrNull(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump isCell = m_jit.branchIfCell(valueRegs);

    JITCompiler::Jump isNull = m_jit.branchIfEqual(valueRegs, jsNull());
    JITCompiler::Jump isNonNullNonCell = m_jit.jump();

    isCell.link(&m_jit);
    JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
    JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());

    JITCompiler::Jump slowPath = m_jit.branchTest8(
        JITCompiler::NonZero,
        JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
        TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));

    isNull.link(&m_jit);
    m_jit.move(TrustedImm32(1), resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    isNonNullNonCell.link(&m_jit);
    isFunction.link(&m_jit);
    notObject.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationObjectIsObject, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, node);
}
void SpeculativeJIT::compileIsFunction(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs);
    JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
    JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());

    JITCompiler::Jump slowPath = m_jit.branchTest8(
        JITCompiler::NonZero,
        JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
        TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));

    notCell.link(&m_jit);
    notObject.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    isFunction.link(&m_jit);
    m_jit.move(TrustedImm32(1), resultGPR);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationObjectIsFunction, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, node);
}
void SpeculativeJIT::compileTypeOf(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList done;
    JITCompiler::Jump slowPath;
    m_jit.emitTypeOf(
        valueRegs, resultGPR,
        [&] (TypeofType type, bool fallsThrough) {
            m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.typeString(type)), resultGPR);
            if (!fallsThrough)
                done.append(m_jit.jump());
        },
        [&] (JITCompiler::Jump theSlowPath) {
            slowPath = theSlowPath;
        });
    done.link(&m_jit);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationTypeOfObject, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    cellResult(resultGPR, node);
}
void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
    if (node->transition()->previous->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRFlushedCallResult result(this);
        callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR);

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    GPRTemporary scratch1(this);

    GPRReg baseGPR = base.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();

    ASSERT(!node->transition()->previous->outOfLineCapacity());
    ASSERT(initialOutOfLineCapacity == node->transition()->next->outOfLineCapacity());

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(
            TrustedImm32(initialOutOfLineCapacity * sizeof(JSValue)), scratchGPR1);

    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorageWithInitialCapacity, scratchGPR1));

    m_jit.storePtr(scratchGPR1, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR1, node);
}
void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
{
    size_t oldSize = node->transition()->previous->outOfLineCapacity() * sizeof(JSValue);
    size_t newSize = oldSize * outOfLineGrowthFactor;
    ASSERT(newSize == node->transition()->next->outOfLineCapacity() * sizeof(JSValue));

    if (node->transition()->previous->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRFlushedCallResult result(this);
        callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue));

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    StorageOperand oldStorage(this, node->child2());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg baseGPR = base.gpr();
    GPRReg oldStorageGPR = oldStorage.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(TrustedImm32(newSize), scratchGPR1);

    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorage, scratchGPR1, newSize / sizeof(JSValue)));

    // We have scratchGPR1 = new storage, scratchGPR2 = scratch
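    // Out-of-line property slots live at negative offsets from the butterfly pointer,
    // which is why the copy loop below addresses both the old and the new storage with
    // negative offsets.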
    for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(oldSize); offset += sizeof(void*)) {
        m_jit.loadPtr(JITCompiler::Address(oldStorageGPR, -(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
        m_jit.storePtr(scratchGPR2, JITCompiler::Address(scratchGPR1, -(offset + sizeof(JSValue) + sizeof(void*))));
    }

    m_jit.storePtr(scratchGPR1, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR1, node);
}
GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode)
{
    if (!putByValWillNeedExtraRegister(arrayMode))
        return InvalidGPRReg;

    GPRTemporary realTemporary(this);
    temporary.adopt(realTemporary);
    return temporary.gpr();
}
void SpeculativeJIT::compileToStringOrCallStringConstructorOnCell(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    GPRReg op1GPR = op1.gpr();

    switch (node->child1().useKind()) {
    case StringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        speculateStringObject(node->child1(), op1GPR);
        m_interpreter.filter(node->child1(), SpecStringObject);

        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
        cellResult(resultGPR, node);
        break;
    }

    case StringOrStringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        m_jit.load32(JITCompiler::Address(op1GPR, JSCell::structureIDOffset()), resultGPR);
        JITCompiler::Jump isString = m_jit.branchStructurePtr(
            JITCompiler::Equal,
            resultGPR,
            m_jit.vm()->stringStructure.get());

        speculateStringObjectForStructure(node->child1(), resultGPR);

        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);

        JITCompiler::Jump done = m_jit.jump();
        isString.link(&m_jit);
        m_jit.move(op1GPR, resultGPR);
        done.link(&m_jit);

        m_interpreter.filter(node->child1(), SpecString | SpecStringObject);

        cellResult(resultGPR, node);
        break;
    }

    case CellUse: {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        // We flush registers instead of silent spill/fill because in this mode we
        // believe that most likely the input is not a string, and we need to take
        // slow path.
        flushRegisters();
        JITCompiler::Jump done;
        if (node->child1()->prediction() & SpecString) {
            JITCompiler::Jump needCall = m_jit.branchIfNotString(op1GPR);
            m_jit.move(op1GPR, resultGPR);
            done = m_jit.jump();
            needCall.link(&m_jit);
        }
        if (node->op() == ToString)
            callOperation(operationToStringOnCell, resultGPR, op1GPR);
        else {
            ASSERT(node->op() == CallStringConstructor);
            callOperation(operationCallStringConstructorOnCell, resultGPR, op1GPR);
        }
        if (done.isSet())
            done.link(&m_jit);
        cellResult(resultGPR, node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
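// NewStringObject allocates the wrapper inline, then manually stores the
// class info and the wrapped JSValue; if inline allocation fails, the slow
// path redoes the whole job in operationNewStringObject.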
void SpeculativeJIT::compileNewStringObject(Node* node)
{
    SpeculateCellOperand operand(this, node->child1());

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg operandGPR = operand.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;

    emitAllocateJSObject<StringObject>(
        resultGPR, TrustedImmPtr(node->structure()), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
        slowPath);

    m_jit.storePtr(
        TrustedImmPtr(StringObject::info()),
        JITCompiler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
#if USE(JSVALUE64)
    m_jit.store64(
        operandGPR, JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset()));
#else
    m_jit.store32(
        TrustedImm32(JSValue::CellTag),
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
    m_jit.store32(
        operandGPR,
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif

    addSlowPathGenerator(slowPathCall(
        slowPath, this, operationNewStringObject, resultGPR, operandGPR, node->structure()));

    cellResult(resultGPR, node);
}
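// NewTypedArray fast path: only nonzero sizes up to fastSizeLimit are
// allocated inline. The byte length is rounded up to an 8-byte boundary for
// the storage allocator; on 32-bit the fresh vector is then zero-filled one
// 32-bit word at a time before use.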
void SpeculativeJIT::compileNewTypedArray(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
    TypedArrayType type = node->typedArrayType();
    Structure* structure = globalObject->typedArrayStructure(type);

    SpeculateInt32Operand size(this, node->child1());
    GPRReg sizeGPR = size.gpr();

    GPRTemporary result(this);
    GPRTemporary storage(this);
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg storageGPR = storage.gpr();
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::JumpList slowCases;

    slowCases.append(m_jit.branch32(
        MacroAssembler::Above, sizeGPR, TrustedImm32(JSArrayBufferView::fastSizeLimit)));
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, sizeGPR));

    m_jit.move(sizeGPR, scratchGPR);
    m_jit.lshift32(TrustedImm32(logElementSize(type)), scratchGPR);
    if (elementSize(type) < 8) {
        m_jit.add32(TrustedImm32(7), scratchGPR);
        m_jit.and32(TrustedImm32(~7), scratchGPR);
    }
    slowCases.append(
        emitAllocateBasicStorage(scratchGPR, storageGPR));

    m_jit.subPtr(scratchGPR, storageGPR);

    emitAllocateJSObject<JSArrayBufferView>(
        resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, scratchGPR2,
        slowCases);

    m_jit.storePtr(
        storageGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfVector()));
    m_jit.store32(
        sizeGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfLength()));
    m_jit.store32(
        TrustedImm32(FastTypedArray),
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfMode()));

#if USE(JSVALUE32_64)
    MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, sizeGPR);
    m_jit.move(sizeGPR, scratchGPR);
    if (elementSize(type) != 4) {
        if (elementSize(type) > 4)
            m_jit.lshift32(TrustedImm32(logElementSize(type) - 2), scratchGPR);
        else {
            if (elementSize(type) > 1)
                m_jit.lshift32(TrustedImm32(logElementSize(type)), scratchGPR);
            m_jit.add32(TrustedImm32(3), scratchGPR);
            m_jit.urshift32(TrustedImm32(2), scratchGPR);
        }
    }
    MacroAssembler::Label loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), scratchGPR);
    m_jit.store32(
        TrustedImm32(0),
        MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesFour));
    m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
    done.link(&m_jit);
#endif // USE(JSVALUE32_64)

    addSlowPathGenerator(slowPathCall(
        slowCases, this, operationNewTypedArrayWithSizeForType(type),
        resultGPR, structure, sizeGPR));

    cellResult(resultGPR, node);
}
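// The speculateFoo() helpers below emit the cheapest check that proves an
// edge's use kind, OSR-exiting (BadType) on failure. Cell type checks compare
// the JSType byte in the cell header; the "WithoutTypeFiltering" variant
// skips narrowing the abstract interpreter's proven type afterwards.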
void SpeculativeJIT::speculateCellTypeWithoutTypeFiltering(
    Edge edge, GPRReg cellGPR, JSType jsType)
{
    speculationCheck(
        BadType, JSValueSource::unboxedCell(cellGPR), edge,
        m_jit.branch8(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
            MacroAssembler::TrustedImm32(jsType)));
}

void SpeculativeJIT::speculateCellType(
    Edge edge, GPRReg cellGPR, SpeculatedType specType, JSType jsType)
{
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(cellGPR), edge, specType,
        m_jit.branch8(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
            TrustedImm32(jsType)));
}
void SpeculativeJIT::speculateInt32(Edge edge)
{
    if (!needsTypeCheck(edge, SpecInt32))
        return;

    (SpeculateInt32Operand(this, edge)).gpr();
}
void SpeculativeJIT::speculateNumber(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBytecodeNumber))
        return;

    JSValueOperand value(this, edge, ManualOperandSpeculation);
#if USE(JSVALUE64)
    GPRReg gpr = value.gpr();
    typeCheck(
        JSValueRegs(gpr), edge, SpecBytecodeNumber,
        m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
#else
    GPRReg tagGPR = value.tagGPR();
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, ~SpecInt32,
        m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)));
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, SpecBytecodeNumber,
        m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
#endif
}
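// A "real" number here is any number other than NaN. After unboxing, the
// double is compared with itself: DoubleEqual only succeeds for non-NaN
// values. Values failing that comparison are still acceptable if they are
// boxed Int32s, hence the branchIfNotInt32 type check on the fall-through.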
void SpeculativeJIT::speculateRealNumber(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBytecodeRealNumber))
        return;

    JSValueOperand op1(this, edge, ManualOperandSpeculation);
    FPRTemporary result(this);

    JSValueRegs op1Regs = op1.jsValueRegs();
    FPRReg resultFPR = result.fpr();

#if USE(JSVALUE64)
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    m_jit.move(op1Regs.gpr(), tempGPR);
    m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
#else
    FPRTemporary temp(this);
    FPRReg tempFPR = temp.fpr();
    unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
#endif

    JITCompiler::Jump done = m_jit.branchDouble(
        JITCompiler::DoubleEqual, resultFPR, resultFPR);

    typeCheck(op1Regs, edge, SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));

    done.link(&m_jit);
}
void SpeculativeJIT::speculateDoubleRepReal(Edge edge)
{
    if (!needsTypeCheck(edge, SpecDoubleReal))
        return;

    SpeculateDoubleOperand operand(this, edge);
    FPRReg fpr = operand.fpr();
    typeCheck(
        JSValueRegs(), edge, SpecDoubleReal,
        m_jit.branchDouble(
            MacroAssembler::DoubleNotEqualOrUnordered, fpr, fpr));
}
void SpeculativeJIT::speculateBoolean(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBoolean))
        return;

    (SpeculateBooleanOperand(this, edge)).gpr();
}

void SpeculativeJIT::speculateCell(Edge edge)
{
    if (!needsTypeCheck(edge, SpecCell))
        return;

    (SpeculateCellOperand(this, edge)).gpr();
}
void SpeculativeJIT::speculateObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecObject))
        return;

    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchIfNotObject(gpr));
}
void SpeculativeJIT::speculateFunction(Edge edge)
{
    if (!needsTypeCheck(edge, SpecFunction))
        return;

    SpeculateCellOperand operand(this, edge);
    speculateCellType(edge, operand.gpr(), SpecFunction, JSFunctionType);
}

void SpeculativeJIT::speculateFinalObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecFinalObject))
        return;

    SpeculateCellOperand operand(this, edge);
    speculateCellType(edge, operand.gpr(), SpecFinalObject, FinalObjectType);
}
void SpeculativeJIT::speculateObjectOrOther(Edge edge)
{
    if (!needsTypeCheck(edge, SpecObject | SpecOther))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    MacroAssembler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
    GPRReg gpr = operand.jsValueRegs().payloadGPR();
    DFG_TYPE_CHECK(
        operand.jsValueRegs(), edge, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(gpr));
    MacroAssembler::Jump done = m_jit.jump();
    notCell.link(&m_jit);
    if (needsTypeCheck(edge, SpecCell | SpecOther)) {
        typeCheck(
            operand.jsValueRegs(), edge, SpecCell | SpecOther,
            m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
    }
    done.link(&m_jit);
}
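// A string is "ident" (directly usable as an identifier) when its StringImpl
// is present (i.e. the string is not a rope) and the impl is atomic.
// speculateStringIdentAndLoadStorage checks both and leaves the StringImpl
// pointer in the storage register for callers to reuse.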
void SpeculativeJIT::speculateString(Edge edge, GPRReg cell)
{
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(cell), edge, SpecString | ~SpecCell, m_jit.branchIfNotString(cell));
}
void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage)
{
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), storage);

    if (!needsTypeCheck(edge, SpecStringIdent | ~SpecString))
        return;

    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge,
        m_jit.branchTestPtr(MacroAssembler::Zero, storage));
    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge, m_jit.branchTest32(
            MacroAssembler::Zero,
            MacroAssembler::Address(storage, StringImpl::flagsOffset()),
            MacroAssembler::TrustedImm32(StringImpl::flagIsAtomic())));

    m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
}
void SpeculativeJIT::speculateStringIdent(Edge edge, GPRReg string)
{
    if (!needsTypeCheck(edge, SpecStringIdent))
        return;

    GPRTemporary temp(this);
    speculateStringIdentAndLoadStorage(edge, string, temp.gpr());
}

void SpeculativeJIT::speculateStringIdent(Edge edge)
{
    if (!needsTypeCheck(edge, SpecStringIdent))
        return;

    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    speculateString(edge, gpr);
    speculateStringIdent(edge, gpr);
}

void SpeculativeJIT::speculateString(Edge edge)
{
    if (!needsTypeCheck(edge, SpecString))
        return;

    SpeculateCellOperand operand(this, edge);
    speculateString(edge, operand.gpr());
}
void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg gpr)
{
    speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureIDOffset()));
}

void SpeculativeJIT::speculateStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecStringObject))
        return;

    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    if (!needsTypeCheck(edge, SpecStringObject))
        return;

    speculateStringObject(edge, gpr);
    m_interpreter.filter(edge, SpecStringObject);
}
void SpeculativeJIT::speculateStringOrStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;

    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;

    GPRTemporary structureID(this);
    GPRReg structureIDGPR = structureID.gpr();

    m_jit.load32(JITCompiler::Address(gpr, JSCell::structureIDOffset()), structureIDGPR);
    JITCompiler::Jump isString = m_jit.branchStructurePtr(
        JITCompiler::Equal,
        structureIDGPR,
        m_jit.vm()->stringStructure.get());

    speculateStringObjectForStructure(edge, structureIDGPR);

    isString.link(&m_jit);

    m_interpreter.filter(edge, SpecString | SpecStringObject);
}
void SpeculativeJIT::speculateNotStringVar(Edge edge)
{
    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();

    JITCompiler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
    GPRReg cell = operand.jsValueRegs().payloadGPR();

    JITCompiler::Jump notString = m_jit.branchIfNotString(cell);

    speculateStringIdentAndLoadStorage(edge, cell, tempGPR);

    notString.link(&m_jit);
    notCell.link(&m_jit);
}
void SpeculativeJIT::speculateNotCell(Edge edge)
{
    if (!needsTypeCheck(edge, ~SpecCell))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    typeCheck(operand.jsValueRegs(), edge, ~SpecCell, m_jit.branchIfCell(operand.jsValueRegs()));
}
void SpeculativeJIT::speculateOther(Edge edge)
{
    if (!needsTypeCheck(edge, SpecOther))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    typeCheck(
        operand.jsValueRegs(), edge, SpecOther,
        m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
}
void SpeculativeJIT::speculateMisc(Edge edge, JSValueRegs regs)
{
#if USE(JSVALUE64)
    typeCheck(
        regs, edge, SpecMisc,
        m_jit.branch64(MacroAssembler::Above, regs.gpr(), MacroAssembler::TrustedImm64(TagBitTypeOther | TagBitBool | TagBitUndefined)));
#else
    typeCheck(
        regs, edge, ~SpecInt32,
        m_jit.branch32(MacroAssembler::Equal, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::Int32Tag)));
    typeCheck(
        regs, edge, SpecMisc,
        m_jit.branch32(MacroAssembler::Below, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::UndefinedTag)));
#endif
}

void SpeculativeJIT::speculateMisc(Edge edge)
{
    if (!needsTypeCheck(edge, SpecMisc))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    speculateMisc(edge, operand.jsValueRegs());
}
void SpeculativeJIT::speculate(Node*, Edge edge)
{
    switch (edge.useKind()) {
    case UntypedUse:
        break;
    case KnownInt32Use:
        ASSERT(!needsTypeCheck(edge, SpecInt32));
        break;
    case DoubleRepUse:
        ASSERT(!needsTypeCheck(edge, SpecFullDouble));
        break;
    case Int52RepUse:
        ASSERT(!needsTypeCheck(edge, SpecMachineInt));
        break;
    case KnownCellUse:
        ASSERT(!needsTypeCheck(edge, SpecCell));
        break;
    case KnownStringUse:
        ASSERT(!needsTypeCheck(edge, SpecString));
        break;
    case Int32Use:
        speculateInt32(edge);
        break;
    case NumberUse:
        speculateNumber(edge);
        break;
    case RealNumberUse:
        speculateRealNumber(edge);
        break;
    case DoubleRepRealUse:
        speculateDoubleRepReal(edge);
        break;
#if USE(JSVALUE64)
    case MachineIntUse:
        speculateMachineInt(edge);
        break;
    case DoubleRepMachineIntUse:
        speculateDoubleRepMachineInt(edge);
        break;
#endif
    case BooleanUse:
        speculateBoolean(edge);
        break;
    case CellUse:
        speculateCell(edge);
        break;
    case ObjectUse:
        speculateObject(edge);
        break;
    case FunctionUse:
        speculateFunction(edge);
        break;
    case FinalObjectUse:
        speculateFinalObject(edge);
        break;
    case ObjectOrOtherUse:
        speculateObjectOrOther(edge);
        break;
    case StringIdentUse:
        speculateStringIdent(edge);
        break;
    case StringUse:
        speculateString(edge);
        break;
    case StringObjectUse:
        speculateStringObject(edge);
        break;
    case StringOrStringObjectUse:
        speculateStringOrStringObject(edge);
        break;
    case NotStringVarUse:
        speculateNotStringVar(edge);
        break;
    case NotCellUse:
        speculateNotCell(edge);
        break;
    case OtherUse:
        speculateOther(edge);
        break;
    case MiscUse:
        speculateMisc(edge);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
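// Switch lowering. emitSwitchIntJump indexes a per-codeblock CTI jump table:
// rebase the value by table.min, bounds-check it against the table size
// (taking the fall-through block on overflow), then load the target code
// pointer from the table and jump to it.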
void SpeculativeJIT::emitSwitchIntJump(
    SwitchData* data, GPRReg value, GPRReg scratch)
{
    SimpleJumpTable& table = m_jit.codeBlock()->switchJumpTable(data->switchTableIndex);
    table.ensureCTITable();
    m_jit.sub32(Imm32(table.min), value);
    addBranch(
        m_jit.branch32(JITCompiler::AboveOrEqual, value, Imm32(table.ctiOffsets.size())),
        data->fallThrough.block);
    m_jit.move(TrustedImmPtr(table.ctiOffsets.begin()), scratch);
    m_jit.loadPtr(JITCompiler::BaseIndex(scratch, value, JITCompiler::timesPtr()), scratch);
    m_jit.jump(scratch);
    data->didUseJumpTable = true;
}
void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand value(this, node->child1());
        GPRTemporary temp(this);
        emitSwitchIntJump(data, value.gpr(), temp.gpr());
        noResult(node);
        break;
    }

    case UntypedUse: {
        JSValueOperand value(this, node->child1());
        GPRTemporary temp(this);
        JSValueRegs valueRegs = value.jsValueRegs();
        GPRReg scratch = temp.gpr();

        value.use();

#if USE(JSVALUE64)
        JITCompiler::Jump notInt = m_jit.branch64(
            JITCompiler::Below, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister);
        emitSwitchIntJump(data, valueRegs.gpr(), scratch);
        notInt.link(&m_jit);
        addBranch(
            m_jit.branchTest64(
                JITCompiler::Zero, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister),
            data->fallThrough.block);
        silentSpillAllRegisters(scratch);
        callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs.gpr(), data->switchTableIndex);
        silentFillAllRegisters(scratch);
        m_jit.jump(scratch);
#else
        JITCompiler::Jump notInt = m_jit.branch32(
            JITCompiler::NotEqual, valueRegs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
        emitSwitchIntJump(data, valueRegs.payloadGPR(), scratch);
        notInt.link(&m_jit);
        addBranch(
            m_jit.branch32(
                JITCompiler::AboveOrEqual, valueRegs.tagGPR(),
                TrustedImm32(JSValue::LowestTag)),
            data->fallThrough.block);
        silentSpillAllRegisters(scratch);
        callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs, data->switchTableIndex);
        silentFillAllRegisters(scratch);
        m_jit.jump(scratch);
#endif
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
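// SwitchChar matches single-character strings: anything of length != 1 takes
// the fall-through edge, ropes are resolved via a slow path call, and the
// character is loaded as 8-bit or 16-bit depending on the StringImpl flags
// before reusing the integer jump table path above.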
void SpeculativeJIT::emitSwitchCharStringJump(
    SwitchData* data, GPRReg value, GPRReg scratch)
{
    addBranch(
        m_jit.branch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(value, JSString::offsetOfLength()),
            TrustedImm32(1)),
        data->fallThrough.block);

    m_jit.loadPtr(MacroAssembler::Address(value, JSString::offsetOfValue()), scratch);

    addSlowPathGenerator(
        slowPathCall(
            m_jit.branchTestPtr(MacroAssembler::Zero, scratch),
            this, operationResolveRope, scratch, value));

    m_jit.loadPtr(MacroAssembler::Address(scratch, StringImpl::dataOffset()), value);

    JITCompiler::Jump is8Bit = m_jit.branchTest32(
        MacroAssembler::NonZero,
        MacroAssembler::Address(scratch, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load16(MacroAssembler::Address(value), scratch);

    JITCompiler::Jump ready = m_jit.jump();

    is8Bit.link(&m_jit);
    m_jit.load8(MacroAssembler::Address(value), scratch);

    ready.link(&m_jit);
    emitSwitchIntJump(data, scratch, value);
}
void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case StringUse: {
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();

        op1.use();

        speculateString(node->child1(), op1GPR);
        emitSwitchCharStringJump(data, op1GPR, tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand op1(this, node->child1());
        GPRTemporary temp(this);

        JSValueRegs op1Regs = op1.jsValueRegs();
        GPRReg tempGPR = temp.gpr();

        op1.use();

        addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);

        addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);

        emitSwitchCharStringJump(data, op1Regs.payloadGPR(), tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
namespace {

struct CharacterCase {
    bool operator<(const CharacterCase& other) const
    {
        return character < other.character;
    }

    LChar character;
    unsigned begin;
    unsigned end;
};

} // anonymous namespace
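// Binary string switch. The cases are sorted, so any range [begin, end)
// shares a common prefix. Each recursion level first checks the characters
// all cases agree on, handles the situation where one case is a prefix of the
// others via a length check, and otherwise emits a BinarySwitch over the
// first character position at which the cases diverge.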
void SpeculativeJIT::emitBinarySwitchStringRecurse(
    SwitchData* data, const Vector<SpeculativeJIT::StringSwitchCase>& cases,
    unsigned numChecked, unsigned begin, unsigned end, GPRReg buffer, GPRReg length,
    GPRReg temp, unsigned alreadyCheckedLength, bool checkedExactLength)
{
    static const bool verbose = false;

    if (verbose) {
        dataLog("We're down to the following cases, alreadyCheckedLength = ", alreadyCheckedLength, ":\n");
        for (unsigned i = begin; i < end; ++i) {
            dataLog("    ", cases[i].string, "\n");
        }
    }

    if (begin == end) {
        jump(data->fallThrough.block, ForceJump);
        return;
    }

    unsigned minLength = cases[begin].string->length();
    unsigned commonChars = minLength;
    bool allLengthsEqual = true;
    for (unsigned i = begin + 1; i < end; ++i) {
        unsigned myCommonChars = numChecked;
        for (unsigned j = numChecked;
            j < std::min(cases[begin].string->length(), cases[i].string->length());
            ++j) {
            if (cases[begin].string->at(j) != cases[i].string->at(j)) {
                if (verbose)
                    dataLog("string(", cases[i].string, ")[", j, "] != string(", cases[begin].string, ")[", j, "]\n");
                break;
            }
            myCommonChars++;
        }
        commonChars = std::min(commonChars, myCommonChars);
        if (minLength != cases[i].string->length())
            allLengthsEqual = false;
        minLength = std::min(minLength, cases[i].string->length());
    }

    if (checkedExactLength) {
        RELEASE_ASSERT(alreadyCheckedLength == minLength);
        RELEASE_ASSERT(allLengthsEqual);
    }

    RELEASE_ASSERT(minLength >= commonChars);

    if (verbose)
        dataLog("length = ", minLength, ", commonChars = ", commonChars, ", allLengthsEqual = ", allLengthsEqual, "\n");

    if (!allLengthsEqual && alreadyCheckedLength < minLength)
        branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough.block);
    if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
        branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough.block);

    for (unsigned i = numChecked; i < commonChars; ++i) {
        branch8(
            MacroAssembler::NotEqual, MacroAssembler::Address(buffer, i),
            TrustedImm32(cases[begin].string->at(i)), data->fallThrough.block);
    }

    if (minLength == commonChars) {
        // This is the case where one of the cases is a prefix of all of the other cases.
        // We've already checked that the input string is a prefix of all of the cases,
        // so we just check length to jump to that case.

        if (!ASSERT_DISABLED) {
            ASSERT(cases[begin].string->length() == commonChars);
            for (unsigned i = begin + 1; i < end; ++i)
                ASSERT(cases[i].string->length() > commonChars);
        }

        if (allLengthsEqual) {
            RELEASE_ASSERT(end == begin + 1);
            jump(cases[begin].target, ForceJump);
            return;
        }

        branch32(MacroAssembler::Equal, length, Imm32(commonChars), cases[begin].target);

        // We've checked if the length is >= minLength, and then we checked if the
        // length is == commonChars. We get to this point if it is >= minLength but not
        // == commonChars. Hence we know that it now must be > minLength, i.e., that
        // it's >= minLength + 1.
        emitBinarySwitchStringRecurse(
            data, cases, commonChars, begin + 1, end, buffer, length, temp, minLength + 1, false);
        return;
    }

    // At this point we know that the string is longer than commonChars, and we've only
    // verified commonChars. Use a binary switch on the next unchecked character, i.e.
    // string[commonChars].

    RELEASE_ASSERT(end >= begin + 2);

    m_jit.load8(MacroAssembler::Address(buffer, commonChars), temp);

    Vector<CharacterCase> characterCases;
    CharacterCase currentCase;
    currentCase.character = cases[begin].string->at(commonChars);
    currentCase.begin = begin;
    currentCase.end = begin + 1;
    for (unsigned i = begin + 1; i < end; ++i) {
        if (cases[i].string->at(commonChars) != currentCase.character) {
            if (verbose)
                dataLog("string(", cases[i].string, ")[", commonChars, "] != string(", cases[begin].string, ")[", commonChars, "]\n");
            currentCase.end = i;
            characterCases.append(currentCase);
            currentCase.character = cases[i].string->at(commonChars);
            currentCase.begin = i;
            currentCase.end = i + 1;
        } else
            currentCase.end = i + 1;
    }
    characterCases.append(currentCase);

    Vector<int64_t> characterCaseValues;
    for (unsigned i = 0; i < characterCases.size(); ++i)
        characterCaseValues.append(characterCases[i].character);

    BinarySwitch binarySwitch(temp, characterCaseValues, BinarySwitch::Int32);
    while (binarySwitch.advance(m_jit)) {
        const CharacterCase& myCase = characterCases[binarySwitch.caseIndex()];
        emitBinarySwitchStringRecurse(
            data, cases, commonChars + 1, myCase.begin, myCase.end, buffer, length,
            temp, minLength, allLengthsEqual);
    }

    addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
}
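// The inline binary switch is only used for modest sets of 8-bit case
// strings; otherwise, or when the subject string turns out to be a rope or
// not 8-bit at runtime, we call operationSwitchString and jump to the code
// pointer it returns.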
void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string)
{
    data->didUseJumpTable = true;

    bool canDoBinarySwitch = true;
    unsigned totalLength = 0;

    for (unsigned i = data->cases.size(); i--;) {
        StringImpl* string = data->cases[i].value.stringImpl();
        if (!string->is8Bit()) {
            canDoBinarySwitch = false;
            break;
        }
        if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
            canDoBinarySwitch = false;
            break;
        }
        totalLength += string->length();
    }

    if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
        flushRegisters();
        callOperation(
            operationSwitchString, string, data->switchTableIndex, string);
        m_jit.jump(string);
        return;
    }

    GPRTemporary length(this);
    GPRTemporary temp(this);

    GPRReg lengthGPR = length.gpr();
    GPRReg tempGPR = temp.gpr();

    m_jit.load32(MacroAssembler::Address(string, JSString::offsetOfLength()), lengthGPR);
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), tempGPR);

    MacroAssembler::JumpList slowCases;
    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
    slowCases.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));

    m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), string);

    Vector<StringSwitchCase> cases;
    for (unsigned i = 0; i < data->cases.size(); ++i) {
        cases.append(
            StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target.block));
    }

    std::sort(cases.begin(), cases.end());

    emitBinarySwitchStringRecurse(
        data, cases, 0, 0, cases.size(), string, lengthGPR, tempGPR, 0, false);

    slowCases.link(&m_jit);
    silentSpillAllRegisters(string);
    callOperation(operationSwitchString, string, data->switchTableIndex, string);
    silentFillAllRegisters(string);
    m_jit.jump(string);
}
void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case StringIdentUse: {
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();

        speculateString(node->child1(), op1GPR);
        speculateStringIdentAndLoadStorage(node->child1(), op1GPR, tempGPR);

        Vector<int64_t> identifierCaseValues;
        for (unsigned i = 0; i < data->cases.size(); ++i) {
            identifierCaseValues.append(
                static_cast<int64_t>(bitwise_cast<intptr_t>(data->cases[i].value.stringImpl())));
        }

        BinarySwitch binarySwitch(tempGPR, identifierCaseValues, BinarySwitch::IntPtr);
        while (binarySwitch.advance(m_jit))
            jump(data->cases[binarySwitch.caseIndex()].target.block, ForceJump);
        addBranch(binarySwitch.fallThrough(), data->fallThrough.block);

        noResult(node);
        break;
    }

    case StringUse: {
        SpeculateCellOperand op1(this, node->child1());

        GPRReg op1GPR = op1.gpr();

        op1.use();

        speculateString(node->child1(), op1GPR);
        emitSwitchStringOnString(data, op1GPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand op1(this, node->child1());

        JSValueRegs op1Regs = op1.jsValueRegs();

        op1.use();

        addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);

        addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);

        emitSwitchStringOnString(data, op1Regs.payloadGPR());
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
void SpeculativeJIT::emitSwitch(Node* node)
{
    SwitchData* data = node->switchData();
    switch (data->kind) {
    case SwitchImm: {
        emitSwitchImm(node, data);
        return;
    }
    case SwitchChar: {
        emitSwitchChar(node, data);
        return;
    }
    case SwitchString: {
        emitSwitchString(node, data);
        return;
    }
    case SwitchCell: {
        DFG_CRASH(m_jit.graph(), node, "Bad switch kind");
        return;
    } }
    RELEASE_ASSERT_NOT_REACHED();
}
void SpeculativeJIT::addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination)
{
    for (unsigned i = jump.jumps().size(); i--;)
        addBranch(jump.jumps()[i], destination);
}
void SpeculativeJIT::linkBranches()
{
    for (size_t i = 0; i < m_branches.size(); ++i) {
        BranchRecord& branch = m_branches[i];
        branch.jump.linkTo(m_jit.blockHeads()[branch.destination->index], &m_jit);
    }
}
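// Store barriers for generational GC: if the mutated owner is not already
// remembered (and not in Eden), its address is appended to the VM's
// WriteBarrierBuffer, flushing via a C++ call when the buffer is full.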
#if ENABLE(GGC)
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
    ASSERT(node->op() == StoreBarrier);

    SpeculateCellOperand base(this, node->child1());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr());

    noResult(node);
}
void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2)
{
    ASSERT(scratch1 != scratch2);
    WriteBarrierBuffer& writeBarrierBuffer = m_jit.vm()->heap.m_writeBarrierBuffer;
    m_jit.load32(writeBarrierBuffer.currentIndexAddress(), scratch2);
    JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::TrustedImm32(writeBarrierBuffer.capacity()));

    m_jit.add32(TrustedImm32(1), scratch2);
    m_jit.store32(scratch2, writeBarrierBuffer.currentIndexAddress());

    m_jit.move(TrustedImmPtr(writeBarrierBuffer.buffer()), scratch1);
    // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
    m_jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));

    JITCompiler::Jump done = m_jit.jump();
    needToFlush.link(&m_jit);

    silentSpillAllRegisters(InvalidGPRReg);
    callOperation(operationFlushWriteBarrierBuffer, cell);
    silentFillAllRegisters(InvalidGPRReg);

    done.link(&m_jit);
}
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2)
{
    JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
    storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
    ownerIsRememberedOrInEden.link(&m_jit);
}
#else
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
    DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
    noResult(node);
}
#endif // ENABLE(GGC)

} } // namespace JSC::DFG