]> git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGSpeculativeJIT.cpp
JavaScriptCore-7601.1.46.3.tar.gz
[apple/javascriptcore.git] / dfg / DFGSpeculativeJIT.cpp
1 /*
2 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JSCInlines.h"
42 #include "JSEnvironmentRecord.h"
43 #include "JSLexicalEnvironment.h"
44 #include "LinkBuffer.h"
45 #include "ScopedArguments.h"
46 #include "ScratchRegisterAllocator.h"
47 #include "WriteBarrierBuffer.h"
48 #include <wtf/MathExtras.h>
49
50 namespace JSC { namespace DFG {
51
// Constructs a SpeculativeJIT bound to the given JITCompiler. All derived
// state comes from the compiler's graph: the generation-info table is sized
// to the frame register count, and the abstract-interpreter state, variable
// event stream, and minified graph are wired to the compiler's JITCode.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true) // Cleared by terminateSpeculativeExecution() to bail out of compilation.
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}
66
// Out-of-line destructor; no explicit teardown is needed beyond the members'
// own destructors.
SpeculativeJIT::~SpeculativeJIT()
{
}
70
// Emits the inline fast path that allocates a JSArray with the given
// structure: butterfly storage for max(BASE_VECTOR_LEN, numElements) slots
// plus the array cell itself. On any fast-path failure, a slow-path generator
// calls operationNewArrayWithSize and also recovers the storage pointer.
// On success, resultGPR holds the array and storageGPR its butterfly.
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    // Only the simple indexing shapes may be allocated this way.
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    // Never allocate a vector smaller than the baseline capacity.
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    // Allocate the element vector plus its indexing header, then rewind
    // storageGPR past the vector so it points at the butterfly (presumably
    // emitAllocateBasicStorage leaves an end-of-allocation pointer — the
    // subPtr below depends on that convention).
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    // Initialize the butterfly's public and vector lengths.
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    // Double arrays use PNaN as the hole value, so the unused tail of the
    // vector must be filled with it.
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        // 32-bit: store the encoded PNaN as separate tag/payload halves.
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
        structure, numElements));
}
114
115 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
116 {
117 if (inlineCallFrame && !inlineCallFrame->isVarargs())
118 m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
119 else {
120 VirtualRegister argumentCountRegister;
121 if (!inlineCallFrame)
122 argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
123 else
124 argumentCountRegister = inlineCallFrame->argumentCountRegister;
125 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
126 if (!includeThis)
127 m_jit.sub32(TrustedImm32(1), lengthGPR);
128 }
129 }
130
// Convenience overload: gets the argument count for the frame that the given
// code origin executes in.
void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}
135
136 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
137 {
138 if (origin.inlineCallFrame) {
139 if (origin.inlineCallFrame->isClosureCall) {
140 m_jit.loadPtr(
141 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
142 calleeGPR);
143 } else {
144 m_jit.move(
145 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
146 calleeGPR);
147 }
148 } else
149 m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
150 }
151
152 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
153 {
154 m_jit.addPtr(
155 TrustedImm32(
156 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
157 GPRInfo::callFrameRegister, startGPR);
158 }
159
// Emits OSR-exit-fuzzing instrumentation: increments the global check counter
// and, based on the fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter options,
// returns a jump that fires when this check is the chosen one. Returns an
// unset Jump when fuzzing is disabled or no threshold option is set.
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    // regT0 is used as scratch for the counter update; preserve its value.
    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            // Fire on every check once the counter reaches the threshold:
            // the "ok" branch is taken while the counter is still Below it.
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            // Fire only on the exact check number.
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        // Firing path: restore regT0 before jumping out.
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    // Non-firing path restores regT0 here.
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}
193
// Registers an OSR exit for this node that is taken when jumpToFail fires,
// or when the OSR-exit fuzzer decides to fire at this check. No-op once
// compilation has been marked as failed.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        // Both the fuzzer's jump and the real failure jump target the exit.
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
209
// JumpList variant: registers an OSR exit taken when any jump in jumpsToFail
// fires (plus the fuzzer's jump, if fuzzing chooses to fire here).
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        // Copy the caller's jumps and add the fuzzer's jump to the same exit.
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
225
// Registers an OSR exit with no failure jumps attached yet, and returns a
// placeholder (holding the exit's index) through which the caller can link
// jumps to it later. Returns an empty placeholder if compilation has failed.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    // Capture the index of the exit we are about to append.
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}
236
// Edge convenience wrapper: forwards to the Node* placeholder variant.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}
242
// Edge convenience wrapper: forwards to the Node* single-jump variant.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}
248
// Edge convenience wrapper: forwards to the Node* jump-list variant.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}
254
// Like the plain single-jump speculationCheck, but also records a
// SpeculationRecovery whose index is stored on the OSR exit, so the exit
// machinery can undo in-flight register mutations before reconstructing
// bytecode state. Note: no fuzz check is emitted on this path.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}
264
// Edge convenience wrapper: forwards to the Node* variant with recovery.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}
270
// Plants an invalidation point for the node: an OSR exit with an empty jump
// list (UncountableInvalidation), plus a watchpoint label recorded as the
// exit's replacement source — presumably so the code at that label can later
// be patched to divert execution into the exit when the code is invalidated.
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}
285
// Emits an unconditional OSR exit at this point and marks the rest of this
// compilation as failed (m_compileOkay = false), which turns subsequent
// speculationCheck calls into no-ops.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    // An always-taken jump to the exit.
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}
296
// Edge convenience wrapper: forwards to the Node* variant.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}
302
// Narrows the abstract value of the edge to typesPassedThrough in the
// abstract interpreter, then registers a BadType speculation check that fires
// via jumpToFail for values outside that set. The caller must have verified
// that the check is actually needed (needsTypeCheck).
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}
309
310 RegisterSet SpeculativeJIT::usedRegisters()
311 {
312 RegisterSet result;
313
314 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
315 GPRReg gpr = GPRInfo::toRegister(i);
316 if (m_gprs.isInUse(gpr))
317 result.set(gpr);
318 }
319 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
320 FPRReg fpr = FPRInfo::toRegister(i);
321 if (m_fprs.isInUse(fpr))
322 result.set(fpr);
323 }
324
325 result.merge(RegisterSet::specialRegisters());
326
327 return result;
328 }
329
// Takes ownership of a slow-path generator; it will be run (its code emitted)
// later by runSlowPathGenerators().
void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTF::move(slowPathGenerator));
}
334
// Emits the code for every queued slow-path generator. Iterates by index and
// re-reads size() each pass — NOTE(review): this looks deliberate so that
// generators appended during generation are also run; confirm before changing
// to iterator-based traversal.
void SpeculativeJIT::runSlowPathGenerators()
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}
340
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code),
// so a wrapper compiled in this translation unit is used instead of taking
// libm's fmod address directly.
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif
351
352 void SpeculativeJIT::clearGenerationInfo()
353 {
354 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
355 m_generationInfo[i] = GenerationInfo();
356 m_gprs = RegisterBank<GPRInfo>();
357 m_fprs = RegisterBank<FPRInfo>();
358 }
359
// Computes — without emitting code — how a GPR currently caching the value of
// |spillMe| should be saved before a "silent" (register-preserving) call and
// refilled afterwards. The spill action depends on whether the value needs
// spilling and on its register DataFormat; the fill action additionally
// prefers rematerializing constants over reloading from the stack.
// Doubles are handled by silentSavePlanForFPR, never here.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    // Choose how to save the register's contents (if at all).
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            // A boxed value spans two GPRs; store whichever half |source| holds.
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    // Choose how to refill the register after the call.
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        // On 64-bit, booleans are never held in a GPR in this raw format.
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        // The spill slot may hold either Int52 (shifted) or StrictInt52
        // (unshifted); pick a fill that restores the shifted form.
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        // Mirror image of the Int52 case: restore the unshifted form.
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        // Remaining cases are boxed JSValue formats.
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            // Spilled as a raw int32; reload and re-box it.
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            // If the spill slot stores a typed payload only, the tag is
            // implied by the spill format and can be set as a constant.
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
509
// FPR counterpart of silentSavePlanForGPR: plans how a double held in |source|
// for |spillMe| is saved around a silent call and restored afterwards.
// Constants are rematerialized; everything else is stored/loaded as a double.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        // Only a live, not-yet-spilled, non-constant double needs a store.
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
547
// Executes the spill half of a plan produced by silentSavePlanForGPR/FPR:
// stores the planned register into the node's stack slot using the width and
// half (tag/payload) the plan selected.
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
574
// Executes the fill half of a silent-save plan: rematerializes a constant or
// reloads the value from its stack slot into the planned register, applying
// any boxing/shift the plan calls for. |canTrample| is a GPR the fill may
// clobber freely (used on 64-bit to stage a double-constant bit pattern).
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        // Int52 format keeps the value pre-shifted left by int52ShiftAmount.
        m_jit.move(Imm64(plan.node()->asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        // Stage the raw bits in the trampleable GPR, then move them to the FPR.
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        // Reload a raw int32 and re-box it as a JSValue.
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        // Spilled as Int52 (shifted); restore the strict (unshifted) form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        // Spilled as StrictInt52 (unshifted); restore the shifted form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
673
// Given tempGPR holding the cell's indexing-type byte, emits a branch that is
// taken when the indexing type does NOT match the wanted |shape| under the
// given array class. Clobbers tempGPR (masks it in place).
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        // OriginalArray checks are handled elsewhere (structure checks), so
        // reaching here is a bug.
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        // Must be an array (IsArray bit set) with exactly this shape.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        // Must have this shape and NOT have the IsArray bit set.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        // Only the shape matters; ignore the IsArray bit.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}
704
// JumpList variant: emits the checks for a full ArrayMode. Simple shapes
// delegate to the single-shape helper; (SlowPut)ArrayStorage modes need a
// range check because two adjacent shapes are acceptable when isSlowPut().
// Clobbers tempGPR. Returns the jumps taken on mismatch.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                // Must be an array, with shape in
                // [ArrayStorageShape, SlowPutArrayStorageShape].
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            // Must be an array with exactly ArrayStorageShape.
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        // Non-array (or possibly-array) object: the IsArray bit is ignored.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            // Accept shapes in [ArrayStorageShape, SlowPutArrayStorageShape].
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}
761
// Emits the CheckArray node: verifies that the base cell conforms to the
// node's (specific, non-converting) ArrayMode, either via its indexing-type
// byte (for the JSArray shapes) or via its JSCell type (for arguments and
// typed-array modes). Skips the check entirely when the abstract state
// already proves it.
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    // If the abstract interpreter already proved the mode, no code is needed.
    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // Check the indexing-type byte against the wanted mode.
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        // Remaining modes are typed arrays; check the corresponding cell type.
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    // NOTE(review): every reachable switch case above returns (the String case
    // breaks only after RELEASE_ASSERT_NOT_REACHED), and expectedClassInfo is
    // never set to non-null, so the ClassInfo-based check below appears to be
    // dead code kept for the unreachable fall-through.
    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}
826
// Emits the Arrayify(ToStructure) fast path: if the base already has the
// wanted structure (ArrayifyToStructure) or indexing shape (Arrayify), fall
// through; otherwise take the slow path, which performs the conversion.
// propertyReg may be InvalidGPRReg when there is no index operand.
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    // Only the plain Arrayify path needs a structure scratch register; adopt
    // it into the outer-scope temporary so it stays alive for the slow path.
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        // Weak structure check: slow path if the structure doesn't match.
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        // Shape check on the indexing-type byte.
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}
862
863 void SpeculativeJIT::arrayify(Node* node)
864 {
865 ASSERT(node->arrayMode().isSpecific());
866
867 SpeculateCellOperand base(this, node->child1());
868
869 if (!node->child2()) {
870 arrayify(node, base.gpr(), InvalidGPRReg);
871 return;
872 }
873
874 SpeculateInt32Operand property(this, node->child2());
875
876 arrayify(node, base.gpr(), property.gpr());
877 }
878
// Fills the edge's value into a GPR as a storage pointer. If it was spilled
// in storage format, reload it; in every other situation fall back to filling
// it as a cell (the cell pointer doubles as the storage pointer here).
// Returns the (locked/retained) GPR holding the pointer.
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            // Reload the spilled storage pointer into a fresh register.
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        // Already in a register; just lock it for the caller.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}
908
909 void SpeculativeJIT::useChildren(Node* node)
910 {
911 if (node->flags() & NodeHasVarArgs) {
912 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
913 if (!!m_jit.graph().m_varArgChildren[childIdx])
914 use(m_jit.graph().m_varArgChildren[childIdx]);
915 }
916 } else {
917 Edge child1 = node->child1();
918 if (!child1) {
919 ASSERT(!node->child2() && !node->child3());
920 return;
921 }
922 use(child1);
923
924 Edge child2 = node->child2();
925 if (!child2) {
926 ASSERT(!node->child3());
927 return;
928 }
929 use(child2);
930
931 Edge child3 = node->child3();
932 if (!child3)
933 return;
934 use(child3);
935 }
936 }
937
// Compiles the "in" operator. child1 is the property key, child2 the base
// cell. A constant atomic-string key gets a patchable inline cache
// (repatched via the InRecord machinery); anything else calls the generic
// runtime helper.
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            // The patchable jump initially targets the slow path; once a stub
            // is generated it is repatched to the fast check. "done" marks
            // where the slow path rejoins the main line.
            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            // Record everything the repatching code needs about this site.
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTF::move(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    // Generic path: box the key and call the runtime.
    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
993
994 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
995 {
996 unsigned branchIndexInBlock = detectPeepHoleBranch();
997 if (branchIndexInBlock != UINT_MAX) {
998 Node* branchNode = m_block->at(branchIndexInBlock);
999
1000 ASSERT(node->adjustedRefCount() == 1);
1001
1002 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1003
1004 m_indexInBlock = branchIndexInBlock;
1005 m_currentNode = branchNode;
1006
1007 return true;
1008 }
1009
1010 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1011
1012 return false;
1013 }
1014
1015 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1016 {
1017 unsigned branchIndexInBlock = detectPeepHoleBranch();
1018 if (branchIndexInBlock != UINT_MAX) {
1019 Node* branchNode = m_block->at(branchIndexInBlock);
1020
1021 ASSERT(node->adjustedRefCount() == 1);
1022
1023 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1024
1025 m_indexInBlock = branchIndexInBlock;
1026 m_currentNode = branchNode;
1027
1028 return true;
1029 }
1030
1031 nonSpeculativeNonPeepholeStrictEq(node, invert);
1032
1033 return false;
1034 }
1035
1036 static const char* dataFormatString(DataFormat format)
1037 {
1038 // These values correspond to the DataFormat enum.
1039 const char* strings[] = {
1040 "[ ]",
1041 "[ i]",
1042 "[ d]",
1043 "[ c]",
1044 "Err!",
1045 "Err!",
1046 "Err!",
1047 "Err!",
1048 "[J ]",
1049 "[Ji]",
1050 "[Jd]",
1051 "[Jc]",
1052 "Err!",
1053 "Err!",
1054 "Err!",
1055 "Err!",
1056 };
1057 return strings[format];
1058 }
1059
// Debug helper: prints the allocator state — both register files plus the
// generation info for every live virtual register — optionally wrapped in
// <label> ... </label> tags.
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF(" gprs:\n");
    m_gprs.dump();
    dataLogF(" fprs:\n");
    m_fprs.dump();
    dataLogF(" VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        // Print register format and spill format side by side.
        if (info.alive())
            dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF(" % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            // On 32-bit, JS formats occupy a register pair, not a single GPR.
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
1091
1092 GPRTemporary::GPRTemporary()
1093 : m_jit(0)
1094 , m_gpr(InvalidGPRReg)
1095 {
1096 }
1097
1098 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1099 : m_jit(jit)
1100 , m_gpr(InvalidGPRReg)
1101 {
1102 m_gpr = m_jit->allocate();
1103 }
1104
1105 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1106 : m_jit(jit)
1107 , m_gpr(InvalidGPRReg)
1108 {
1109 m_gpr = m_jit->allocate(specific);
1110 }
1111
1112 #if USE(JSVALUE32_64)
1113 GPRTemporary::GPRTemporary(
1114 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1115 : m_jit(jit)
1116 , m_gpr(InvalidGPRReg)
1117 {
1118 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1119 m_gpr = m_jit->reuse(op1.gpr(which));
1120 else
1121 m_gpr = m_jit->allocate();
1122 }
1123 #endif // USE(JSVALUE32_64)
1124
// Creates an empty temporary; member temporaries allocate nothing until
// constructed with a jit.
JSValueRegsTemporary::JSValueRegsTemporary() { }
1126
// Allocates the registers needed to hold a JSValue: a single GPR on 64-bit
// platforms, a tag/payload pair on 32-bit platforms.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}
1136
// Member GPRTemporaries release their registers via their own destructors.
JSValueRegsTemporary::~JSValueRegsTemporary() { }
1138
// Returns the allocated register(s) wrapped as a JSValueRegs, matching the
// platform's JSValue representation.
JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}
1147
1148 void GPRTemporary::adopt(GPRTemporary& other)
1149 {
1150 ASSERT(!m_jit);
1151 ASSERT(m_gpr == InvalidGPRReg);
1152 ASSERT(other.m_jit);
1153 ASSERT(other.m_gpr != InvalidGPRReg);
1154 m_jit = other.m_jit;
1155 m_gpr = other.m_gpr;
1156 other.m_jit = 0;
1157 other.m_gpr = InvalidGPRReg;
1158 }
1159
1160 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1161 : m_jit(jit)
1162 , m_fpr(InvalidFPRReg)
1163 {
1164 m_fpr = m_jit->fprAllocate();
1165 }
1166
1167 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1168 : m_jit(jit)
1169 , m_fpr(InvalidFPRReg)
1170 {
1171 if (m_jit->canReuse(op1.node()))
1172 m_fpr = m_jit->reuse(op1.fpr());
1173 else
1174 m_fpr = m_jit->fprAllocate();
1175 }
1176
1177 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1178 : m_jit(jit)
1179 , m_fpr(InvalidFPRReg)
1180 {
1181 if (m_jit->canReuse(op1.node()))
1182 m_fpr = m_jit->reuse(op1.fpr());
1183 else if (m_jit->canReuse(op2.node()))
1184 m_fpr = m_jit->reuse(op2.fpr());
1185 else
1186 m_fpr = m_jit->fprAllocate();
1187 }
1188
1189 #if USE(JSVALUE32_64)
1190 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1191 : m_jit(jit)
1192 , m_fpr(InvalidFPRReg)
1193 {
1194 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1195 m_fpr = m_jit->reuse(op1.fpr());
1196 else
1197 m_fpr = m_jit->fprAllocate();
1198 }
1199 #endif
1200
1201 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1202 {
1203 BasicBlock* taken = branchNode->branchData()->taken.block;
1204 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1205
1206 SpeculateDoubleOperand op1(this, node->child1());
1207 SpeculateDoubleOperand op2(this, node->child2());
1208
1209 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1210 jump(notTaken);
1211 }
1212
// Fused object-equality compare-and-branch. Both children are speculated to
// be objects; object equality then reduces to a cell-pointer comparison.
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    // If the taken block is the fall-through block, invert the condition and
    // swap the targets so we fall through on the common path.
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // No object can masquerade as undefined, so it suffices to check that
        // each operand is an object — and only when the abstract interpreter
        // hasn't already proved it (m_type & ~SpecObject is non-empty).
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        // The watchpoint has fired: objects may masquerade as undefined, so in
        // addition to the object checks we must OSR-exit if either operand has
        // the MasqueradesAsUndefined type-info flag set.
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // Object identity: compare the cell pointers directly.
    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
1269
1270 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1271 {
1272 BasicBlock* taken = branchNode->branchData()->taken.block;
1273 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1274
1275 // The branch instruction will branch to the taken block.
1276 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1277 if (taken == nextBlock()) {
1278 condition = JITCompiler::invert(condition);
1279 BasicBlock* tmp = taken;
1280 taken = notTaken;
1281 notTaken = tmp;
1282 }
1283
1284 if (node->child1()->isBooleanConstant()) {
1285 bool imm = node->child1()->asBoolean();
1286 SpeculateBooleanOperand op2(this, node->child2());
1287 branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1288 } else if (node->child2()->isBooleanConstant()) {
1289 SpeculateBooleanOperand op1(this, node->child1());
1290 bool imm = node->child2()->asBoolean();
1291 branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1292 } else {
1293 SpeculateBooleanOperand op1(this, node->child1());
1294 SpeculateBooleanOperand op2(this, node->child2());
1295 branch32(condition, op1.gpr(), op2.gpr(), taken);
1296 }
1297
1298 jump(notTaken);
1299 }
1300
1301 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1302 {
1303 BasicBlock* taken = branchNode->branchData()->taken.block;
1304 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1305
1306 // The branch instruction will branch to the taken block.
1307 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1308 if (taken == nextBlock()) {
1309 condition = JITCompiler::invert(condition);
1310 BasicBlock* tmp = taken;
1311 taken = notTaken;
1312 notTaken = tmp;
1313 }
1314
1315 if (node->child1()->isInt32Constant()) {
1316 int32_t imm = node->child1()->asInt32();
1317 SpeculateInt32Operand op2(this, node->child2());
1318 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1319 } else if (node->child2()->isInt32Constant()) {
1320 SpeculateInt32Operand op1(this, node->child1());
1321 int32_t imm = node->child2()->asInt32();
1322 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1323 } else {
1324 SpeculateInt32Operand op1(this, node->child1());
1325 SpeculateInt32Operand op2(this, node->child2());
1326 branch32(condition, op1.gpr(), op2.gpr(), taken);
1327 }
1328
1329 jump(notTaken);
1330 }
1331
// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        // Dispatch on the compare's operand use kinds.
        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                // Untyped equality: generic peephole branch (uses its own children).
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        // The peephole consumed the branch: release the compare's children and
        // advance past the branch node.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
1382
1383 void SpeculativeJIT::noticeOSRBirth(Node* node)
1384 {
1385 if (!node->hasVirtualRegister())
1386 return;
1387
1388 VirtualRegister virtualRegister = node->virtualRegister();
1389 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1390
1391 info.noticeOSRBirth(*m_stream, node, virtualRegister);
1392 }
1393
1394 void SpeculativeJIT::compileMovHint(Node* node)
1395 {
1396 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1397
1398 Node* child = node->child1().node();
1399 noticeOSRBirth(child);
1400
1401 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1402 }
1403
// Abandons proper code generation for the current position: plants a crash
// (tagged with the last generated node for diagnostics) and wipes all
// register-allocation state.
void SpeculativeJIT::bail(AbortReason reason)
{
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    // NOTE(review): resetting m_compileOkay to true here looks deliberate — it
    // appears to let code generation continue past this point without
    // re-triggering the bail checks, since the planted abort makes the emitted
    // code unreachable at runtime. Confirm against DFG documentation.
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}
1412
// Generates machine code for m_block: records the block's entry label, replays
// the head variable state for OSR exit, then compiles each node in order while
// stepping the abstract interpreter alongside.
void SpeculativeJIT::compileCurrentBlock()
{
    ASSERT(m_compileOkay);

    if (!m_block)
        return;

    ASSERT(m_block->isReachable);

    // Record the entry label so branches from other blocks can link to it.
    m_jit.blockHeads()[m_block->index] = m_jit.label();

    if (!m_block->intersectionOfCFAHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.abortWithReason(DFGUnreachableBasicBlock);
        return;
    }

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();
    m_jit.jitAssertTagsInPlace();
    m_jit.jitAssertArgumentCountSane();

    m_state.reset();
    m_state.beginBasicBlock(m_block);

    // Tell the OSR-exit machinery where each live variable resides at the
    // head of the block.
    for (size_t i = m_block->variablesAtHead.size(); i--;) {
        int operand = m_block->variablesAtHead.operandForIndex(i);
        Node* node = m_block->variablesAtHead[i];
        if (!node)
            continue; // No need to record dead SetLocal's.

        VariableAccessData* variable = node->variableAccessData();
        DataFormat format;
        if (!node->refCount())
            continue; // No need to record dead SetLocal's.
        format = dataFormatFor(variable->flushFormat());
        m_stream->appendAndLog(
            VariableEvent::setLocal(
                VirtualRegister(operand),
                variable->machineLocal(),
                format));
    }

    m_codeOriginForExitTarget = CodeOrigin();
    m_codeOriginForExitProfile = CodeOrigin();

    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);

        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail(DFGBailedAtTopOfBlock);
            return;
        }

        if (ASSERT_DISABLED)
            m_canExit = true; // Essentially disable the assertions.
        else
            m_canExit = mayExit(m_jit.graph(), m_currentNode);

        m_interpreter.startExecuting();
        m_jit.setForNode(m_currentNode);
        // Exits target the pre-node state but profile against the semantic origin.
        m_codeOriginForExitTarget = m_currentNode->origin.forExit;
        m_codeOriginForExitProfile = m_currentNode->origin.semantic;
        m_lastGeneratedNode = m_currentNode->op();

        ASSERT(m_currentNode->shouldGenerate());

        if (verboseCompilationEnabled()) {
            dataLogF(
                "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                (int)m_currentNode->index(),
                m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
            dataLog("\n");
        }

        compile(m_currentNode);

        // Keep the minified graph (used by OSR exit) in sync with codegen.
        if (belongsInMinifiedGraph(m_currentNode->op()))
            m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.clearRegisterAllocationOffsets();
#endif

        if (!m_compileOkay) {
            bail(DFGBailedAtEndOfNode);
            return;
        }

        // Make sure that the abstract state is rematerialized for the next node.
        m_interpreter.executeEffects(m_indexInBlock);
    }

    // Perform the most basic verification that children have been used correctly.
    if (!ASSERT_DISABLED) {
        for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
            GenerationInfo& info = m_generationInfo[index];
            RELEASE_ASSERT(!info.alive());
        }
    }
}
1519
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
// Each failing check OSR-exits to bytecode index 0 (the function's start).
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    m_codeOriginForExitTarget = CodeOrigin(0);
    m_codeOriginForExitProfile = CodeOrigin(0);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        if (!node) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }

        ASSERT(node->op() == SetArgument);
        ASSERT(node->shouldGenerate());

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();

        // FlushedJSValue admits any value; nothing to check.
        if (format == FlushedJSValue)
            continue;

        VirtualRegister virtualRegister = variableAccessData->local();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));

#if USE(JSVALUE64)
        // 64-bit: inspect the NaN-boxed tag bits of the value in its stack slot.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
            break;
        }
        case FlushedBoolean: {
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            // XOR with ValueFalse maps false to 0 and true to 1; any other
            // value leaves bits outside the low bit set, which the test catches.
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        // 32-bit: the separate tag word identifies the type directly.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }
    m_isCheckingArgumentTypes = false;
}
1592
// Top-level code generation driver: emit the entry-point argument type
// checks, compile every basic block in index order, then link intra-graph
// branches. Always returns true.
bool SpeculativeJIT::compile()
{
    checkArgumentTypes();

    ASSERT(!m_currentNode);
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        m_jit.setForBlockIndex(blockIndex);
        m_block = m_jit.graph().block(blockIndex);
        compileCurrentBlock();
    }
    linkBranches();
    return true;
}
1606
1607 void SpeculativeJIT::createOSREntries()
1608 {
1609 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1610 BasicBlock* block = m_jit.graph().block(blockIndex);
1611 if (!block)
1612 continue;
1613 if (!block->isOSRTarget)
1614 continue;
1615
1616 // Currently we don't have OSR entry trampolines. We could add them
1617 // here if need be.
1618 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1619 }
1620 }
1621
1622 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1623 {
1624 unsigned osrEntryIndex = 0;
1625 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1626 BasicBlock* block = m_jit.graph().block(blockIndex);
1627 if (!block)
1628 continue;
1629 if (!block->isOSRTarget)
1630 continue;
1631 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1632 }
1633 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1634
1635 if (verboseCompilationEnabled()) {
1636 DumpContext dumpContext;
1637 dataLog("OSR Entries:\n");
1638 for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1639 dataLog(" ", inContext(entryData, &dumpContext), "\n");
1640 if (!dumpContext.isEmpty())
1641 dumpContext.dump(WTF::dataFile());
1642 }
1643 }
1644
1645 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1646 {
1647 Edge child3 = m_jit.graph().varArgChild(node, 2);
1648 Edge child4 = m_jit.graph().varArgChild(node, 3);
1649
1650 ArrayMode arrayMode = node->arrayMode();
1651
1652 GPRReg baseReg = base.gpr();
1653 GPRReg propertyReg = property.gpr();
1654
1655 SpeculateDoubleOperand value(this, child3);
1656
1657 FPRReg valueReg = value.fpr();
1658
1659 DFG_TYPE_CHECK(
1660 JSValueRegs(), child3, SpecFullRealNumber,
1661 m_jit.branchDouble(
1662 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1663
1664 if (!m_compileOkay)
1665 return;
1666
1667 StorageOperand storage(this, child4);
1668 GPRReg storageReg = storage.gpr();
1669
1670 if (node->op() == PutByValAlias) {
1671 // Store the value to the array.
1672 GPRReg propertyReg = property.gpr();
1673 FPRReg valueReg = value.fpr();
1674 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1675
1676 noResult(m_currentNode);
1677 return;
1678 }
1679
1680 GPRTemporary temporary;
1681 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1682
1683 MacroAssembler::Jump slowCase;
1684
1685 if (arrayMode.isInBounds()) {
1686 speculationCheck(
1687 OutOfBounds, JSValueRegs(), 0,
1688 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1689 } else {
1690 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1691
1692 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1693
1694 if (!arrayMode.isOutOfBounds())
1695 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1696
1697 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1698 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1699
1700 inBounds.link(&m_jit);
1701 }
1702
1703 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1704
1705 base.use();
1706 property.use();
1707 value.use();
1708 storage.use();
1709
1710 if (arrayMode.isOutOfBounds()) {
1711 addSlowPathGenerator(
1712 slowPathCall(
1713 slowCase, this,
1714 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1715 NoResult, baseReg, propertyReg, valueReg));
1716 }
1717
1718 noResult(m_currentNode, UseChildrenCalledExplicitly);
1719 }
1720
// String.prototype.charCodeAt fast path: child1 is the string cell, child2
// the index, child3 the string's character storage. Produces the character
// code as an int32.
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // Fetch the StringImpl to inspect its flags.
    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    // 8-bit (Latin-1) storage: one byte per element.
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    // 16-bit (UTF-16) storage: two bytes per element.
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    int32Result(scratchReg, m_currentNode);
}
1755
// Indexed read on a string (string[i]). In-bounds indices yield the interned
// single-character string; out-of-bounds handling depends on the array mode
// and on whether the string prototype chain is known to be sane.
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    // Only the out-of-bounds path can produce a non-cell result, so the tag
    // register is allocated lazily via adopt().
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);

    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // Characters >= 0x100 are not in the single-character table; call out.
    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Index into the VM's single-character string table (pointer-sized entries).
    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
    m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);

    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));

    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif

        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
        if (globalObject->stringPrototypeChainIsSane()) {
            // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
            // loads return a trivial value". Something like SaneChainOutOfBounds. This should
            // speculate that we don't take negative out-of-bounds, or better yet, it should rely
            // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
            // indexed properties either.
            // https://bugs.webkit.org/show_bug.cgi?id=144668
            m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
            m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());

#if USE(JSVALUE64)
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
#else
            addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
                baseReg, propertyReg));
#endif
        } else {
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    resultTagReg, scratchReg, baseReg, propertyReg));
#endif
        }

#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}
1858
1859 void SpeculativeJIT::compileFromCharCode(Node* node)
1860 {
1861 SpeculateStrictInt32Operand property(this, node->child1());
1862 GPRReg propertyReg = property.gpr();
1863 GPRTemporary smallStrings(this);
1864 GPRTemporary scratch(this);
1865 GPRReg scratchReg = scratch.gpr();
1866 GPRReg smallStringsReg = smallStrings.gpr();
1867
1868 JITCompiler::JumpList slowCases;
1869 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
1870 m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
1871 m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
1872
1873 slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
1874 addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
1875 cellResult(scratchReg, m_currentNode);
1876 }
1877
1878 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1879 {
1880 VirtualRegister virtualRegister = node->virtualRegister();
1881 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1882
1883 switch (info.registerFormat()) {
1884 case DataFormatStorage:
1885 RELEASE_ASSERT_NOT_REACHED();
1886
1887 case DataFormatBoolean:
1888 case DataFormatCell:
1889 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1890 return GeneratedOperandTypeUnknown;
1891
1892 case DataFormatNone:
1893 case DataFormatJSCell:
1894 case DataFormatJS:
1895 case DataFormatJSBoolean:
1896 case DataFormatJSDouble:
1897 return GeneratedOperandJSValue;
1898
1899 case DataFormatJSInt32:
1900 case DataFormatInt32:
1901 return GeneratedOperandInteger;
1902
1903 default:
1904 RELEASE_ASSERT_NOT_REACHED();
1905 return GeneratedOperandTypeUnknown;
1906 }
1907 }
1908
// Compile ValueToInt32: convert the child to an int32 following the
// ECMAScript ToInt32 semantics, specialized on the child's use kind and on
// how the operand happens to be materialized right now.
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case Int52RepUse: {
        // Int52 -> int32 is just truncation to the low 32 bits.
        SpeculateStrictInt52Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
        int32Result(resultGPR, node, DataFormatInt32);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        // Try a direct hardware truncation; if it fails (value out of int32
        // range, NaN, etc.), call the toInt32 runtime helper on a slow path.
        GPRTemporary result(this);
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
        JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);

        addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));

        int32Result(gpr, node);
        return;
    }

    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            // Operand is already an int32: just forward it.
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            int32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            // In the JSVALUE64 encoding, boxed int32s compare >= the tag
            // register value.
            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                // NumberUse: OSR-exit unless the value is a boxed number.
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                // NotCellUse: non-number non-cells convert here as booleans
                // (true -> 1, everything else -> 0).
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);

                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));

                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());

                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            // Doubles are converted by the toInt32 runtime helper; spill and
            // refill registers around the call.
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();

            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                // On 32_64, int32s are identified by their tag word.
                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                if (node->child1().useKind() == NumberUse) {
                    // Tags >= LowestTag are non-double, non-number values.
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));

                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), ~SpecCell,
                        m_jit.branchIfCell(op1.jsValueRegs()));

                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());

                    // Boolean payload is already 0 or 1, so it can be reused
                    // directly as the int32 result.
                    isBoolean.link(&m_jit);
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());

                    isNumber.link(&m_jit);
                }

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            int32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            // checkGeneratedTypeForToInt32 already terminated compilation.
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
2069
2070 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2071 {
2072 if (doesOverflow(node->arithMode())) {
2073 // We know that this sometimes produces doubles. So produce a double every
2074 // time. This at least allows subsequent code to not have weird conditionals.
2075
2076 SpeculateInt32Operand op1(this, node->child1());
2077 FPRTemporary result(this);
2078
2079 GPRReg inputGPR = op1.gpr();
2080 FPRReg outputFPR = result.fpr();
2081
2082 m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2083
2084 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2085 m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2086 positive.link(&m_jit);
2087
2088 doubleResult(outputFPR, node);
2089 return;
2090 }
2091
2092 RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2093
2094 SpeculateInt32Operand op1(this, node->child1());
2095 GPRTemporary result(this);
2096
2097 m_jit.move(op1.gpr(), result.gpr());
2098
2099 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2100
2101 int32Result(result.gpr(), node, op1.format());
2102 }
2103
2104 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2105 {
2106 SpeculateDoubleOperand op1(this, node->child1());
2107 FPRTemporary scratch(this);
2108 GPRTemporary result(this);
2109
2110 FPRReg valueFPR = op1.fpr();
2111 FPRReg scratchFPR = scratch.fpr();
2112 GPRReg resultGPR = result.gpr();
2113
2114 JITCompiler::JumpList failureCases;
2115 RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2116 m_jit.branchConvertDoubleToInt32(
2117 valueFPR, resultGPR, failureCases, scratchFPR,
2118 shouldCheckNegativeZero(node->arithMode()));
2119 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2120
2121 int32Result(resultGPR, node);
2122 }
2123
// Compile DoubleRep: produce an unboxed double from the child, specialized
// on the child's use kind (real number, any number / not-cell, or Int52).
void SpeculativeJIT::compileDoubleRep(Node* node)
{
    switch (node->child1().useKind()) {
    case RealNumberUse: {
        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

        JSValueRegs op1Regs = op1.jsValueRegs();
        FPRReg resultFPR = result.fpr();

#if USE(JSVALUE64)
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.move(op1Regs.gpr(), tempGPR);
        m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
#else
        FPRTemporary temp(this);
        FPRReg tempFPR = temp.fpr();
        unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
#endif

        // A self-comparison succeeds only for non-NaN values, i.e. when the
        // speculative unboxing above actually yielded a real double.
        JITCompiler::Jump done = m_jit.branchDouble(
            JITCompiler::DoubleEqual, resultFPR, resultFPR);

        // Not a double: it must be an int32, otherwise OSR-exit.
        DFG_TYPE_CHECK(
            op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
        m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);

        done.link(&m_jit);

        doubleResult(resultFPR, node);
        return;
    }

    case NotCellUse:
    case NumberUse: {
        ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.

        // If abstract interpretation proved the operand is an int32, a simple
        // conversion suffices.
        SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
        if (isInt32Speculation(possibleTypes)) {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            FPRTemporary result(this);
            m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
            doubleResult(result.fpr(), node);
            return;
        }

        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

#if USE(JSVALUE64)
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;

        JITCompiler::Jump isInteger = m_jit.branch64(
            MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);

        if (node->child1().useKind() == NotCellUse) {
            // NotCellUse also converts undefined -> NaN, null -> 0, and
            // booleans -> 0/1, matching ToNumber for non-cell values.
            JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
            JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));

            static const double zero = 0;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);

            JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
            done.append(isNull);

            // Remaining non-cells must be booleans (ValueFalse/ValueTrue
            // differ only in the low bit), otherwise OSR-exit.
            DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
                m_jit.branchTest64(JITCompiler::NonZero, op1GPR, TrustedImm32(static_cast<int32_t>(~1))));

            JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
            static const double one = 1;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
            done.append(isFalse);

            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }

        m_jit.move(op1GPR, tempGPR);
        unboxDouble(tempGPR, resultFPR);
        done.append(m_jit.jump());

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1GPR, resultFPR);
        done.link(&m_jit);
#else // USE(JSVALUE64) -> this is the 32_64 case
        FPRTemporary temp(this);

        GPRReg op1TagGPR = op1.tagGPR();
        GPRReg op1PayloadGPR = op1.payloadGPR();
        FPRReg tempFPR = temp.fpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;

        JITCompiler::Jump isInteger = m_jit.branch32(
            MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));

        if (node->child1().useKind() == NotCellUse) {
            // Same undefined/null/boolean handling as the 64-bit path, but
            // discriminated via the tag word.
            JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
            JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));

            static const double zero = 0;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);

            JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
            done.append(isNull);

            DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));

            JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
            static const double one = 1;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
            done.append(isFalse);

            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }

        unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
        done.append(m_jit.jump());

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
        done.link(&m_jit);
#endif // USE(JSVALUE64)

        doubleResult(resultFPR, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        // Int52 -> double is a plain 64-bit integer-to-double conversion.
        SpeculateStrictInt52Operand value(this, node->child1());
        FPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        FPRReg resultFPR = result.fpr();

        m_jit.convertInt64ToDouble(valueGPR, resultFPR);

        doubleResult(resultFPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2295
// Compile ValueRep: box a double or Int52 child into a JSValue.
void SpeculativeJIT::compileValueRep(Node* node)
{
    switch (node->child1().useKind()) {
    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        JSValueRegsTemporary result(this);

        FPRReg valueFPR = value.fpr();
        JSValueRegs resultRegs = result.regs();

        // It's very tempting to in-place filter the value to indicate that it's not impure NaN
        // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
        // subject to a prior SetLocal, filtering the value would imply that the corresponding
        // local was purified.
        if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
            m_jit.purifyNaN(valueFPR);

        boxDouble(valueFPR, resultRegs);

        jsValueResult(resultRegs, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();

        // boxInt52 re-boxes as int32 or double depending on the value's range.
        boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);

        jsValueResult(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2339
// Clamp a double into [0, 255] for Uint8Clamped stores, biasing by 0.5 so
// that a subsequent truncation rounds half-up. NaN fails the > 0 comparison
// and therefore clamps to 0. Note: the returned value still carries the 0.5
// bias; callers truncate it.
static double clampDoubleToByte(double d)
{
    double biased = d + 0.5;
    if (!(biased > 0))
        return 0;
    if (biased > 255)
        return 255;
    return biased;
}
2349
2350 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2351 {
2352 MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2353 MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2354 jit.xorPtr(result, result);
2355 MacroAssembler::Jump clamped = jit.jump();
2356 tooBig.link(&jit);
2357 jit.move(JITCompiler::TrustedImm32(255), result);
2358 clamped.link(&jit);
2359 inBounds.link(&jit);
2360 }
2361
// Emit code that clamps the double in `source` into [0, 255] with rounding,
// leaving the int32 result in `result`. NaN clamps to 0 via the unordered
// compare. The static const doubles below must stay function-local statics:
// their addresses are baked into the generated code.
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);

    // In-range: add 0.5 and truncate, i.e. round half-up (mirrors the
    // constant-folding helper clampDoubleToByte above).
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);
    MacroAssembler::Jump truncatedInt = jit.jump();

    tooSmall.link(&jit);
    jit.xorPtr(result, result);
    MacroAssembler::Jump zeroed = jit.jump();

    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);

    truncatedInt.link(&jit);
    zeroed.link(&jit);

}
2391
// Returns the jump taken when a typed-array access is out of bounds, or an
// unset jump when the bounds check is statically unnecessary. The unsigned
// AboveOrEqual comparisons also reject negative indices.
JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
{
    // PutByValAlias means a prior access already proved the index in-bounds.
    if (node->op() == PutByValAlias)
        return JITCompiler::Jump();
    // If the base folds to a known view, compare against its constant length
    // instead of loading the length from the object.
    JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
        m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
    if (view) {
        uint32_t length = view->length();
        Node* indexNode = m_jit.graph().child(node, 1).node();
        // Constant index provably below the constant length: no check needed.
        if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
            return JITCompiler::Jump();
        return m_jit.branch32(
            MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
    }
    return m_jit.branch32(
        MacroAssembler::AboveOrEqual, indexGPR,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
}
2410
2411 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2412 {
2413 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2414 if (!jump.isSet())
2415 return;
2416 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2417 }
2418
// Compile GetByVal on an integer typed array: bounds-check, load with the
// element's width/signedness, then pick the cheapest result representation
// that can hold the value.
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));

    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (isSigned(type))
            m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    // Sub-32-bit and signed 32-bit elements always fit in an int32.
    if (elementSize(type) < 4 || isSigned(type)) {
        int32Result(resultReg, node);
        return;
    }

    // Uint32 element: the value may exceed INT32_MAX.
    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32()) {
        // Speculate that the high bit is clear; OSR-exit otherwise.
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
        int32Result(resultReg, node);
        return;
    }

#if USE(JSVALUE64)
    if (node->shouldSpeculateMachineInt()) {
        // Any uint32 fits in Int52 after zero-extension.
        m_jit.zeroExtend32ToPtr(resultReg, resultReg);
        strictInt52Result(resultReg, node);
        return;
    }
#endif

    // Fall back to double: convert as signed, then add 2^32 when the sign
    // bit was set to recover the unsigned value.
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
2483
// Compile PutByVal into an integer typed array: materialize the value as an
// int32 (clamping for Uint8Clamped), bounds-check, then store with the
// element width. Out-of-bounds stores are silently ignored when the array
// mode permits them.
void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));

    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();

    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    GPRTemporary value;
    GPRReg valueGPR = InvalidGPRReg;

    if (valueUse->isConstant()) {
        JSValue jsValue = valueUse->asJSValue();
        if (!jsValue.isNumber()) {
            // Non-number constant: this speculation path can't succeed.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        // Fold the clamp/convert at compile time for constant values.
        double d = jsValue.asNumber();
        if (isClamped(type)) {
            ASSERT(elementSize(type) == 1);
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }

#if USE(JSVALUE64)
        case Int52RepUse: {
            SpeculateStrictInt52Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                // 64-bit analogue of compileClampIntegerToByte: unsigned
                // compare catches 0..255, signed compare splits big/negative.
                MacroAssembler::Jump inBounds = m_jit.branch64(
                    MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
                MacroAssembler::Jump tooBig = m_jit.branch64(
                    MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
                m_jit.move(TrustedImm32(0), scratchReg);
                MacroAssembler::Jump clamped = m_jit.jump();
                tooBig.link(&m_jit);
                m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
                clamped.link(&m_jit);
                inBounds.link(&m_jit);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
#endif // USE(JSVALUE64)

        case DoubleRepUse: {
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                // NaN converts to 0; check it first since the hardware
                // truncation below would not handle it.
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);

                // Out-of-range doubles fall back to the toInt32 helper.
                MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
                    fpr, gpr, MacroAssembler::BranchIfTruncateFailed);

                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));

                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        // In-bounds mode: an out-of-bounds index is a speculation failure.
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    // Out-of-bounds mode: the store is simply skipped.
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2619
// Compile GetByVal on a Float32/Float64 array: bounds-check, load, and
// widen float to double for the Float32 case.
void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));

    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 4:
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8: {
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    doubleResult(resultReg, node);
}
2652
// Compile PutByVal into a Float32/Float64 array: narrow double to float for
// the Float32 case, bounds-check, and store. Out-of-bounds stores are
// silently skipped when the array mode permits them.
void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));

    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();

    Edge baseUse = m_jit.graph().varArgChild(node, 0);
    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    SpeculateDoubleOperand valueOp(this, valueUse);
    FPRTemporary scratch(this);
    FPRReg valueFPR = valueOp.fpr();
    FPRReg scratchFPR = scratch.fpr();

    ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));

    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        // In-bounds mode: an out-of-bounds index is a speculation failure.
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 4: {
        // Narrow into the scratch so the original double stays intact.
        m_jit.moveDouble(valueFPR, scratchFPR);
        m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
        m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    }
    case 8:
        m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2693
// Emit the core of instanceof for a value known to be a cell: walk the
// value's prototype chain and compare each prototype against prototypeReg.
// Leaves the boolean result (in the platform's boolean encoding) in
// scratchReg.
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
{
    // Check that prototype is an object.
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));

    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);

    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
#if USE(JSVALUE64)
    // Keep looping while the prototype slot holds a cell; the chain ends at
    // a non-cell (null).
    m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
#else
    // On 32_64 the payload is a raw pointer; the chain ends at a null payload.
    m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif

    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();

    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif

    putResult.link(&m_jit);
}
2730
// Compile InstanceOf. For UntypedUse the value may not be a cell, in which
// case the result is trivially false; otherwise both operands are cells and
// the prototype-chain walk does the work.
void SpeculativeJIT::compileInstanceOf(Node* node)
{
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.

        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);
        GPRTemporary scratch2(this);

        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();
        GPRReg scratch2Reg = scratch2.gpr();

        MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
        GPRReg valueReg = value.jsValueRegs().payloadGPR();
        // Non-cell: `x instanceof C` is false without consulting the chain.
        moveFalseTo(scratchReg);

        MacroAssembler::Jump done = m_jit.jump();

        isCell.link(&m_jit);

        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

        done.link(&m_jit);

        blessedBooleanResult(scratchReg, node);
        return;
    }

    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);

    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();
    GPRReg scratch2Reg = scratch2.gpr();

    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

    blessedBooleanResult(scratchReg, node);
}
2778
// Compiles ValueAdd/ArithAdd for the numeric use kinds chosen by the fixup
// phase: Int32 (with optional overflow checks), Int52 (64-bit only), or
// double. Addition never needs a negative-zero check (asserted below for
// Int32/Int52), and the Int32 constant cases fold the immediate directly
// into the add instruction.
void SpeculativeJIT::compileAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Constant on the left: fold it into an add-with-immediate.
        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        // Constant on the right: symmetric to the case above.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (!shouldCheckOverflow(node->arithMode())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);

            // If the result register reuses an operand register, the add has
            // clobbered that operand; record a SpeculationRecovery so the OSR
            // exit path can undo the add and recover the original value.
            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }

        int32Result(gprResult, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.add64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        // Exit if the 64-bit add overflows the Int52 range.
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        // Double addition never needs checks; IEEE semantics match JS.
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
2893
// Compiles MakeRope: inline-allocates a JSRopeString with two or three string
// fibers, ANDing the fibers' flags (so the rope is 8-bit only if all fibers
// are) and summing their lengths with an OSR exit on int32 overflow. On
// allocation failure we fall back to operationMakeRope2/operationMakeRope3.
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);

    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();

    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);

    // A null value pointer is what marks the string as a rope.
    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // Null out any unused fiber slots.
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // From here on we reuse the registers: scratchGPR accumulates the combined
    // flags, and allocatorGPR (no longer needed for allocation) accumulates the
    // total length.
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    if (!ASSERT_DISABLED) {
        // Debug-only sanity check: a string length must never be negative.
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        // Exit to the baseline if the summed length overflows int32.
        speculationCheck(
            Uncountable, JSValueSource(), nullptr,
            m_jit.branchAdd32(
                JITCompiler::Overflow,
                JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
    }
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    if (!ASSERT_DISABLED) {
        // Debug-only sanity check on the summed length as well.
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));

    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    cellResult(resultGPR, node);
}
2974
2975 void SpeculativeJIT::compileArithClz32(Node* node)
2976 {
2977 ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced a Int32 operand.");
2978 SpeculateInt32Operand value(this, node->child1());
2979 GPRTemporary result(this, Reuse, value);
2980 GPRReg valueReg = value.gpr();
2981 GPRReg resultReg = result.gpr();
2982 m_jit.countLeadingZeros32(valueReg, resultReg);
2983 int32Result(resultReg, node);
2984 }
2985
// Compiles ArithSub for the numeric use kinds chosen by fixup: Int32 (with
// optional overflow checks), Int52 (64-bit only), or double. Subtraction
// never needs a negative-zero check (asserted for Int32), and the Int32
// constant cases fold the immediate into the subtract.
void SpeculativeJIT::compileArithSub(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Constant on the right: subtract-with-immediate.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.sub32(Imm32(imm2), result.gpr());
            } else {
                // The checking branchSub32 with an immediate needs an extra
                // scratch register on some targets.
                GPRTemporary scratch(this);
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
            }

            int32Result(result.gpr(), node);
            return;
        }

        // Constant on the left: materialize the immediate, then subtract op2.
        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            m_jit.move(Imm32(imm1), result.gpr());
            if (!shouldCheckOverflow(node->arithMode()))
                m_jit.sub32(op2.gpr(), result.gpr());
            else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub32(op2.gpr(), result.gpr());
        } else
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        // Exit if the 64-bit subtract overflows the Int52 range.
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        // Double subtraction needs no checks; IEEE semantics match JS.
        m_jit.subDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3086
// Compiles ArithNegate for Int32, Int52 (64-bit only), and double use kinds.
// Integer negation may need both an overflow check (negating INT_MIN/INT52_MIN
// overflows) and a negative-zero check (negating 0 should produce -0, which
// an int cannot represent), depending on the node's arith mode.
void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);

        m_jit.move(op1.gpr(), result.gpr());

        // Note: there is no notion of being not used as a number, but someone
        // caring about negative zero.

        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.neg32(result.gpr());
        else if (!shouldCheckNegativeZero(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            // (value & 0x7fffffff) == 0 exactly when value is 0 (negating it
            // would produce -0) or 0x80000000 (negating it would overflow) -
            // a single test covers both exit conditions.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));

        // If the input is provably within int32 range, a 64-bit negate cannot
        // overflow, so only the negative-zero check may be needed.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            GPRTemporary result(this);
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();
            m_jit.move(op1GPR, resultGPR);
            m_jit.neg64(resultGPR);
            if (shouldCheckNegativeZero(node->arithMode())) {
                speculationCheck(
                    NegativeZero, JSValueRegs(), 0,
                    m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
            }
            int52Result(resultGPR, node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        GPRTemporary result(this);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
        if (shouldCheckNegativeZero(node->arithMode())) {
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
        }
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);

        // Doubles represent -0 natively; no checks needed.
        m_jit.negateDouble(op1.fpr(), result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
// Compiles ArithMul for Int32, Int52 (64-bit only), and double use kinds.
// Integer multiply may need an overflow check and a negative-zero check:
// an int 0 result is wrong (should be -0) when exactly one operand is
// negative, so we exit if the result is zero and either operand is < 0.
void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();

        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }

        // Check for negative zero, if the users of this node care about such things.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }

        int32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));

        // This is super clever. We want to do an int52 multiplication and check the
        // int52 overflow bit. There is no direct hardware support for this, but we do
        // have the ability to do an int64 multiplication and check the int64 overflow
        // bit. We leverage that. Consider that a, b are int52 numbers inside int64
        // registers, with the high 12 bits being sign-extended. We can do:
        //
        //     (a * (b << 12))
        //
        // This will give us a left-shifted int52 (value is in high 52 bits, low 16
        // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
        // multiplication overflows is identical to whether the 'a * b' 52-bit
        // multiplication overflows.
        //
        // In our nomenclature, this is:
        //
        //     strictInt52(a) * int52(b) => int52
        //
        // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
        // bits.
        //
        // We don't care which of op1 or op2 serves as the left-shifted operand, so
        // we just do whatever is more convenient for op1 and have op2 do the
        // opposite. This ensures that we do at most one shift.

        SpeculateWhicheverInt52Operand op1(this, node->child1());
        SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
        GPRTemporary result(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();

        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));

        // Same negative-zero rule as the Int32 case, on 64-bit registers.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
                MacroAssembler::NonZero, resultGPR);
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
            resultNonZero.link(&m_jit);
        }

        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();

        // Double multiply needs no checks; IEEE semantics match JS.
        m_jit.mulDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3277
// Compiles ArithDiv for Int32 (x86 idiv or ARM sdiv paths) and double use
// kinds. The Int32 result must be exact: if overflow checks are enabled we
// exit when there is a non-zero remainder, when the divisor is zero, or on
// the INT_MIN / -1 case that would overflow the hardware divide.
void SpeculativeJIT::compileArithDiv(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
#if CPU(X86) || CPU(X86_64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        // x86 idiv takes its dividend in edx:eax and writes quotient to eax,
        // remainder to edx, so we must pin both registers.
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();

        GPRReg op2TempGPR;
        GPRReg temp;
        // If the divisor already lives in eax/edx it would be clobbered;
        // move it aside into a freshly allocated register.
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }

        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);

        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);

        // Unsigned (op2 + 1) > 1 rules out both 0 and -1, the two divisors
        // that can fault or overflow idiv.
        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));

        JITCompiler::JumpList done;
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
            // are happy to fall through to a normal division, since we're just dividing
            // something by negative 1.

            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), eax.gpr());
            done.append(m_jit.jump());

            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
            done.append(m_jit.jump());

            notNeg2ToThe31.link(&m_jit);
        }

        safeDenominator.link(&m_jit);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }

        // cdq sign-extends eax into edx, setting up the 64-bit dividend.
        m_jit.move(op1GPR, eax.gpr());
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);

        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));

        done.link(&m_jit);
        int32Result(eax.gpr(), node);
#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRTemporary quotient(this);
        GPRTemporary multiplyAnswer(this);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode())) {
            // Verify exactness by re-multiplying: quotient * divisor must
            // equal the dividend without overflowing.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
        }

        int32Result(quotient.gpr(), node);
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
        break;
    }

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        // Double division needs no checks; IEEE semantics match JS.
        m_jit.divDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
3414
3415 void SpeculativeJIT::compileArithMod(Node* node)
3416 {
3417 switch (node->binaryUseKind()) {
3418 case Int32Use: {
3419 // In the fast path, the dividend value could be the final result
3420 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3421 SpeculateStrictInt32Operand op1(this, node->child1());
3422
3423 if (node->child2()->isInt32Constant()) {
3424 int32_t divisor = node->child2()->asInt32();
3425 if (divisor > 1 && hasOneBitSet(divisor)) {
3426 unsigned logarithm = WTF::fastLog2(divisor);
3427 GPRReg dividendGPR = op1.gpr();
3428 GPRTemporary result(this);
3429 GPRReg resultGPR = result.gpr();
3430
3431 // This is what LLVM generates. It's pretty crazy. Here's my
3432 // attempt at understanding it.
3433
3434 // First, compute either divisor - 1, or 0, depending on whether
3435 // the dividend is negative:
3436 //
3437 // If dividend < 0: resultGPR = divisor - 1
3438 // If dividend >= 0: resultGPR = 0
3439 m_jit.move(dividendGPR, resultGPR);
3440 m_jit.rshift32(TrustedImm32(31), resultGPR);
3441 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3442
3443 // Add in the dividend, so that:
3444 //
3445 // If dividend < 0: resultGPR = dividend + divisor - 1
3446 // If dividend >= 0: resultGPR = dividend
3447 m_jit.add32(dividendGPR, resultGPR);
3448
3449 // Mask so as to only get the *high* bits. This rounds down
3450 // (towards negative infinity) resultGPR to the nearest multiple
3451 // of divisor, so that:
3452 //
3453 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3454 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3455 //
3456 // Note that this can be simplified to:
3457 //
3458 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3459 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3460 //
3461 // Note that if the dividend is negative, resultGPR will also be negative.
3462 // Regardless of the sign of dividend, resultGPR will be rounded towards
3463 // zero, because of how things are conditionalized.
3464 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3465
3466 // Subtract resultGPR from dividendGPR, which yields the remainder:
3467 //
3468 // resultGPR = dividendGPR - resultGPR
3469 m_jit.neg32(resultGPR);
3470 m_jit.add32(dividendGPR, resultGPR);
3471
3472 if (shouldCheckNegativeZero(node->arithMode())) {
3473 // Check that we're not about to create negative zero.
3474 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3475 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3476 numeratorPositive.link(&m_jit);
3477 }
3478
3479 int32Result(resultGPR, node);
3480 return;
3481 }
3482 }
3483
3484 #if CPU(X86) || CPU(X86_64)
3485 if (node->child2()->isInt32Constant()) {
3486 int32_t divisor = node->child2()->asInt32();
3487 if (divisor && divisor != -1) {
3488 GPRReg op1Gpr = op1.gpr();
3489
3490 GPRTemporary eax(this, X86Registers::eax);
3491 GPRTemporary edx(this, X86Registers::edx);
3492 GPRTemporary scratch(this);
3493 GPRReg scratchGPR = scratch.gpr();
3494
3495 GPRReg op1SaveGPR;
3496 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3497 op1SaveGPR = allocate();
3498 ASSERT(op1Gpr != op1SaveGPR);
3499 m_jit.move(op1Gpr, op1SaveGPR);
3500 } else
3501 op1SaveGPR = op1Gpr;
3502 ASSERT(op1SaveGPR != X86Registers::eax);
3503 ASSERT(op1SaveGPR != X86Registers::edx);
3504
3505 m_jit.move(op1Gpr, eax.gpr());
3506 m_jit.move(TrustedImm32(divisor), scratchGPR);
3507 m_jit.assembler().cdq();
3508 m_jit.assembler().idivl_r(scratchGPR);
3509 if (shouldCheckNegativeZero(node->arithMode())) {
3510 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3511 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3512 numeratorPositive.link(&m_jit);
3513 }
3514
3515 if (op1SaveGPR != op1Gpr)
3516 unlock(op1SaveGPR);
3517
3518 int32Result(edx.gpr(), node);
3519 return;
3520 }
3521 }
3522 #endif
3523
3524 SpeculateInt32Operand op2(this, node->child2());
3525 #if CPU(X86) || CPU(X86_64)
3526 GPRTemporary eax(this, X86Registers::eax);
3527 GPRTemporary edx(this, X86Registers::edx);
3528 GPRReg op1GPR = op1.gpr();
3529 GPRReg op2GPR = op2.gpr();
3530
3531 GPRReg op2TempGPR;
3532 GPRReg temp;
3533 GPRReg op1SaveGPR;
3534
3535 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3536 op2TempGPR = allocate();
3537 temp = op2TempGPR;
3538 } else {
3539 op2TempGPR = InvalidGPRReg;
3540 if (op1GPR == X86Registers::eax)
3541 temp = X86Registers::edx;
3542 else
3543 temp = X86Registers::eax;
3544 }
3545
3546 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3547 op1SaveGPR = allocate();
3548 ASSERT(op1GPR != op1SaveGPR);
3549 m_jit.move(op1GPR, op1SaveGPR);
3550 } else
3551 op1SaveGPR = op1GPR;
3552
3553 ASSERT(temp != op1GPR);
3554 ASSERT(temp != op2GPR);
3555 ASSERT(op1SaveGPR != X86Registers::eax);
3556 ASSERT(op1SaveGPR != X86Registers::edx);
3557
3558 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3559
3560 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3561
3562 JITCompiler::JumpList done;
3563
3564 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3565 // separate case for that. But it probably doesn't matter so much.
3566 if (shouldCheckOverflow(node->arithMode())) {
3567 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3568 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3569 } else {
3570 // This is the case where we convert the result to an int after we're done, and we
3571 // already know that the denominator is either -1 or 0. So, if the denominator is
3572 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3573 // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3574 // happy to fall through to a normal division, since we're just dividing something
3575 // by negative 1.
3576
3577 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3578 m_jit.move(TrustedImm32(0), edx.gpr());
3579 done.append(m_jit.jump());
3580
3581 notZero.link(&m_jit);
3582 JITCompiler::Jump notNeg2ToThe31 =
3583 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3584 m_jit.move(TrustedImm32(0), edx.gpr());
3585 done.append(m_jit.jump());
3586
3587 notNeg2ToThe31.link(&m_jit);
3588 }
3589
3590 safeDenominator.link(&m_jit);
3591
3592 if (op2TempGPR != InvalidGPRReg) {
3593 m_jit.move(op2GPR, op2TempGPR);
3594 op2GPR = op2TempGPR;
3595 }
3596
3597 m_jit.move(op1GPR, eax.gpr());
3598 m_jit.assembler().cdq();
3599 m_jit.assembler().idivl_r(op2GPR);
3600
3601 if (op2TempGPR != InvalidGPRReg)
3602 unlock(op2TempGPR);
3603
3604 // Check that we're not about to create negative zero.
3605 if (shouldCheckNegativeZero(node->arithMode())) {
3606 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3607 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3608 numeratorPositive.link(&m_jit);
3609 }
3610
3611 if (op1SaveGPR != op1GPR)
3612 unlock(op1SaveGPR);
3613
3614 done.link(&m_jit);
3615 int32Result(edx.gpr(), node);
3616
3617 #elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
3618 GPRTemporary temp(this);
3619 GPRTemporary quotientThenRemainder(this);
3620 GPRTemporary multiplyAnswer(this);
3621 GPRReg dividendGPR = op1.gpr();
3622 GPRReg divisorGPR = op2.gpr();
3623 GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3624 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3625
3626 JITCompiler::JumpList done;
3627
3628 if (shouldCheckOverflow(node->arithMode()))
3629 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3630 else {
3631 JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3632 m_jit.move(divisorGPR, quotientThenRemainderGPR);
3633 done.append(m_jit.jump());
3634 denominatorNotZero.link(&m_jit);
3635 }
3636
3637 m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3638 // FIXME: It seems like there are cases where we don't need this? What if we have
3639 // arithMode() == Arith::Unchecked?
3640 // https://bugs.webkit.org/show_bug.cgi?id=126444
3641 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3642 #if HAVE(ARM_IDIV_INSTRUCTIONS)
3643 m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3644 #else
3645 m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3646 #endif
3647
3648 // If the user cares about negative zero, then speculate that we're not about
3649 // to produce negative zero.
3650 if (shouldCheckNegativeZero(node->arithMode())) {
3651 // Check that we're not about to create negative zero.
3652 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3653 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3654 numeratorPositive.link(&m_jit);
3655 }
3656
3657 done.link(&m_jit);
3658
3659 int32Result(quotientThenRemainderGPR, node);
3660 #else // not architecture that can do integer division
3661 RELEASE_ASSERT_NOT_REACHED();
3662 #endif
3663 return;
3664 }
3665
3666 case DoubleRepUse: {
3667 SpeculateDoubleOperand op1(this, node->child1());
3668 SpeculateDoubleOperand op2(this, node->child2());
3669
3670 FPRReg op1FPR = op1.fpr();
3671 FPRReg op2FPR = op2.fpr();
3672
3673 flushRegisters();
3674
3675 FPRResult result(this);
3676
3677 callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3678
3679 doubleResult(result.fpr(), node);
3680 return;
3681 }
3682
3683 default:
3684 RELEASE_ASSERT_NOT_REACHED();
3685 return;
3686 }
3687 }
3688
void SpeculativeJIT::compileArithRound(Node* node)
{
    // ArithRound only ever sees a double input in the DFG.
    ASSERT(node->child1().useKind() == DoubleRepUse);

    SpeculateDoubleOperand value(this, node->child1());
    FPRReg valueFPR = value.fpr();

    if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
        // Fast path: round-half-up implemented as truncate(value + 0.5). Only
        // valid when the sign of a zero result may be ignored, since
        // truncation loses the distinction between -0 and +0.
        FPRTemporary oneHalf(this);
        GPRTemporary roundedResultAsInt32(this);
        FPRReg oneHalfFPR = oneHalf.fpr();
        GPRReg resultGPR = roundedResultAsInt32.gpr();

        static const double halfConstant = 0.5;
        m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), oneHalfFPR);
        m_jit.addDouble(valueFPR, oneHalfFPR);

        // If the sum does not truncate to an int32, OSR exit rather than
        // produce a wrong result.
        JITCompiler::Jump truncationFailed = m_jit.branchTruncateDoubleToInt32(oneHalfFPR, resultGPR);
        speculationCheck(Overflow, JSValueRegs(), node, truncationFailed);
        int32Result(resultGPR, node);
        return;
    }

    // Slow path: call the runtime jsRound, which handles the remaining cases
    // (including negative zero) correctly.
    flushRegisters();
    FPRResult roundedResultAsDouble(this);
    FPRReg resultFPR = roundedResultAsDouble.fpr();
    callOperation(jsRound, resultFPR, valueFPR);
    if (producesInteger(node->arithRoundingMode())) {
        // The node is expected to produce an int32; convert and exit if the
        // rounded double is not representable.
        GPRTemporary roundedResultAsInt32(this);
        FPRTemporary scratch(this);
        FPRReg scratchFPR = scratch.fpr();
        GPRReg resultGPR = roundedResultAsInt32.gpr();
        JITCompiler::JumpList failureCases;
        m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
        speculationCheck(Overflow, JSValueRegs(), node, failureCases);

        int32Result(resultGPR, node);
    } else
        doubleResult(resultFPR, node);
}
3729
3730 void SpeculativeJIT::compileArithSqrt(Node* node)
3731 {
3732 SpeculateDoubleOperand op1(this, node->child1());
3733 FPRReg op1FPR = op1.fpr();
3734
3735 if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
3736 flushRegisters();
3737 FPRResult result(this);
3738 callOperation(sqrt, result.fpr(), op1FPR);
3739 doubleResult(result.fpr(), node);
3740 } else {
3741 FPRTemporary result(this, op1);
3742 m_jit.sqrtDouble(op1.fpr(), result.fpr());
3743 doubleResult(result.fpr(), node);
3744 }
3745 }
3746
// For small positive integers, it is worth doing a tiny inline loop to exponentiate the base.
// Every register is clobbered by this helper.
static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
{
    // Emits exponentiation-by-squaring for 0 <= yOperand <= 1000. xOperand
    // (the base) and yOperand (the exponent) are both destroyed. On the
    // returned jump, result holds x^y; when the exponent is out of range the
    // code falls through instead, so the caller can emit its slow path
    // immediately after this helper.
    MacroAssembler::JumpList skipFastPath;
    skipFastPath.append(assembler.branch32(MacroAssembler::LessThan, yOperand, MacroAssembler::TrustedImm32(0)));
    skipFastPath.append(assembler.branch32(MacroAssembler::GreaterThan, yOperand, MacroAssembler::TrustedImm32(1000)));

    // result starts at 1.0 and accumulates one factor of the (repeatedly
    // squared) base per set bit of the exponent.
    static const double oneConstant = 1.0;
    assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);

    MacroAssembler::Label startLoop(assembler.label());
    // Multiply into the result only when the exponent's low bit is set.
    MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
    assembler.mulDouble(xOperand, result);
    exponentIsEven.link(&assembler);
    // Square the base and shift the exponent down one bit per iteration.
    assembler.mulDouble(xOperand, xOperand);
    assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
    assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);

    // Fast path complete: jump over the caller's slow path.
    MacroAssembler::Jump skipSlowPath = assembler.jump();
    skipFastPath.link(&assembler);

    return skipSlowPath;
}
3771
void SpeculativeJIT::compileArithPow(Node* node)
{
    // When the exponent is already an int32, try the inline
    // exponentiation-by-squaring fast path before falling back to the runtime
    // operationMathPow.
    if (node->child2().useKind() == Int32Use) {
        SpeculateDoubleOperand xOperand(this, node->child1());
        SpeculateInt32Operand yOperand(this, node->child2());
        FPRReg xOperandfpr = xOperand.fpr();
        GPRReg yOperandGpr = yOperand.gpr();
        FPRTemporary yOperandfpr(this);

        flushRegisters();

        FPRResult result(this);
        FPRReg resultFpr = result.fpr();

        // The fast path clobbers its inputs, so operate on copies of the base
        // and the exponent.
        FPRTemporary xOperandCopy(this);
        FPRReg xOperandCopyFpr = xOperandCopy.fpr();
        m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);

        GPRTemporary counter(this);
        GPRReg counterGpr = counter.gpr();
        m_jit.move(yOperandGpr, counterGpr);

        MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
        // Fast path declined (exponent out of range): convert the exponent to
        // a double and call the runtime instead.
        m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
        callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());

        skipFallback.link(&m_jit);
        doubleResult(resultFpr, node);
        return;
    }

    // Double exponent: if it converts exactly to an int32 we can still use the
    // integer fast path; otherwise call the runtime.
    SpeculateDoubleOperand xOperand(this, node->child1());
    SpeculateDoubleOperand yOperand(this, node->child2());
    FPRReg xOperandfpr = xOperand.fpr();
    FPRReg yOperandfpr = yOperand.fpr();

    flushRegisters();

    FPRResult result(this);
    FPRReg resultFpr = result.fpr();

    FPRTemporary xOperandCopy(this);
    FPRReg xOperandCopyFpr = xOperandCopy.fpr();

    FPRTemporary scratch(this);
    FPRReg scratchFpr = scratch.fpr();

    GPRTemporary yOperandInteger(this);
    GPRReg yOperandIntegerGpr = yOperandInteger.gpr();
    MacroAssembler::JumpList failedExponentConversionToInteger;
    m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false);

    m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
    MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr);
    // Conversion failed (non-integral or out-of-range exponent): use the
    // runtime with the original double exponent.
    failedExponentConversionToInteger.link(&m_jit);

    callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr);
    skipFallback.link(&m_jit);
    doubleResult(resultFpr, node);
}
3832
3833 void SpeculativeJIT::compileArithLog(Node* node)
3834 {
3835 SpeculateDoubleOperand op1(this, node->child1());
3836 FPRReg op1FPR = op1.fpr();
3837 flushRegisters();
3838 FPRResult result(this);
3839 callOperation(log, result.fpr(), op1FPR);
3840 doubleResult(result.fpr(), node);
3841 }
3842
// Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // First try to fuse this compare with the branch that consumes it; if that
    // succeeds, the branch code has already been emitted.
    if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
        return true;

    // No fusable branch: dispatch on the speculated use kinds of the children
    // and emit a boolean-producing compare.
    if (node->isBinaryUseKind(Int32Use)) {
        compileInt32Compare(node, condition);
        return false;
    }

#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        compileInt52Compare(node, condition);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        compileDoubleCompare(node, doubleCondition);
        return false;
    }

    // Equality supports additional specialized use-kind pairings.
    if (node->op() == CompareEq) {
        if (node->isBinaryUseKind(StringUse)) {
            compileStringEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(BooleanUse)) {
            compileBooleanCompare(node, condition);
            return false;
        }

        if (node->isBinaryUseKind(StringIdentUse)) {
            compileStringIdentEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(ObjectUse)) {
            compileObjectEquality(node);
            return false;
        }

        // Mixed object / object-or-other comparisons: pass the edges so the
        // known-object edge comes first.
        if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
            return false;
        }

        if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
            return false;
        }
    }

    // Fully generic case: call into the runtime via the provided operation.
    nonSpeculativeNonPeepholeCompare(node, condition, operation);
    return false;
}
3901
bool SpeculativeJIT::compileStrictEq(Node* node)
{
    // Compiles CompareStrictEq. For each specialized use-kind pairing we first
    // look for a following Branch to fuse with (returning true when we did and
    // advancing m_indexInBlock/m_currentNode past it); otherwise we emit a
    // boolean-producing compare and return false.
    if (node->isBinaryUseKind(BooleanUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBooleanCompare(node, MacroAssembler::Equal);
        return false;
    }

    if (node->isBinaryUseKind(Int32Use)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt32Compare(node, MacroAssembler::Equal);
        return false;
    }

#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt52Compare(node, MacroAssembler::Equal);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileDoubleCompare(node, MacroAssembler::DoubleEqual);
        return false;
    }

    if (node->isBinaryUseKind(StringUse)) {
        compileStringEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse)) {
        compileStringIdentEquality(node);
        return false;
    }

    // Object vs. untyped: the object edge is always passed first to the
    // object-strict-equality helpers.
    if (node->isBinaryUseKind(ObjectUse, UntypedUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectStrictEquality(node->child1(), node->child2(), branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectStrictEquality(node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectStrictEquality(node->child2(), node->child1(), branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectStrictEquality(node->child2(), node->child1());
        return false;
    }

    if (node->isBinaryUseKind(ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(MiscUse, UntypedUse)
        || node->isBinaryUseKind(UntypedUse, MiscUse)) {
        compileMiscStrictEq(node);
        return false;
    }

    // StringIdent vs. not-a-string-variable: pass the string edge first.
    if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
        return false;
    }

    // String vs. untyped: pass the string edge first.
    if (node->isBinaryUseKind(StringUse, UntypedUse)) {
        compileStringToUntypedEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, StringUse)) {
        compileStringToUntypedEquality(node, node->child2(), node->child1());
        return false;
    }

    // Only the fully-untyped case should remain.
    RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
    return nonSpeculativeStrictEq(node);
}
4050
4051 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
4052 {
4053 SpeculateBooleanOperand op1(this, node->child1());
4054 SpeculateBooleanOperand op2(this, node->child2());
4055 GPRTemporary result(this);
4056
4057 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
4058
4059 unblessedBooleanResult(result.gpr(), node);
4060 }
4061
void SpeculativeJIT::compileStringEquality(
    Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
    GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
    JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
{
    // Shared tail for string equality. leftGPR/rightGPR hold JSString cells;
    // fastTrue/fastFalse are jumps the caller already resolved (e.g. pointer
    // identity, or a provably non-string right operand). Produces a boolean in
    // leftTempGPR. Strings we can't compare inline are punted to the runtime.
    JITCompiler::JumpList trueCase;
    JITCompiler::JumpList falseCase;
    JITCompiler::JumpList slowCase;

    trueCase.append(fastTrue);
    falseCase.append(fastFalse);

    m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);

    // Strings of different lengths can never be equal.
    falseCase.append(m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
        lengthGPR));

    // Two empty strings are trivially equal.
    trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));

    m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);

    // A null value pointer means the string is an unresolved rope; let the
    // runtime handle it.
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));

    // The inline loop below only handles 8-bit string data; anything else goes
    // to the runtime as well.
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));

    m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);

    // Compare the characters back-to-front, one byte per iteration.
    MacroAssembler::Label loop = m_jit.label();

    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // This isn't going to generate the best code on x86. But that's OK, it's still better
    // than not inlining.
    m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
    m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
    falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));

    m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);

    trueCase.link(&m_jit);
    moveTrueTo(leftTempGPR);

    JITCompiler::Jump done = m_jit.jump();

    falseCase.link(&m_jit);
    moveFalseTo(leftTempGPR);

    done.link(&m_jit);
    // Slow cases call operationCompareStringEq out of line.
    addSlowPathGenerator(
        slowPathCall(
            slowCase, this, operationCompareStringEq, leftTempGPR, leftGPR, rightGPR));

    blessedBooleanResult(leftTempGPR, node);
}
4128
void SpeculativeJIT::compileStringEquality(Node* node)
{
    // String == String: allocate the registers the shared equality tail needs,
    // speculate both children as strings, and treat pointer identity as a
    // fast-true case.
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this, Reuse, right);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(node->child1(), leftGPR);

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    JITCompiler::Jump fastTrue = m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR);

    speculateString(node->child2(), rightGPR);

    // No fast-false jumps here: both operands are known strings.
    compileStringEquality(
        node, leftGPR, rightGPR, lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, JITCompiler::Jump());
}
4159
void SpeculativeJIT::compileStringToUntypedEquality(Node* node, Edge stringEdge, Edge untypedEdge)
{
    // String == Untyped: the untyped side is checked dynamically. Non-cells
    // and non-string cells are immediately false; identical pointers are true;
    // everything else falls into the shared character-comparison tail.
    SpeculateCellOperand left(this, stringEdge);
    JSValueOperand right(this, untypedEdge, ManualOperandSpeculation);
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this);

    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(stringEdge, leftGPR);

    JITCompiler::JumpList fastTrue;
    JITCompiler::JumpList fastFalse;

    // A non-cell can never equal a string.
    fastFalse.append(m_jit.branchIfNotCell(rightRegs));

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    fastTrue.append(m_jit.branchPtr(
        MacroAssembler::Equal, leftGPR, rightRegs.payloadGPR()));

    // A cell that is not a string can never equal a string either.
    fastFalse.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));

    compileStringEquality(
        node, leftGPR, rightRegs.payloadGPR(), lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, fastFalse);
}
4196
4197 void SpeculativeJIT::compileStringIdentEquality(Node* node)
4198 {
4199 SpeculateCellOperand left(this, node->child1());
4200 SpeculateCellOperand right(this, node->child2());
4201 GPRTemporary leftTemp(this);
4202 GPRTemporary rightTemp(this);
4203
4204 GPRReg leftGPR = left.gpr();
4205 GPRReg rightGPR = right.gpr();
4206 GPRReg leftTempGPR = leftTemp.gpr();
4207 GPRReg rightTempGPR = rightTemp.gpr();
4208
4209 speculateString(node->child1(), leftGPR);
4210 speculateString(node->child2(), rightGPR);
4211
4212 speculateStringIdentAndLoadStorage(node->child1(), leftGPR, leftTempGPR);
4213 speculateStringIdentAndLoadStorage(node->child2(), rightGPR, rightTempGPR);
4214
4215 m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, leftTempGPR);
4216
4217 unblessedBooleanResult(leftTempGPR, node);
4218 }
4219
void SpeculativeJIT::compileStringIdentToNotStringVarEquality(
    Node* node, Edge stringEdge, Edge notStringVarEdge)
{
    // StringIdent == NotStringVar: the left side is an ident string, so when
    // the right side turns out to be a string too, equality is a StringImpl
    // storage-pointer compare; any non-string right value compares false.
    SpeculateCellOperand left(this, stringEdge);
    JSValueOperand right(this, notStringVarEdge, ManualOperandSpeculation);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();

    speculateString(stringEdge, leftGPR);
    speculateStringIdentAndLoadStorage(stringEdge, leftGPR, leftTempGPR);

    // Default the result to false; the non-string paths jump straight to the
    // end with this value.
    moveFalseTo(rightTempGPR);
    JITCompiler::JumpList notString;
    notString.append(m_jit.branchIfNotCell(rightRegs));
    notString.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));

    speculateStringIdentAndLoadStorage(notStringVarEdge, rightRegs.payloadGPR(), rightTempGPR);

    m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, rightTempGPR);
    notString.link(&m_jit);

    unblessedBooleanResult(rightTempGPR, node);
}
4247
4248 void SpeculativeJIT::compileStringZeroLength(Node* node)
4249 {
4250 SpeculateCellOperand str(this, node->child1());
4251 GPRReg strGPR = str.gpr();
4252
4253 // Make sure that this is a string.
4254 speculateString(node->child1(), strGPR);
4255
4256 GPRTemporary eq(this);
4257 GPRReg eqGPR = eq.gpr();
4258
4259 // Fetch the length field from the string object.
4260 m_jit.test32(MacroAssembler::Zero, MacroAssembler::Address(strGPR, JSString::offsetOfLength()), MacroAssembler::TrustedImm32(-1), eqGPR);
4261
4262 unblessedBooleanResult(eqGPR, node);
4263 }
4264
4265 void SpeculativeJIT::emitStringBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
4266 {
4267 SpeculateCellOperand str(this, nodeUse);
4268 speculateString(nodeUse, str.gpr());
4269 branchTest32(JITCompiler::NonZero, MacroAssembler::Address(str.gpr(), JSString::offsetOfLength()), taken);
4270 jump(notTaken);
4271 noResult(m_currentNode);
4272 }
4273
4274 void SpeculativeJIT::compileConstantStoragePointer(Node* node)
4275 {
4276 GPRTemporary storage(this);
4277 GPRReg storageGPR = storage.gpr();
4278 m_jit.move(TrustedImmPtr(node->storagePointer()), storageGPR);
4279 storageResult(storageGPR, node);
4280 }
4281
void SpeculativeJIT::compileGetIndexedPropertyStorage(Node* node)
{
    // Loads the raw data pointer used for indexed access on the base: the
    // character data for a string, or the vector for a typed array view.
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    GPRTemporary storage(this);
    GPRReg storageReg = storage.gpr();

    switch (node->arrayMode().type()) {
    case Array::String:
        m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);

        // A null value pointer means the string is a rope; resolve it on the
        // slow path so flat character data is available.
        addSlowPathGenerator(
            slowPathCall(
                m_jit.branchTest32(MacroAssembler::Zero, storageReg),
                this, operationResolveRope, storageReg, baseReg));

        m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
        break;

    default:
        // All remaining array modes here are typed array views.
        ASSERT(isTypedView(node->arrayMode().typedArrayType()));
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfVector()),
            storageReg);
        break;
    }

    storageResult(storageReg, node);
}
4312
void SpeculativeJIT::compileGetTypedArrayByteOffset(Node* node)
{
    // Computes the view's byte offset: for a wasteful typed array it is the
    // distance between the view's vector pointer and the backing ArrayBuffer's
    // data pointer; for every other mode the result is 0.
    SpeculateCellOperand base(this, node->child1());
    GPRTemporary vector(this);
    GPRTemporary data(this);

    GPRReg baseGPR = base.gpr();
    GPRReg vectorGPR = vector.gpr();
    GPRReg dataGPR = data.gpr();

    JITCompiler::Jump emptyByteOffset = m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(WastefulTypedArray));

    // Chase base->butterfly->arrayBuffer->data, and subtract that from the
    // view's vector pointer.
    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), dataGPR);
    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), vectorGPR);
    m_jit.loadPtr(MacroAssembler::Address(dataGPR, Butterfly::offsetOfArrayBuffer()), dataGPR);
    m_jit.loadPtr(MacroAssembler::Address(dataGPR, ArrayBuffer::offsetOfData()), dataGPR);
    m_jit.subPtr(dataGPR, vectorGPR);

    JITCompiler::Jump done = m_jit.jump();

    // Non-wasteful views have a byte offset of zero.
    emptyByteOffset.link(&m_jit);
    m_jit.move(TrustedImmPtr(0), vectorGPR);

    done.link(&m_jit);

    int32Result(vectorGPR, node);
}
4343
void SpeculativeJIT::compileGetByValOnDirectArguments(Node* node)
{
    // GetByVal on a DirectArguments object: a straight indexed load out of the
    // object's inline storage, with OSR exits for the exotic cases (overridden
    // arguments, out-of-bounds index).
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    GPRTemporary result(this);
#if USE(JSVALUE32_64)
    GPRTemporary resultTag(this);
#endif

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg resultReg = result.gpr();
#if USE(JSVALUE32_64)
    GPRReg resultTagReg = resultTag.gpr();
    JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg);
#else
    JSValueRegs resultRegs = JSValueRegs(resultReg);
#endif

    if (!m_compileOkay)
        return;

    ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // If the arguments object has an overrides table, bail to the baseline.
    speculationCheck(
        ExoticObjectMode, JSValueSource(), 0,
        m_jit.branchTestPtr(
            MacroAssembler::NonZero,
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides())));
    // Bounds check against the stored length.
    speculationCheck(
        ExoticObjectMode, JSValueSource(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg,
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength())));

    m_jit.loadValue(
        MacroAssembler::BaseIndex(
            baseReg, propertyReg, MacroAssembler::TimesEight, DirectArguments::storageOffset()),
        resultRegs);

    jsValueResult(resultRegs, node);
}
4386
4387 void SpeculativeJIT::compileGetByValOnScopedArguments(Node* node)
4388 {
4389 SpeculateCellOperand base(this, node->child1());
4390 SpeculateStrictInt32Operand property(this, node->child2());
4391 GPRTemporary result(this);
4392 #if USE(JSVALUE32_64)
4393 GPRTemporary resultTag(this);
4394 #endif
4395 GPRTemporary scratch(this);
4396 GPRTemporary scratch2(this);
4397
4398 GPRReg baseReg = base.gpr();
4399 GPRReg propertyReg = property.gpr();
4400 GPRReg resultReg = result.gpr();
4401 #if USE(JSVALUE32_64)
4402 GPRReg resultTagReg = resultTag.gpr();
4403 JSValueRegs resultRegs = JSValueRegs(resultTagReg, resultReg);
4404 #else
4405 JSValueRegs resultRegs = JSValueRegs(resultReg);
4406 #endif
4407 GPRReg scratchReg = scratch.gpr();
4408 GPRReg scratch2Reg = scratch2.gpr();
4409
4410 if (!m_compileOkay)
4411 return;
4412
4413 ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
4414
4415 speculationCheck(
4416 ExoticObjectMode, JSValueSource(), nullptr,
4417 m_jit.branch32(
4418 MacroAssembler::AboveOrEqual, propertyReg,
4419 MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength())));
4420
4421 m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTable()), scratchReg);
4422 m_jit.load32(
4423 MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfLength()), scratch2Reg);
4424
4425 MacroAssembler::Jump overflowArgument = m_jit.branch32(
4426 MacroAssembler::AboveOrEqual, propertyReg, scratch2Reg);
4427
4428 m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfScope()), scratch2Reg);
4429
4430 m_jit.loadPtr(
4431 MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfArguments()),
4432 scratchReg);
4433 m_jit.load32(
4434 MacroAssembler::BaseIndex(scratchReg, propertyReg, MacroAssembler::TimesFour),
4435 scratchReg);
4436
4437 speculationCheck(
4438 ExoticObjectMode, JSValueSource(), nullptr,
4439 m_jit.branch32(
4440 MacroAssembler::Equal, scratchReg, TrustedImm32(ScopeOffset::invalidOffset)));
4441
4442 m_jit.loadValue(
4443 MacroAssembler::BaseIndex(
4444 scratch2Reg, propertyReg, MacroAssembler::TimesEight,
4445 JSEnvironmentRecord::offsetOfVariables()),
4446 resultRegs);
4447
4448 MacroAssembler::Jump done = m_jit.jump();
4449 overflowArgument.link(&m_jit);
4450
4451 m_jit.sub32(propertyReg, scratch2Reg);
4452 m_jit.neg32(scratch2Reg);
4453
4454 m_jit.loadValue(
4455 MacroAssembler::BaseIndex(
4456 baseReg, scratch2Reg, MacroAssembler::TimesEight,
4457 ScopedArguments::overflowStorageOffset()),
4458 resultRegs);
4459 speculationCheck(ExoticObjectMode, JSValueSource(), nullptr, m_jit.branchIfEmpty(resultRegs));
4460
4461 done.link(&m_jit);
4462
4463 jsValueResult(resultRegs, node);
4464 }
4465
4466 void SpeculativeJIT::compileGetScope(Node* node)
4467 {
4468 SpeculateCellOperand function(this, node->child1());
4469 GPRTemporary result(this, Reuse, function);
4470 m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr());
4471 cellResult(result.gpr(), node);
4472 }
4473
4474 void SpeculativeJIT::compileSkipScope(Node* node)
4475 {
4476 SpeculateCellOperand scope(this, node->child1());
4477 GPRTemporary result(this, Reuse, scope);
4478 m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr());
4479 cellResult(result.gpr(), node);
4480 }
4481
// Emits code for GetArrayLength. The strategy depends on the array mode:
// fast JS arrays read the butterfly's public length, strings and typed
// arrays read their own length fields, and the two arguments-object modes
// must first speculate (via OSR exit) that their length has not been
// overridden by script.
void SpeculativeJIT::compileGetArrayLength(Node* node)
{
    switch (node->arrayMode().type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        // Fast indexing shapes: the length lives in the butterfly header.
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        int32Result(resultReg, node);
        break;
    }
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        // The result is reported as a signed int32, so exit if the stored
        // length does not fit in the non-negative int32 range.
        speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));

        int32Result(resultReg, node);
        break;
    }
    case Array::String: {
        // Strings carry their length directly on the JSString cell.
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
        int32Result(resultGPR, node);
        break;
    }
    case Array::DirectArguments: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);

        GPRReg baseReg = base.gpr();
        GPRReg resultReg = result.gpr();

        if (!m_compileOkay)
            return;

        ASSERT(ArrayMode(Array::DirectArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

        // If any argument has been overridden, the stored length may no
        // longer be the observable one; bail to the exotic-object path.
        speculationCheck(
            ExoticObjectMode, JSValueSource(), 0,
            m_jit.branchTestPtr(
                MacroAssembler::NonZero,
                MacroAssembler::Address(baseReg, DirectArguments::offsetOfOverrides())));

        m_jit.load32(
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength()), resultReg);

        int32Result(resultReg, node);
        break;
    }
    case Array::ScopedArguments: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);

        GPRReg baseReg = base.gpr();
        GPRReg resultReg = result.gpr();

        if (!m_compileOkay)
            return;

        ASSERT(ArrayMode(Array::ScopedArguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

        // Same idea as DirectArguments, but ScopedArguments tracks a single
        // byte-sized "overrode things" flag instead of a pointer.
        speculationCheck(
            ExoticObjectMode, JSValueSource(), 0,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(baseReg, ScopedArguments::offsetOfOverrodeThings())));

        m_jit.load32(
            MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength()), resultReg);

        int32Result(resultReg, node);
        break;
    }
    default: {
        // Remaining modes must be typed-array views; they store a plain
        // 32-bit length on the JSArrayBufferView.
        ASSERT(isTypedView(node->arrayMode().typedArrayType()));
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()), resultGPR);
        int32Result(resultGPR, node);
        break;
    } }
}
4578
// Emits code for NewFunction. If the executable's singleton-function
// watchpoint is still valid we cannot allocate inline (the runtime needs
// to manage the singleton), so we call out to operationNewFunction.
// Otherwise we fast-allocate the JSFunction and initialize its fields,
// deferring allocation failure to a slow-path call.
void SpeculativeJIT::compileNewFunction(Node* node)
{
    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    FunctionExecutable* executable = node->castOperand<FunctionExecutable*>();

    if (executable->singletonFunction()->isStillValid()) {
        // Slow case: flush and call the runtime to create the function.
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        flushRegisters();

        callOperation(operationNewFunction, resultGPR, scopeGPR, executable);
        cellResult(resultGPR, node);
        return;
    }

    Structure* structure = m_jit.graph().globalObjectFor(
        node->origin.semantic)->functionStructure();

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;
    emitAllocateJSObjectWithKnownSize<JSFunction>(
        resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0),
        scratch1GPR, scratch2GPR, slowPath, JSFunction::allocationSize(0));

    // Don't need a memory barrier since we just fast-created the function, so it
    // must be young.
    m_jit.storePtr(
        scopeGPR,
        JITCompiler::Address(resultGPR, JSFunction::offsetOfScopeChain()));
    m_jit.storePtr(
        TrustedImmPtr(executable),
        JITCompiler::Address(resultGPR, JSFunction::offsetOfExecutable()));
    m_jit.storePtr(
        TrustedImmPtr(0),
        JITCompiler::Address(resultGPR, JSFunction::offsetOfRareData()));


    // If inline allocation failed, fall back to the runtime.
    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationNewFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable));

    cellResult(resultGPR, node);
}
4631
// Emits code for ForwardVarargs: copies the caller's (possibly inlined)
// arguments into this frame's varargs area. Two loops are emitted: the
// first fills mandatory-minimum slots with undefined (covering the arity
// mismatch case), the second copies the actual argument values.
void SpeculativeJIT::compileForwardVarargs(Node* node)
{
    LoadVarargsData* data = node->loadVarargsData();
    InlineCallFrame* inlineCallFrame = node->child1()->origin.semantic.inlineCallFrame;

    GPRTemporary length(this);
    JSValueRegsTemporary temp(this);
    GPRReg lengthGPR = length.gpr();
    JSValueRegs tempRegs = temp.regs();

    // lengthGPR = argument count including |this|, minus the forwarding offset.
    emitGetLength(inlineCallFrame, lengthGPR, /* includeThis = */ true);
    if (data->offset)
        m_jit.sub32(TrustedImm32(data->offset), lengthGPR);

    // Exit if more arguments would be forwarded than the frame has room for.
    speculationCheck(
        VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
            MacroAssembler::Above,
            lengthGPR, TrustedImm32(data->limit)));

    m_jit.store32(lengthGPR, JITCompiler::payloadFor(data->machineCount));

    VirtualRegister sourceStart = JITCompiler::argumentsStart(inlineCallFrame) + data->offset;
    VirtualRegister targetStart = data->machineStart;

    // From here on lengthGPR excludes |this|.
    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // First have a loop that fills in the undefined slots in case of an arity check failure.
    // Counts down from mandatoryMinimum to lengthGPR (exclusive).
    m_jit.move(TrustedImm32(data->mandatoryMinimum), tempRegs.payloadGPR());
    JITCompiler::Jump done = m_jit.branch32(JITCompiler::BelowOrEqual, tempRegs.payloadGPR(), lengthGPR);

    JITCompiler::Label loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), tempRegs.payloadGPR());
    m_jit.storeTrustedValue(
        jsUndefined(),
        JITCompiler::BaseIndex(
            GPRInfo::callFrameRegister, tempRegs.payloadGPR(), JITCompiler::TimesEight,
            targetStart.offset() * sizeof(EncodedJSValue)));
    m_jit.branch32(JITCompiler::Above, tempRegs.payloadGPR(), lengthGPR).linkTo(loop, &m_jit);
    done.link(&m_jit);

    // And then fill in the actual argument values.
    // Counts lengthGPR down to zero, copying source slots to target slots.
    done = m_jit.branchTest32(JITCompiler::Zero, lengthGPR);

    loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), lengthGPR);
    m_jit.loadValue(
        JITCompiler::BaseIndex(
            GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
            sourceStart.offset() * sizeof(EncodedJSValue)),
        tempRegs);
    m_jit.storeValue(
        tempRegs,
        JITCompiler::BaseIndex(
            GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
            targetStart.offset() * sizeof(EncodedJSValue)));
    m_jit.branchTest32(JITCompiler::NonZero, lengthGPR).linkTo(loop, &m_jit);

    done.link(&m_jit);

    noResult(node);
}
4693
// Emits code for CreateActivation. If the symbol table's singleton-scope
// watchpoint is still valid we must create the activation through the
// runtime; otherwise we fast-allocate a JSLexicalEnvironment inline,
// initialize its scope/symbol-table fields, and undefined-fill its slots.
void SpeculativeJIT::compileCreateActivation(Node* node)
{
    SymbolTable* table = node->castOperand<SymbolTable*>();
    Structure* structure = m_jit.graph().globalObjectFor(
        node->origin.semantic)->activationStructure();

    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    if (table->singletonScope()->isStillValid()) {
        // Slow case: flush and let the runtime create the activation.
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        flushRegisters();

        callOperation(operationCreateActivationDirect, resultGPR, structure, scopeGPR, table);
        cellResult(resultGPR, node);
        return;
    }

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;
    emitAllocateJSObjectWithKnownSize<JSLexicalEnvironment>(
        resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
        slowPath, JSLexicalEnvironment::allocationSize(table));

    // Don't need a memory barrier since we just fast-created the activation, so the
    // activation must be young.
    m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, JSScope::offsetOfNext()));
    m_jit.storePtr(
        TrustedImmPtr(table),
        JITCompiler::Address(resultGPR, JSLexicalEnvironment::offsetOfSymbolTable()));

    // Must initialize all members to undefined.
    for (unsigned i = 0; i < table->scopeSize(); ++i) {
        m_jit.storeTrustedValue(
            jsUndefined(),
            JITCompiler::Address(
                resultGPR, JSLexicalEnvironment::offsetOfVariable(ScopeOffset(i))));
    }

    // If inline allocation failed, fall back to the same runtime operation.
    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table));

    cellResult(resultGPR, node);
}
4747
// Emits code for CreateDirectArguments: materializes a DirectArguments
// object from the current (possibly inlined) call frame. When the frame
// is a non-varargs inline frame the argument count is a compile-time
// constant and we allocate with a known size; otherwise the count is
// loaded at runtime and a variable-sized allocation is used.
void SpeculativeJIT::compileCreateDirectArguments(Node* node)
{
    // FIXME: A more effective way of dealing with the argument count and callee is to have
    // them be explicit arguments to this node.
    // https://bugs.webkit.org/show_bug.cgi?id=142207

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRTemporary length;
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    GPRReg lengthGPR = InvalidGPRReg;
    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(scratch1GPR, scratch2GPR);

    // Declared parameter count (excluding |this|) — the object must have at
    // least this much capacity even if fewer arguments were passed.
    unsigned minCapacity = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->numParameters() - 1;

    unsigned knownLength;
    bool lengthIsKnown; // if false, lengthGPR will have the length.
    if (node->origin.semantic.inlineCallFrame
        && !node->origin.semantic.inlineCallFrame->isVarargs()) {
        knownLength = node->origin.semantic.inlineCallFrame->arguments.size() - 1;
        lengthIsKnown = true;
    } else {
        knownLength = UINT_MAX;
        lengthIsKnown = false;

        // Only allocate the length register when it is actually needed.
        GPRTemporary realLength(this);
        length.adopt(realLength);
        lengthGPR = length.gpr();

        VirtualRegister argumentCountRegister;
        if (!node->origin.semantic.inlineCallFrame)
            argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
        else
            argumentCountRegister = node->origin.semantic.inlineCallFrame->argumentCountRegister;
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        // Drop |this| from the count.
        m_jit.sub32(TrustedImm32(1), lengthGPR);
    }

    Structure* structure =
        m_jit.graph().globalObjectFor(node->origin.semantic)->directArgumentsStructure();

    // Use a different strategy for allocating the object depending on whether we know its
    // size statically.
    JITCompiler::JumpList slowPath;
    if (lengthIsKnown) {
        emitAllocateJSObjectWithKnownSize<DirectArguments>(
            resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
            slowPath, DirectArguments::allocationSize(std::max(knownLength, minCapacity)));

        m_jit.store32(
            TrustedImm32(knownLength),
            JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
    } else {
        // Compute the allocation size at runtime:
        // max(storageOffset + length * 8, allocationSize(minCapacity)).
        JITCompiler::Jump tooFewArguments;
        if (minCapacity) {
            tooFewArguments =
                m_jit.branch32(JITCompiler::Below, lengthGPR, TrustedImm32(minCapacity));
        }
        m_jit.lshift32(lengthGPR, TrustedImm32(3), scratch1GPR);
        m_jit.add32(TrustedImm32(DirectArguments::storageOffset()), scratch1GPR);
        if (minCapacity) {
            JITCompiler::Jump done = m_jit.jump();
            tooFewArguments.link(&m_jit);
            m_jit.move(TrustedImm32(DirectArguments::allocationSize(minCapacity)), scratch1GPR);
            done.link(&m_jit);
        }

        emitAllocateVariableSizedJSObject<DirectArguments>(
            resultGPR, TrustedImmPtr(structure), scratch1GPR, scratch1GPR, scratch2GPR,
            slowPath);

        m_jit.store32(
            lengthGPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
    }

    m_jit.store32(
        TrustedImm32(minCapacity),
        JITCompiler::Address(resultGPR, DirectArguments::offsetOfMinCapacity()));

    // Freshly created arguments have no overridden slots.
    m_jit.storePtr(
        TrustedImmPtr(0), JITCompiler::Address(resultGPR, DirectArguments::offsetOfOverrides()));

    if (lengthIsKnown) {
        addSlowPathGenerator(
            slowPathCall(
                slowPath, this, operationCreateDirectArguments, resultGPR, structure,
                knownLength, minCapacity));
    } else {
        // The runtime-length slow path needs a custom generator so it can
        // pass lengthGPR through to the operation.
        auto generator = std::make_unique<CallCreateDirectArgumentsSlowPathGenerator>(
            slowPath, this, resultGPR, structure, lengthGPR, minCapacity);
        addSlowPathGenerator(WTF::move(generator));
    }

    // Fetch the callee: either recovered from the inline frame (closure
    // calls store it in a stack slot; otherwise it's a known constant) or
    // loaded from the machine frame's Callee slot.
    if (node->origin.semantic.inlineCallFrame) {
        if (node->origin.semantic.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(
                    node->origin.semantic.inlineCallFrame->calleeRecovery.virtualRegister()),
                scratch1GPR);
        } else {
            m_jit.move(
                TrustedImmPtr(
                    node->origin.semantic.inlineCallFrame->calleeRecovery.constant().asCell()),
                scratch1GPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratch1GPR);

    // Don't need a memory barrier since we just fast-created the object, so it
    // must be young.
    m_jit.storePtr(
        scratch1GPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfCallee()));

    // Copy the argument values out of the frame into the object's slots.
    VirtualRegister start = m_jit.argumentsStart(node->origin.semantic);
    if (lengthIsKnown) {
        // Statically unrolled copy.
        for (unsigned i = 0; i < std::max(knownLength, minCapacity); ++i) {
            m_jit.loadValue(JITCompiler::addressFor(start + i), valueRegs);
            m_jit.storeValue(
                valueRegs, JITCompiler::Address(resultGPR, DirectArguments::offsetOfSlot(i)));
        }
    } else {
        // Runtime loop, counting lengthGPR down to zero. If minCapacity is
        // nonzero, clamp the count up to minCapacity first.
        JITCompiler::Jump done;
        if (minCapacity) {
            JITCompiler::Jump startLoop = m_jit.branch32(
                JITCompiler::AboveOrEqual, lengthGPR, TrustedImm32(minCapacity));
            m_jit.move(TrustedImm32(minCapacity), lengthGPR);
            startLoop.link(&m_jit);
        } else
            done = m_jit.branchTest32(MacroAssembler::Zero, lengthGPR);
        JITCompiler::Label loop = m_jit.label();
        m_jit.sub32(TrustedImm32(1), lengthGPR);
        m_jit.loadValue(
            JITCompiler::BaseIndex(
                GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
                start.offset() * static_cast<int>(sizeof(Register))),
            valueRegs);
        m_jit.storeValue(
            valueRegs,
            JITCompiler::BaseIndex(
                resultGPR, lengthGPR, JITCompiler::TimesEight,
                DirectArguments::storageOffset()));
        m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
        if (done.isSet())
            done.link(&m_jit);
    }

    cellResult(resultGPR, node);
}
4899
4900 void SpeculativeJIT::compileGetFromArguments(Node* node)
4901 {
4902 SpeculateCellOperand arguments(this, node->child1());
4903 JSValueRegsTemporary result(this);
4904
4905 GPRReg argumentsGPR = arguments.gpr();
4906 JSValueRegs resultRegs = result.regs();
4907
4908 m_jit.loadValue(JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())), resultRegs);
4909 jsValueResult(resultRegs, node);
4910 }
4911
4912 void SpeculativeJIT::compilePutToArguments(Node* node)
4913 {
4914 SpeculateCellOperand arguments(this, node->child1());
4915 JSValueOperand value(this, node->child2());
4916
4917 GPRReg argumentsGPR = arguments.gpr();
4918 JSValueRegs valueRegs = value.jsValueRegs();
4919
4920 m_jit.storeValue(valueRegs, JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())));
4921 noResult(node);
4922 }
4923
// Emits code for CreateScopedArguments. There is no inline fast path:
// we always call operationCreateScopedArguments, but we marshal the five
// arguments into the argument registers ourselves (having flushed the
// whole register file) to avoid a multi-register shuffle.
void SpeculativeJIT::compileCreateScopedArguments(Node* node)
{
    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    flushRegisters();

    // We set up the arguments ourselves, because we have the whole register file and we can
    // set them up directly into the argument registers. This also means that we don't have to
    // invent a four-argument-register shuffle.

    // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee, 5:scope

    // Do the scopeGPR first, since it might alias an argument register.
    m_jit.setupArgument(5, [&] (GPRReg destGPR) { m_jit.move(scopeGPR, destGPR); });

    // These other things could be done in any order.
    m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
    m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
    m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
    m_jit.setupArgument(
        1, [&] (GPRReg destGPR) {
            m_jit.move(
                TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)->scopedArgumentsStructure()),
                destGPR);
        });
    m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });

    appendCallWithExceptionCheckSetResult(operationCreateScopedArguments, resultGPR);

    cellResult(resultGPR, node);
}
4958
// Emits code for CreateClonedArguments. Like CreateScopedArguments, this
// is always an out-of-line call; the four operation arguments are placed
// directly into the argument registers after a full flush.
void SpeculativeJIT::compileCreateClonedArguments(Node* node)
{
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    flushRegisters();

    // We set up the arguments ourselves, because we have the whole register file and we can
    // set them up directly into the argument registers.

    // Arguments: 0:exec, 1:structure, 2:start, 3:length, 4:callee
    m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
    m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
    m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
    m_jit.setupArgument(
        1, [&] (GPRReg destGPR) {
            m_jit.move(
                TrustedImmPtr(
                    m_jit.globalObjectFor(node->origin.semantic)->outOfBandArgumentsStructure()),
                destGPR);
        });
    m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });

    appendCallWithExceptionCheckSetResult(operationCreateClonedArguments, resultGPR);

    cellResult(resultGPR, node);
}
4985
4986 void SpeculativeJIT::compileNotifyWrite(Node* node)
4987 {
4988 WatchpointSet* set = node->watchpointSet();
4989
4990 JITCompiler::Jump slowCase = m_jit.branch8(
4991 JITCompiler::NotEqual,
4992 JITCompiler::AbsoluteAddress(set->addressOfState()),
4993 TrustedImm32(IsInvalidated));
4994
4995 addSlowPathGenerator(
4996 slowPathCall(slowCase, this, operationNotifyWrite, NoResult, set));
4997
4998 noResult(node);
4999 }
5000
// Peephole: tries to fuse this node with an immediately following Branch
// that consumes its result. Returns false if no such branch follows, in
// which case the caller emits the general form. When fusing, only the
// boolean outcome is needed, so operationRegExpTest is called rather than
// materializing a full match result.
// NOTE(review): despite the name, this emits operationRegExpTest — that
// appears intentional for the branch-fused form; confirm against callers.
bool SpeculativeJIT::compileRegExpExec(Node* node)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock == UINT_MAX)
        return false;
    Node* branchNode = m_block->at(branchIndexInBlock);
    ASSERT(node->adjustedRefCount() == 1);

    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // If the taken block is the fall-through block, invert the condition and
    // swap the targets so we can branch on the opposite outcome instead.
    bool invert = false;
    if (taken == nextBlock()) {
        invert = true;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand base(this, node->child1());
    SpeculateCellOperand argument(this, node->child2());
    GPRReg baseGPR = base.gpr();
    GPRReg argumentGPR = argument.gpr();

    flushRegisters();
    GPRFlushedCallResult result(this);
    callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);

    branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
    jump(notTaken);

    // Consume the children here, and advance past the fused branch so the
    // main loop does not compile it again.
    use(node->child1());
    use(node->child2());
    m_indexInBlock = branchIndexInBlock;
    m_currentNode = branchNode;

    return true;
}
5039
// Emits code for IsObjectOrNull (the "typeof x == 'object'" style check):
// true for null and for plain objects; false for other non-cells, for
// functions, and for non-object cells. Objects that masquerade as
// undefined or have call traps go to a slow-path runtime call.
void SpeculativeJIT::compileIsObjectOrNull(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump isCell = m_jit.branchIfCell(valueRegs);

    // Non-cell path: null is true, everything else is false.
    JITCompiler::Jump isNull = m_jit.branchIfEqual(valueRegs, jsNull());
    JITCompiler::Jump isNonNullNonCell = m_jit.jump();

    isCell.link(&m_jit);
    JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
    JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());

    // Exotic objects need the runtime to decide.
    JITCompiler::Jump slowPath = m_jit.branchTest8(
        JITCompiler::NonZero,
        JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
        TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));

    // Plain objects fall through to here, sharing the "true" result with null.
    isNull.link(&m_jit);
    m_jit.move(TrustedImm32(1), resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    isNonNullNonCell.link(&m_jit);
    isFunction.link(&m_jit);
    notObject.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationObjectIsObject, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, node);
}
5082
// Emits code for IsFunction: true for function cells, false for non-cells
// and non-function objects. Objects with the masquerades-as-undefined or
// typeof-call-get-call-data flags go to a slow-path runtime call.
void SpeculativeJIT::compileIsFunction(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs);
    JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
    JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());

    // Exotic objects need the runtime to decide.
    JITCompiler::Jump slowPath = m_jit.branchTest8(
        JITCompiler::NonZero,
        JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
        TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData));

    // Plain non-function objects fall through to the "false" result.
    notCell.link(&m_jit);
    notObject.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    isFunction.link(&m_jit);
    m_jit.move(TrustedImm32(1), resultGPR);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationObjectIsFunction, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, node);
}
5119
// Emits code for TypeOf. Delegates the type dispatch to
// JITCompiler::emitTypeOf, loading the interned small-string for each
// resolved TypeofType; cases the dispatcher cannot decide inline are
// routed to operationTypeOfObject on the slow path.
void SpeculativeJIT::compileTypeOf(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList done;
    JITCompiler::Jump slowPath;
    m_jit.emitTypeOf(
        valueRegs, resultGPR,
        [&] (TypeofType type, bool fallsThrough) {
            // Resolved case: load the canonical type string; jump to the
            // merge point unless this case falls through to it anyway.
            m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.typeString(type)), resultGPR);
            if (!fallsThrough)
                done.append(m_jit.jump());
        },
        [&] (JITCompiler::Jump theSlowPath) {
            // Capture the unresolved-case jump for the slow path below.
            slowPath = theSlowPath;
        });
    done.link(&m_jit);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationTypeOfObject, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    cellResult(resultGPR, node);
}
5151
// Emits code for AllocatePropertyStorage: gives an object its first
// out-of-line property butterfly (initialOutOfLineCapacity slots). If the
// previous structure could have an indexing header we cannot do a simple
// inline allocation and must call the runtime to reallocate the butterfly.
void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
    if (node->transition()->previous->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRFlushedCallResult result(this);
        callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR);

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    GPRTemporary scratch1(this);

    GPRReg baseGPR = base.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();

    // This fast path is only valid when transitioning from zero out-of-line
    // capacity to the initial capacity.
    ASSERT(!node->transition()->previous->outOfLineCapacity());
    ASSERT(initialOutOfLineCapacity == node->transition()->next->outOfLineCapacity());

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(
            TrustedImm32(initialOutOfLineCapacity * sizeof(JSValue)), scratchGPR1);

    // The butterfly pointer points just past the indexing header; adjust it.
    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorageWithInitialCapacity, scratchGPR1));

    m_jit.storePtr(scratchGPR1, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR1, node);
}
5190
// Emits code for ReallocatePropertyStorage: grows an object's out-of-line
// property butterfly by outOfLineGrowthFactor, copying the existing
// properties over. The indexing-header case must go through the runtime.
void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
{
    size_t oldSize = node->transition()->previous->outOfLineCapacity() * sizeof(JSValue);
    size_t newSize = oldSize * outOfLineGrowthFactor;
    ASSERT(newSize == node->transition()->next->outOfLineCapacity() * sizeof(JSValue));

    if (node->transition()->previous->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRFlushedCallResult result(this);
        callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue));

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    StorageOperand oldStorage(this, node->child2());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg baseGPR = base.gpr();
    GPRReg oldStorageGPR = oldStorage.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(TrustedImm32(newSize), scratchGPR1);

    // The butterfly pointer points just past the indexing header; adjust it.
    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorage, scratchGPR1, newSize / sizeof(JSValue)));

    // We have scratchGPR1 = new storage, scratchGPR2 = scratch
    // Out-of-line properties live at negative offsets from the butterfly
    // pointer, hence the negated offsets in this copy loop.
    for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(oldSize); offset += sizeof(void*)) {
        m_jit.loadPtr(JITCompiler::Address(oldStorageGPR, -(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
        m_jit.storePtr(scratchGPR2, JITCompiler::Address(scratchGPR1, -(offset + sizeof(JSValue) + sizeof(void*))));
    }
    m_jit.storePtr(scratchGPR1, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR1, node);
}
5238
5239 GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode)
5240 {
5241 if (!putByValWillNeedExtraRegister(arrayMode))
5242 return InvalidGPRReg;
5243
5244 GPRTemporary realTemporary(this);
5245 temporary.adopt(realTemporary);
5246 return temporary.gpr();
5247 }
5248
// Emits code for ToString / CallStringConstructor when the operand is
// known to be a cell. Three use kinds are handled: StringObjectUse
// (unwrap the boxed string), StringOrStringObjectUse (check which it is,
// unwrapping only string objects), and CellUse (full runtime call, with a
// fast path when the value is predicted to be a string).
void SpeculativeJIT::compileToStringOrCallStringConstructorOnCell(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    GPRReg op1GPR = op1.gpr();

    switch (node->child1().useKind()) {
    case StringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        speculateStringObject(node->child1(), op1GPR);
        // Tell the abstract interpreter what we just proved.
        m_interpreter.filter(node->child1(), SpecStringObject);

        // Unwrap the JSString out of the StringObject wrapper.
        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
        cellResult(resultGPR, node);
        break;
    }

    case StringOrStringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        // Distinguish string from string object by structure.
        m_jit.load32(JITCompiler::Address(op1GPR, JSCell::structureIDOffset()), resultGPR);
        JITCompiler::Jump isString = m_jit.branchStructurePtr(
            JITCompiler::Equal,
            resultGPR,
            m_jit.vm()->stringStructure.get());

        // Not a string: it must be a StringObject, or we OSR exit.
        speculateStringObjectForStructure(node->child1(), resultGPR);

        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);

        JITCompiler::Jump done = m_jit.jump();
        isString.link(&m_jit);
        // Strings pass through unchanged.
        m_jit.move(op1GPR, resultGPR);
        done.link(&m_jit);

        m_interpreter.filter(node->child1(), SpecString | SpecStringObject);

        cellResult(resultGPR, node);
        break;
    }

    case CellUse: {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        // We flush registers instead of silent spill/fill because in this mode we
        // believe that most likely the input is not a string, and we need to take
        // slow path.
        flushRegisters();
        JITCompiler::Jump done;
        if (node->child1()->prediction() & SpecString) {
            JITCompiler::Jump needCall = m_jit.branchIfNotString(op1GPR);
            m_jit.move(op1GPR, resultGPR);
            done = m_jit.jump();
            needCall.link(&m_jit);
        }
        if (node->op() == ToString)
            callOperation(operationToStringOnCell, resultGPR, op1GPR);
        else {
            ASSERT(node->op() == CallStringConstructor);
            callOperation(operationCallStringConstructorOnCell, resultGPR, op1GPR);
        }
        if (done.isSet())
            done.link(&m_jit);
        cellResult(resultGPR, node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
5323
// Emits code for NewStringObject: fast-allocates a StringObject wrapping
// the operand string cell, with a slow-path runtime call if inline
// allocation fails. The wrapped value store is platform-dependent: one
// 64-bit store on JSVALUE64, separate tag/payload stores on 32-bit.
void SpeculativeJIT::compileNewStringObject(Node* node)
{
    SpeculateCellOperand operand(this, node->child1());

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg operandGPR = operand.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;

    emitAllocateJSObject<StringObject>(
        resultGPR, TrustedImmPtr(node->structure()), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
        slowPath);

    m_jit.storePtr(
        TrustedImmPtr(StringObject::info()),
        JITCompiler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
#if USE(JSVALUE64)
    // On 64-bit, a cell pointer is already a complete JSValue encoding.
    m_jit.store64(
        operandGPR, JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset()));
#else
    // On 32-bit, write the cell tag and the payload separately.
    m_jit.store32(
        TrustedImm32(JSValue::CellTag),
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
    m_jit.store32(
        operandGPR,
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif

    addSlowPathGenerator(slowPathCall(
        slowPath, this, operationNewStringObject, resultGPR, operandGPR, node->structure()));

    cellResult(resultGPR, node);
}
5363
// Emits code for NewTypedArray with an int32 size operand. The fast path
// bump-allocates both the backing storage and the JSArrayBufferView;
// oversized, zero-length, or allocation-failure cases go to the runtime.
void SpeculativeJIT::compileNewTypedArray(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
    TypedArrayType type = node->typedArrayType();
    Structure* structure = globalObject->typedArrayStructure(type);

    SpeculateInt32Operand size(this, node->child1());
    GPRReg sizeGPR = size.gpr();

    GPRTemporary result(this);
    GPRTemporary storage(this);
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg storageGPR = storage.gpr();
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::JumpList slowCases;

    // Fast path only handles 0 < size <= fastSizeLimit.
    slowCases.append(m_jit.branch32(
        MacroAssembler::Above, sizeGPR, TrustedImm32(JSArrayBufferView::fastSizeLimit)));
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, sizeGPR));

    // scratchGPR = byte size, rounded up to a multiple of 8 for sub-8-byte
    // element types.
    m_jit.move(sizeGPR, scratchGPR);
    m_jit.lshift32(TrustedImm32(logElementSize(type)), scratchGPR);
    if (elementSize(type) < 8) {
        m_jit.add32(TrustedImm32(7), scratchGPR);
        m_jit.and32(TrustedImm32(~7), scratchGPR);
    }
    slowCases.append(
        emitAllocateBasicStorage(scratchGPR, storageGPR));

    // emitAllocateBasicStorage leaves the end of the allocation in
    // storageGPR; back up by the size to get the base.
    m_jit.subPtr(scratchGPR, storageGPR);

    emitAllocateJSObject<JSArrayBufferView>(
        resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, scratchGPR2,
        slowCases);

    m_jit.storePtr(
        storageGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfVector()));
    m_jit.store32(
        sizeGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfLength()));
    m_jit.store32(
        TrustedImm32(FastTypedArray),
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfMode()));

#if USE(JSVALUE32_64)
    // Zero-fill the storage in 4-byte words; scratchGPR is set to the
    // number of words (byte size rounded up to a word multiple).
    // NOTE(review): this zero-fill is only emitted on 32-bit — presumably
    // the 64-bit configuration receives zeroed storage another way; confirm.
    MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, sizeGPR);
    m_jit.move(sizeGPR, scratchGPR);
    if (elementSize(type) != 4) {
        if (elementSize(type) > 4)
            m_jit.lshift32(TrustedImm32(logElementSize(type) - 2), scratchGPR);
        else {
            if (elementSize(type) > 1)
                m_jit.lshift32(TrustedImm32(logElementSize(type)), scratchGPR);
            m_jit.add32(TrustedImm32(3), scratchGPR);
            m_jit.urshift32(TrustedImm32(2), scratchGPR);
        }
    }
    MacroAssembler::Label loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), scratchGPR);
    m_jit.store32(
        TrustedImm32(0),
        MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesFour));
    m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
    done.link(&m_jit);
#endif // USE(JSVALUE32_64)

    addSlowPathGenerator(slowPathCall(
        slowCases, this, operationNewTypedArrayWithSizeForType(type),
        resultGPR, structure, sizeGPR));

    cellResult(resultGPR, node);
}
5441
5442 void SpeculativeJIT::speculateCellTypeWithoutTypeFiltering(
5443 Edge edge, GPRReg cellGPR, JSType jsType)
5444 {
5445 speculationCheck(
5446 BadType, JSValueSource::unboxedCell(cellGPR), edge,
5447 m_jit.branch8(
5448 MacroAssembler::NotEqual,
5449 MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
5450 MacroAssembler::TrustedImm32(jsType)));
5451 }
5452
// Emits a type check that the cell's JSType byte equals jsType.
// NOTE(review): DFG_TYPE_CHECK presumably guards on needsTypeCheck and
// filters the edge's proven type down to specType — confirm against the
// macro definition in DFGSpeculativeJIT.h.
void SpeculativeJIT::speculateCellType(
    Edge edge, GPRReg cellGPR, SpeculatedType specType, JSType jsType)
{
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(cellGPR), edge, specType,
        m_jit.branch8(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
            TrustedImm32(jsType)));
}
5463
5464 void SpeculativeJIT::speculateInt32(Edge edge)
5465 {
5466 if (!needsTypeCheck(edge, SpecInt32))
5467 return;
5468
5469 (SpeculateInt32Operand(this, edge)).gpr();
5470 }
5471
// Speculates that the edge's value is a bytecode number (int32 or double),
// OSR-exiting otherwise.
void SpeculativeJIT::speculateNumber(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBytecodeNumber))
        return;
    
    JSValueOperand value(this, edge, ManualOperandSpeculation);
#if USE(JSVALUE64)
    GPRReg gpr = value.gpr();
    // With NaN-boxing, every number has at least one bit of the
    // tag-type-number mask set; a zero test result means "not a number".
    typeCheck(
        JSValueRegs(gpr), edge, SpecBytecodeNumber,
        m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
#else
    GPRReg tagGPR = value.tagGPR();
    // 32-bit tag scheme: double values use tags below LowestTag, while
    // Int32Tag (and the other immediate/cell tags) sit at or above it.
    // NOTE(review): the first check dispatches on tag == Int32Tag and the
    // second rejects tags >= LowestTag; confirm the exact pass/fail and
    // filtering semantics against DFG_TYPE_CHECK in DFGSpeculativeJIT.h.
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, ~SpecInt32,
        m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)));
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, SpecBytecodeNumber,
        m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
#endif
}
5493
// Speculates that the edge's value is a "real" number: an int32, or a double
// that is not NaN (impure NaN patterns would be indistinguishable from
// boxed non-numbers).
void SpeculativeJIT::speculateRealNumber(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBytecodeRealNumber))
        return;
    
    JSValueOperand op1(this, edge, ManualOperandSpeculation);
    FPRTemporary result(this);
    
    JSValueRegs op1Regs = op1.jsValueRegs();
    FPRReg resultFPR = result.fpr();
    
#if USE(JSVALUE64)
    // Unbox without asserting that the value actually is a double; a bogus
    // bit pattern just produces a value that fails the self-equality test.
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    m_jit.move(op1Regs.gpr(), tempGPR);
    m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
#else
    FPRTemporary temp(this);
    FPRReg tempFPR = temp.fpr();
    unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
#endif
    
    // A double compares equal to itself iff it is not NaN; such values pass
    // immediately.
    JITCompiler::Jump done = m_jit.branchDouble(
        JITCompiler::DoubleEqual, resultFPR, resultFPR);
    
    // Otherwise the original JSValue must be an int32 to pass the check.
    typeCheck(op1Regs, edge, SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
    
    done.link(&m_jit);
}
5523
5524 void SpeculativeJIT::speculateDoubleRepReal(Edge edge)
5525 {
5526 if (!needsTypeCheck(edge, SpecDoubleReal))
5527 return;
5528
5529 SpeculateDoubleOperand operand(this, edge);
5530 FPRReg fpr = operand.fpr();
5531 typeCheck(
5532 JSValueRegs(), edge, SpecDoubleReal,
5533 m_jit.branchDouble(
5534 MacroAssembler::DoubleNotEqualOrUnordered, fpr, fpr));
5535 }
5536
5537 void SpeculativeJIT::speculateBoolean(Edge edge)
5538 {
5539 if (!needsTypeCheck(edge, SpecBoolean))
5540 return;
5541
5542 (SpeculateBooleanOperand(this, edge)).gpr();
5543 }
5544
5545 void SpeculativeJIT::speculateCell(Edge edge)
5546 {
5547 if (!needsTypeCheck(edge, SpecCell))
5548 return;
5549
5550 (SpeculateCellOperand(this, edge)).gpr();
5551 }
5552
// Speculates that the edge's value is an object: first a cell check (via the
// operand), then an object check on the unboxed cell.
void SpeculativeJIT::speculateObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecObject))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchIfNotObject(gpr));
}
5563
5564 void SpeculativeJIT::speculateFunction(Edge edge)
5565 {
5566 if (!needsTypeCheck(edge, SpecFunction))
5567 return;
5568
5569 SpeculateCellOperand operand(this, edge);
5570 speculateCellType(edge, operand.gpr(), SpecFunction, JSFunctionType);
5571 }
5572
5573 void SpeculativeJIT::speculateFinalObject(Edge edge)
5574 {
5575 if (!needsTypeCheck(edge, SpecFinalObject))
5576 return;
5577
5578 SpeculateCellOperand operand(this, edge);
5579 speculateCellType(edge, operand.gpr(), SpecFinalObject, FinalObjectType);
5580 }
5581
// Speculates that the edge's value is either an object or "other"
// (undefined/null). Cells must be objects; non-cells must be other.
void SpeculativeJIT::speculateObjectOrOther(Edge edge)
{
    if (!needsTypeCheck(edge, SpecObject | SpecOther))
        return;
    
    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    // Split on cell-ness first; the payload register only names the cell on
    // the cell path.
    MacroAssembler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
    GPRReg gpr = operand.jsValueRegs().payloadGPR();
    // Cell path: must be an object.
    DFG_TYPE_CHECK(
        operand.jsValueRegs(), edge, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(gpr));
    MacroAssembler::Jump done = m_jit.jump();
    notCell.link(&m_jit);
    // Non-cell path: must be other (undefined/null), when that still needs
    // checking for this edge.
    if (needsTypeCheck(edge, SpecCell | SpecOther)) {
        typeCheck(
            operand.jsValueRegs(), edge, SpecCell | SpecOther,
            m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
    }
    done.link(&m_jit);
}
5603
// Speculates that the given cell (already known to be a cell) is a JSString.
void SpeculativeJIT::speculateString(Edge edge, GPRReg cell)
{
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(cell), edge, SpecString | ~SpecCell, m_jit.branchIfNotString(cell));
}
5609
// Speculates that the given string is an "ident" string — non-rope (its
// StringImpl pointer is non-null) and atomic — and leaves the StringImpl
// pointer in 'storage'. The storage load happens unconditionally so callers
// get the pointer even when no check is needed.
void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage)
{
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), storage);
    
    if (!needsTypeCheck(edge, SpecStringIdent | ~SpecString))
        return;
    
    // A null StringImpl means the string is a rope.
    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge,
        m_jit.branchTestPtr(MacroAssembler::Zero, storage));
    // The atomic flag identifies uniqued (identifier) strings.
    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge, m_jit.branchTest32(
            MacroAssembler::Zero,
            MacroAssembler::Address(storage, StringImpl::flagsOffset()),
            MacroAssembler::TrustedImm32(StringImpl::flagIsAtomic())));
    
    // Tell the abstract interpreter what we just proved.
    m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
}
5628
5629 void SpeculativeJIT::speculateStringIdent(Edge edge, GPRReg string)
5630 {
5631 if (!needsTypeCheck(edge, SpecStringIdent))
5632 return;
5633
5634 GPRTemporary temp(this);
5635 speculateStringIdentAndLoadStorage(edge, string, temp.gpr());
5636 }
5637
5638 void SpeculativeJIT::speculateStringIdent(Edge edge)
5639 {
5640 if (!needsTypeCheck(edge, SpecStringIdent))
5641 return;
5642
5643 SpeculateCellOperand operand(this, edge);
5644 GPRReg gpr = operand.gpr();
5645 speculateString(edge, gpr);
5646 speculateStringIdent(edge, gpr);
5647 }
5648
5649 void SpeculativeJIT::speculateString(Edge edge)
5650 {
5651 if (!needsTypeCheck(edge, SpecString))
5652 return;
5653
5654 SpeculateCellOperand operand(this, edge);
5655 speculateString(edge, operand.gpr());
5656 }
5657
// Speculates that the given cell is a StringObject by checking its structure
// (loaded from the cell's structure ID slot).
void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg gpr)
{
    speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureIDOffset()));
}
5662
// Speculates that the edge's value is a StringObject, then narrows the
// proven type.
void SpeculativeJIT::speculateStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecStringObject))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    // NOTE(review): this re-check mirrors speculateStringOrStringObject();
    // it looks redundant unless materializing the operand can change the
    // proven type — confirm before simplifying.
    if (!needsTypeCheck(edge, SpecStringObject))
        return;
    
    speculateStringObject(edge, gpr);
    m_interpreter.filter(edge, SpecStringObject);
}
5676
// Speculates that the edge's value is either a JSString or a StringObject:
// strings (identified by the VM's shared string structure) pass directly;
// anything else must satisfy the StringObject structure check.
void SpeculativeJIT::speculateStringOrStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    // NOTE(review): re-checked after materializing the operand, mirroring
    // speculateStringObject() — confirm whether this can ever differ.
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;
    
    GPRTemporary structureID(this);
    GPRReg structureIDGPR = structureID.gpr();
    
    m_jit.load32(JITCompiler::Address(gpr, JSCell::structureIDOffset()), structureIDGPR);
    JITCompiler::Jump isString = m_jit.branchStructurePtr(
        JITCompiler::Equal,
        structureIDGPR,
        m_jit.vm()->stringStructure.get());
    
    // Not a string: must be a StringObject (checked against its structure).
    speculateStringObjectForStructure(edge, structureIDGPR);
    
    isString.link(&m_jit);
    
    m_interpreter.filter(edge, SpecString | SpecStringObject);
}
5702
// Speculates that the edge's value is not a "string variable" — i.e. if it
// happens to be a string, it must be an ident (atomic, non-rope) string.
// Non-cells and non-string cells pass without any check.
void SpeculativeJIT::speculateNotStringVar(Edge edge)
{
    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    
    JITCompiler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
    GPRReg cell = operand.jsValueRegs().payloadGPR();
    
    JITCompiler::Jump notString = m_jit.branchIfNotString(cell);
    
    // It is a string: require it to be an ident string.
    speculateStringIdentAndLoadStorage(edge, cell, tempGPR);
    
    notString.link(&m_jit);
    notCell.link(&m_jit);
}
5719
5720 void SpeculativeJIT::speculateNotCell(Edge edge)
5721 {
5722 if (!needsTypeCheck(edge, ~SpecCell))
5723 return;
5724
5725 JSValueOperand operand(this, edge, ManualOperandSpeculation);
5726 typeCheck(operand.jsValueRegs(), edge, ~SpecCell, m_jit.branchIfCell(operand.jsValueRegs()));
5727 }
5728
5729 void SpeculativeJIT::speculateOther(Edge edge)
5730 {
5731 if (!needsTypeCheck(edge, SpecOther))
5732 return;
5733
5734 JSValueOperand operand(this, edge, ManualOperandSpeculation);
5735 GPRTemporary temp(this);
5736 GPRReg tempGPR = temp.gpr();
5737 typeCheck(
5738 operand.jsValueRegs(), edge, SpecOther,
5739 m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
5740 }
5741
// Speculates that the value in 'regs' is "misc" — boolean, undefined, or
// null — i.e. the small set of tagged immediates, not a number or cell.
void SpeculativeJIT::speculateMisc(Edge edge, JSValueRegs regs)
{
#if USE(JSVALUE64)
    // All misc values are encoded at or below the combined tag-bit pattern;
    // anything numerically above it fails.
    DFG_TYPE_CHECK(
        regs, edge, SpecMisc,
        m_jit.branch64(MacroAssembler::Above, regs.gpr(), MacroAssembler::TrustedImm64(TagBitTypeOther | TagBitBool | TagBitUndefined)));
#else
    // 32-bit: two tag-range checks. NOTE(review): mirrors the two-step
    // pattern in speculateNumber() — confirm the pass/fail semantics of the
    // first check against DFG_TYPE_CHECK in DFGSpeculativeJIT.h.
    DFG_TYPE_CHECK(
        regs, edge, ~SpecInt32,
        m_jit.branch32(MacroAssembler::Equal, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::Int32Tag)));
    DFG_TYPE_CHECK(
        regs, edge, SpecMisc,
        m_jit.branch32(MacroAssembler::Below, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::UndefinedTag)));
#endif
}
5757
5758 void SpeculativeJIT::speculateMisc(Edge edge)
5759 {
5760 if (!needsTypeCheck(edge, SpecMisc))
5761 return;
5762
5763 JSValueOperand operand(this, edge, ManualOperandSpeculation);
5764 speculateMisc(edge, operand.jsValueRegs());
5765 }
5766
// Emits the speculation required by the edge's use kind without producing a
// value. Untyped needs nothing; Known* kinds assert the check is already
// proven; everything else dispatches to the matching speculate helper.
void SpeculativeJIT::speculate(Node*, Edge edge)
{
    switch (edge.useKind()) {
    case UntypedUse:
        break;
    case KnownInt32Use:
        ASSERT(!needsTypeCheck(edge, SpecInt32));
        break;
    case DoubleRepUse:
        ASSERT(!needsTypeCheck(edge, SpecFullDouble));
        break;
    case Int52RepUse:
        ASSERT(!needsTypeCheck(edge, SpecMachineInt));
        break;
    case KnownCellUse:
        ASSERT(!needsTypeCheck(edge, SpecCell));
        break;
    case KnownStringUse:
        ASSERT(!needsTypeCheck(edge, SpecString));
        break;
    case Int32Use:
        speculateInt32(edge);
        break;
    case NumberUse:
        speculateNumber(edge);
        break;
    case RealNumberUse:
        speculateRealNumber(edge);
        break;
    case DoubleRepRealUse:
        speculateDoubleRepReal(edge);
        break;
#if USE(JSVALUE64)
    // 52-bit machine-int speculation only exists on 64-bit targets.
    case MachineIntUse:
        speculateMachineInt(edge);
        break;
    case DoubleRepMachineIntUse:
        speculateDoubleRepMachineInt(edge);
        break;
#endif
    case BooleanUse:
        speculateBoolean(edge);
        break;
    case CellUse:
        speculateCell(edge);
        break;
    case ObjectUse:
        speculateObject(edge);
        break;
    case FunctionUse:
        speculateFunction(edge);
        break;
    case FinalObjectUse:
        speculateFinalObject(edge);
        break;
    case ObjectOrOtherUse:
        speculateObjectOrOther(edge);
        break;
    case StringIdentUse:
        speculateStringIdent(edge);
        break;
    case StringUse:
        speculateString(edge);
        break;
    case StringObjectUse:
        speculateStringObject(edge);
        break;
    case StringOrStringObjectUse:
        speculateStringOrStringObject(edge);
        break;
    case NotStringVarUse:
        speculateNotStringVar(edge);
        break;
    case NotCellUse:
        speculateNotCell(edge);
        break;
    case OtherUse:
        speculateOther(edge);
        break;
    case MiscUse:
        speculateMisc(edge);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
5854
// Emits an indirect jump through the switch's CTI jump table for the int32
// in 'value'. Out-of-range values branch to the fall-through block.
// Clobbers both 'value' and 'scratch'.
void SpeculativeJIT::emitSwitchIntJump(
    SwitchData* data, GPRReg value, GPRReg scratch)
{
    SimpleJumpTable& table = m_jit.codeBlock()->switchJumpTable(data->switchTableIndex);
    table.ensureCTITable();
    // Rebase so the table is indexed from zero; the unsigned AboveOrEqual
    // test then also catches values that were below table.min (they wrap
    // around to large unsigned numbers).
    m_jit.sub32(Imm32(table.min), value);
    addBranch(
        m_jit.branch32(JITCompiler::AboveOrEqual, value, Imm32(table.ctiOffsets.size())),
        data->fallThrough.block);
    m_jit.move(TrustedImmPtr(table.ctiOffsets.begin()), scratch);
    m_jit.loadPtr(JITCompiler::BaseIndex(scratch, value, JITCompiler::timesPtr()), scratch);
    m_jit.jump(scratch);
    data->didUseJumpTable = true;
}
5869
// Emits a SwitchImm (switch on an immediate number). Int32-typed inputs use
// the jump table directly. Untyped inputs take the table path when the value
// is an int32, fall through for non-numbers, and call out to
// operationFindSwitchImmTargetForDouble for doubles.
void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand value(this, node->child1());
        GPRTemporary temp(this);
        emitSwitchIntJump(data, value.gpr(), temp.gpr());
        noResult(node);
        break;
    }
    
    case UntypedUse: {
        JSValueOperand value(this, node->child1());
        GPRTemporary temp(this);
        JSValueRegs valueRegs = value.jsValueRegs();
        GPRReg scratch = temp.gpr();
        
        value.use();
        
#if USE(JSVALUE64)
        // Below the tag-type-number base means "not an int32" under
        // NaN-boxing.
        JITCompiler::Jump notInt = m_jit.branch64(
            JITCompiler::Below, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister);
        emitSwitchIntJump(data, valueRegs.gpr(), scratch);
        notInt.link(&m_jit);
        // Not a number at all: fall through.
        addBranch(
            m_jit.branchTest64(
                JITCompiler::Zero, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister),
            data->fallThrough.block);
        // A double: let the runtime find the target, then jump to it.
        silentSpillAllRegisters(scratch);
        callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs.gpr(), data->switchTableIndex);
        silentFillAllRegisters(scratch);
        m_jit.jump(scratch);
#else
        JITCompiler::Jump notInt = m_jit.branch32(
            JITCompiler::NotEqual, valueRegs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
        emitSwitchIntJump(data, valueRegs.payloadGPR(), scratch);
        notInt.link(&m_jit);
        // Tags at or above LowestTag are non-doubles: fall through.
        addBranch(
            m_jit.branch32(
                JITCompiler::AboveOrEqual, valueRegs.tagGPR(),
                TrustedImm32(JSValue::LowestTag)),
            data->fallThrough.block);
        silentSpillAllRegisters(scratch);
        callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs, data->switchTableIndex);
        silentFillAllRegisters(scratch);
        m_jit.jump(scratch);
#endif
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
    
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
5926
// Emits a switch on a single-character string: strings whose length is not 1
// fall through; ropes are resolved via operationResolveRope on a slow path;
// then the lone character (8- or 16-bit) is loaded and dispatched through
// the int jump table. Clobbers both 'value' and 'scratch'.
void SpeculativeJIT::emitSwitchCharStringJump(
    SwitchData* data, GPRReg value, GPRReg scratch)
{
    addBranch(
        m_jit.branch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(value, JSString::offsetOfLength()),
            TrustedImm32(1)),
        data->fallThrough.block);
    
    m_jit.loadPtr(MacroAssembler::Address(value, JSString::offsetOfValue()), scratch);
    
    // A null StringImpl means the string is a rope; resolve it out of line.
    addSlowPathGenerator(
        slowPathCall(
            m_jit.branchTestPtr(MacroAssembler::Zero, scratch),
            this, operationResolveRope, scratch, value));
    
    m_jit.loadPtr(MacroAssembler::Address(scratch, StringImpl::dataOffset()), value);
    
    // Load the character with the width matching the string's encoding.
    JITCompiler::Jump is8Bit = m_jit.branchTest32(
        MacroAssembler::NonZero,
        MacroAssembler::Address(scratch, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit()));
    
    m_jit.load16(MacroAssembler::Address(value), scratch);
    
    JITCompiler::Jump ready = m_jit.jump();
    
    is8Bit.link(&m_jit);
    m_jit.load8(MacroAssembler::Address(value), scratch);
    
    ready.link(&m_jit);
    emitSwitchIntJump(data, scratch, value);
}
5961
// Emits a SwitchChar (switch on a single-character string). String-typed
// inputs go straight to the char dispatch; untyped inputs first fall through
// for non-cells and non-strings.
void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case StringUse: {
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        
        op1.use();
        
        speculateString(node->child1(), op1GPR);
        emitSwitchCharStringJump(data, op1GPR, tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
    
    case UntypedUse: {
        JSValueOperand op1(this, node->child1());
        GPRTemporary temp(this);
        
        JSValueRegs op1Regs = op1.jsValueRegs();
        GPRReg tempGPR = temp.gpr();
        
        op1.use();
        
        // Non-cells and non-string cells take the fall-through block rather
        // than OSR-exiting.
        addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);
        
        addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);
        
        emitSwitchCharStringJump(data, op1Regs.payloadGPR(), tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
    
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
6003
namespace {

// A run of string-switch cases — indices [begin, end) into the sorted case
// vector — that share the same character at the position currently being
// dispatched on. Ordered by that character so runs can feed a binary switch.
struct CharacterCase {
    bool operator<(const CharacterCase& other) const
    {
        return character < other.character;
    }
    
    LChar character; // The shared character at the current position.
    unsigned begin;  // First case index in the run (inclusive).
    unsigned end;    // One past the last case index in the run.
};

} // anonymous namespace
6018
// Recursively emits a binary-search switch over 8-bit string cases.
// 'cases' is sorted; [begin, end) is the live slice; 'numChecked' characters
// are already verified equal to the input; 'buffer' points at the input's
// 8-bit characters and 'length' holds its length; 'alreadyCheckedLength'
// is the length lower bound already established ('checkedExactLength' means
// it was established as an exact equality); 'temp' is scratch. Unmatched
// inputs jump to the fall-through block.
void SpeculativeJIT::emitBinarySwitchStringRecurse(
    SwitchData* data, const Vector<SpeculativeJIT::StringSwitchCase>& cases,
    unsigned numChecked, unsigned begin, unsigned end, GPRReg buffer, GPRReg length,
    GPRReg temp, unsigned alreadyCheckedLength, bool checkedExactLength)
{
    static const bool verbose = false;
    
    if (verbose) {
        dataLog("We're down to the following cases, alreadyCheckedLength = ", alreadyCheckedLength, ":\n");
        for (unsigned i = begin; i < end; ++i) {
            dataLog("    ", cases[i].string, "\n");
        }
    }
    
    // Empty slice: nothing can match.
    if (begin == end) {
        jump(data->fallThrough.block, ForceJump);
        return;
    }
    
    // Compute, over the slice: the shortest case length, the number of
    // leading characters (from numChecked on) shared by ALL cases, and
    // whether every case has the same length.
    unsigned minLength = cases[begin].string->length();
    unsigned commonChars = minLength;
    bool allLengthsEqual = true;
    for (unsigned i = begin + 1; i < end; ++i) {
        unsigned myCommonChars = numChecked;
        for (unsigned j = numChecked;
            j < std::min(cases[begin].string->length(), cases[i].string->length());
            ++j) {
            if (cases[begin].string->at(j) != cases[i].string->at(j)) {
                if (verbose)
                    dataLog("string(", cases[i].string, ")[", j, "] != string(", cases[begin].string, ")[", j, "]\n");
                break;
            }
            myCommonChars++;
        }
        commonChars = std::min(commonChars, myCommonChars);
        if (minLength != cases[i].string->length())
            allLengthsEqual = false;
        minLength = std::min(minLength, cases[i].string->length());
    }
    
    if (checkedExactLength) {
        RELEASE_ASSERT(alreadyCheckedLength == minLength);
        RELEASE_ASSERT(allLengthsEqual);
    }
    
    RELEASE_ASSERT(minLength >= commonChars);
    
    if (verbose)
        dataLog("length = ", minLength, ", commonChars = ", commonChars, ", allLengthsEqual = ", allLengthsEqual, "\n");
    
    // Establish the length precondition for the checks below, unless an
    // earlier level already did.
    if (!allLengthsEqual && alreadyCheckedLength < minLength)
        branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough.block);
    if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
        branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough.block);
    
    // Verify the characters shared by every case in the slice.
    for (unsigned i = numChecked; i < commonChars; ++i) {
        branch8(
            MacroAssembler::NotEqual, MacroAssembler::Address(buffer, i),
            TrustedImm32(cases[begin].string->at(i)), data->fallThrough.block);
    }
    
    if (minLength == commonChars) {
        // This is the case where one of the cases is a prefix of all of the other cases.
        // We've already checked that the input string is a prefix of all of the cases,
        // so we just check length to jump to that case.
        
        if (!ASSERT_DISABLED) {
            ASSERT(cases[begin].string->length() == commonChars);
            for (unsigned i = begin + 1; i < end; ++i)
                ASSERT(cases[i].string->length() > commonChars);
        }
        
        if (allLengthsEqual) {
            RELEASE_ASSERT(end == begin + 1);
            jump(cases[begin].target, ForceJump);
            return;
        }
        
        branch32(MacroAssembler::Equal, length, Imm32(commonChars), cases[begin].target);
        
        // We've checked if the length is >= minLength, and then we checked if the
        // length is == commonChars. We get to this point if it is >= minLength but not
        // == commonChars. Hence we know that it now must be > minLength, i.e., that
        // it's >= minLength + 1.
        emitBinarySwitchStringRecurse(
            data, cases, commonChars, begin + 1, end, buffer, length, temp, minLength + 1, false);
        return;
    }
    
    // At this point we know that the string is longer than commonChars, and we've only
    // verified commonChars. Use a binary switch on the next unchecked character, i.e.
    // string[commonChars].
    
    RELEASE_ASSERT(end >= begin + 2);
    
    m_jit.load8(MacroAssembler::Address(buffer, commonChars), temp);
    
    // Group the (sorted) cases into runs sharing the same character at
    // position commonChars.
    Vector<CharacterCase> characterCases;
    CharacterCase currentCase;
    currentCase.character = cases[begin].string->at(commonChars);
    currentCase.begin = begin;
    currentCase.end = begin + 1;
    for (unsigned i = begin + 1; i < end; ++i) {
        if (cases[i].string->at(commonChars) != currentCase.character) {
            if (verbose)
                dataLog("string(", cases[i].string, ")[", commonChars, "] != string(", cases[begin].string, ")[", commonChars, "]\n");
            currentCase.end = i;
            characterCases.append(currentCase);
            currentCase.character = cases[i].string->at(commonChars);
            currentCase.begin = i;
            currentCase.end = i + 1;
        } else
            currentCase.end = i + 1;
    }
    characterCases.append(currentCase);
    
    Vector<int64_t> characterCaseValues;
    for (unsigned i = 0; i < characterCases.size(); ++i)
        characterCaseValues.append(characterCases[i].character);
    
    // Binary-switch on the character, recursing into each run.
    BinarySwitch binarySwitch(temp, characterCaseValues, BinarySwitch::Int32);
    while (binarySwitch.advance(m_jit)) {
        const CharacterCase& myCase = characterCases[binarySwitch.caseIndex()];
        emitBinarySwitchStringRecurse(
            data, cases, commonChars + 1, myCase.begin, myCase.end, buffer, length,
            temp, minLength, allLengthsEqual);
    }
    
    addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
}
6149
// Emits a string switch for the JSString in 'string'. If every case is a
// short 8-bit string, emit an inline binary-search switch on the resolved
// character data; otherwise (or for ropes/16-bit inputs at runtime) call
// operationSwitchString and jump to the returned target. Note that the
// 'string' register is reused as scratch/result along the way.
void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string)
{
    data->didUseJumpTable = true;
    
    // Decide whether the inline binary switch is worthwhile: all cases must
    // be 8-bit and within the per-case and total length limits.
    bool canDoBinarySwitch = true;
    unsigned totalLength = 0;
    
    for (unsigned i = data->cases.size(); i--;) {
        StringImpl* string = data->cases[i].value.stringImpl();
        if (!string->is8Bit()) {
            canDoBinarySwitch = false;
            break;
        }
        if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
            canDoBinarySwitch = false;
            break;
        }
        totalLength += string->length();
    }
    
    if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
        // Fully out-of-line: the operation returns the jump target.
        flushRegisters();
        callOperation(
            operationSwitchString, string, data->switchTableIndex, string);
        m_jit.jump(string);
        return;
    }
    
    GPRTemporary length(this);
    GPRTemporary temp(this);
    
    GPRReg lengthGPR = length.gpr();
    GPRReg tempGPR = temp.gpr();
    
    m_jit.load32(MacroAssembler::Address(string, JSString::offsetOfLength()), lengthGPR);
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), tempGPR);
    
    // Ropes (null StringImpl) and 16-bit inputs go to the slow path.
    MacroAssembler::JumpList slowCases;
    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
    slowCases.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    
    // Replace the cell pointer with the character-data pointer.
    m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), string);
    
    Vector<StringSwitchCase> cases;
    for (unsigned i = 0; i < data->cases.size(); ++i) {
        cases.append(
            StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target.block));
    }
    
    // The recursive emitter requires sorted cases.
    std::sort(cases.begin(), cases.end());
    
    emitBinarySwitchStringRecurse(
        data, cases, 0, 0, cases.size(), string, lengthGPR, tempGPR, 0, false);
    
    slowCases.link(&m_jit);
    silentSpillAllRegisters(string);
    callOperation(operationSwitchString, string, data->switchTableIndex, string);
    silentFillAllRegisters(string);
    m_jit.jump(string);
}
6213
// Emits a SwitchString. Ident strings are dispatched by comparing uniqued
// StringImpl pointers with a binary switch; general strings use the
// content-based switch; untyped inputs fall through for non-cells and
// non-strings first.
void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case StringIdentUse: {
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        
        speculateString(node->child1(), op1GPR);
        speculateStringIdentAndLoadStorage(node->child1(), op1GPR, tempGPR);
        
        // Ident strings are uniqued, so pointer identity of the StringImpl
        // decides the case.
        Vector<int64_t> identifierCaseValues;
        for (unsigned i = 0; i < data->cases.size(); ++i) {
            identifierCaseValues.append(
                static_cast<int64_t>(bitwise_cast<intptr_t>(data->cases[i].value.stringImpl())));
        }
        
        BinarySwitch binarySwitch(tempGPR, identifierCaseValues, BinarySwitch::IntPtr);
        while (binarySwitch.advance(m_jit))
            jump(data->cases[binarySwitch.caseIndex()].target.block, ForceJump);
        addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
        
        noResult(node);
        break;
    }
    
    case StringUse: {
        SpeculateCellOperand op1(this, node->child1());
        
        GPRReg op1GPR = op1.gpr();
        
        op1.use();
        
        speculateString(node->child1(), op1GPR);
        emitSwitchStringOnString(data, op1GPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
    
    case UntypedUse: {
        JSValueOperand op1(this, node->child1());
        
        JSValueRegs op1Regs = op1.jsValueRegs();
        
        op1.use();
        
        // Non-cells and non-string cells take the fall-through block.
        addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);
        
        addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);
        
        emitSwitchStringOnString(data, op1Regs.payloadGPR());
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
    
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
6276
// Dispatches a Switch node to the emitter for its kind. SwitchCell is not
// handled by this backend and crashes deliberately.
void SpeculativeJIT::emitSwitch(Node* node)
{
    SwitchData* data = node->switchData();
    switch (data->kind) {
    case SwitchImm: {
        emitSwitchImm(node, data);
        return;
    }
    case SwitchChar: {
        emitSwitchChar(node, data);
        return;
    }
    case SwitchString: {
        emitSwitchString(node, data);
        return;
    }
    case SwitchCell: {
        DFG_CRASH(m_jit.graph(), node, "Bad switch kind");
        return;
    } }
    RELEASE_ASSERT_NOT_REACHED();
}
6299
6300 void SpeculativeJIT::addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination)
6301 {
6302 for (unsigned i = jump.jumps().size(); i--;)
6303 addBranch(jump.jumps()[i], destination);
6304 }
6305
6306 void SpeculativeJIT::linkBranches()
6307 {
6308 for (size_t i = 0; i < m_branches.size(); ++i) {
6309 BranchRecord& branch = m_branches[i];
6310 branch.jump.linkTo(m_jit.blockHeads()[branch.destination->index], &m_jit);
6311 }
6312 }
6313
6314 #if ENABLE(GGC)
6315 void SpeculativeJIT::compileStoreBarrier(Node* node)
6316 {
6317 ASSERT(node->op() == StoreBarrier);
6318
6319 SpeculateCellOperand base(this, node->child1());
6320 GPRTemporary scratch1(this);
6321 GPRTemporary scratch2(this);
6322
6323 writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr());
6324
6325 noResult(node);
6326 }
6327
// Appends 'cell' to the VM's write-barrier buffer. Fast path: bump the
// buffer index and store the cell. If the buffer is full, spill everything
// and flush it via operationFlushWriteBarrierBuffer instead.
void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2)
{
    ASSERT(scratch1 != scratch2);
    WriteBarrierBuffer& writeBarrierBuffer = m_jit.vm()->heap.m_writeBarrierBuffer;
    m_jit.load32(writeBarrierBuffer.currentIndexAddress(), scratch2);
    // Full-buffer check happens before the increment.
    JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::TrustedImm32(writeBarrierBuffer.capacity()));
    
    m_jit.add32(TrustedImm32(1), scratch2);
    m_jit.store32(scratch2, writeBarrierBuffer.currentIndexAddress());
    
    m_jit.move(TrustedImmPtr(writeBarrierBuffer.buffer()), scratch1);
    // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
    m_jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
    
    JITCompiler::Jump done = m_jit.jump();
    needToFlush.link(&m_jit);
    
    // Slow path: flush the buffer (which records the cell) via the runtime.
    silentSpillAllRegisters(InvalidGPRReg);
    callOperation(operationFlushWriteBarrierBuffer, cell);
    silentFillAllRegisters(InvalidGPRReg);
    
    done.link(&m_jit);
}
6351
6352 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2)
6353 {
6354 JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
6355 storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
6356 ownerIsRememberedOrInEden.link(&m_jit);
6357 }
6358 #else
// Compiles StoreBarrier when GGC is disabled: no barrier code is needed, but
// the children's speculations must still be emitted.
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
    DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
    noResult(node);
}
6364 #endif // ENABLE(GGC)
6365
6366 } } // namespace JSC::DFG
6367
6368 #endif