]> git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGSpeculativeJIT.cpp
JavaScriptCore-7600.1.4.15.12.tar.gz
[apple/javascriptcore.git] / dfg / DFGSpeculativeJIT.cpp
1 /*
2 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGBinarySwitch.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGSaneStringGetByValSlowPathGenerator.h"
37 #include "DFGSlowPathGenerator.h"
38 #include "LinkBuffer.h"
39 #include "JSCInlines.h"
40 #include "ScratchRegisterAllocator.h"
41 #include "WriteBarrierBuffer.h"
42 #include <wtf/MathExtras.h>
43
44 namespace JSC { namespace DFG {
45
// Constructs a SpeculativeJIT bound to the given JITCompiler. Sizes the
// per-virtual-register GenerationInfo table from the graph's frame register
// count and wires the abstract-interpreter state, variable event stream, and
// minified graph to the compiler's JIT code object.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}
60
// Out-of-line destructor; no explicit cleanup beyond member destruction.
SpeculativeJIT::~SpeculativeJIT()
{
}
64
// Emits an inline fast-path allocation of a JSArray with the given structure
// plus its butterfly storage: the new cell ends up in resultGPR and the
// butterfly pointer in storageGPR. On allocation failure the slow path calls
// operationNewArrayWithSize. Only undecided/int32/double/contiguous indexing
// types are supported (asserted below).
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    // Allocate at least BASE_VECTOR_LEN slots so small arrays have headroom.
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    // Allocate the butterfly (vector plus indexing header), then rewind the
    // pointer so storageGPR points at the start of the indexing header.
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    // Double arrays must have their unused tail slots filled with PNaN (the
    // hole marker) rather than uninitialized bits.
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        // 32-bit: store the encoded tag and payload halves separately.
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}
109
// Emits the fast-path allocation of an Arguments object for the current node's
// origin, initializing its fields from the live call frame. Allocation failure
// jumps to slowPath. Both scratch registers are clobbered.
void SpeculativeJIT::emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
{
    Structure* structure = m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)->argumentsStructure();
    emitAllocateDestructibleObject<Arguments>(resultGPR, structure, scratchGPR1, scratchGPR2, slowPath);

    // No lexical activation is associated yet.
    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfActivation()));

    // numArguments = ArgumentCount - 1 (the count includes 'this').
    m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), scratchGPR1);
    m_jit.sub32(TrustedImm32(1), scratchGPR1);
    m_jit.store32(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfNumArguments()));

    m_jit.store32(TrustedImm32(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfOverrodeLength()));
    // Only strict-mode code marks the object; sloppy mode leaves the flag at
    // its default from the allocator.
    if (m_jit.isStrictModeFor(m_currentNode->origin.semantic))
        m_jit.store8(TrustedImm32(1), MacroAssembler::Address(resultGPR, Arguments::offsetOfIsStrictMode()));

    // Point the object at the live register file; no detached copies yet.
    m_jit.storePtr(GPRInfo::callFrameRegister, MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisters()));
    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfRegisterArray()));
    m_jit.storePtr(TrustedImmPtr(0), MacroAssembler::Address(resultGPR, Arguments::offsetOfSlowArgumentData()));

    m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), scratchGPR1);
    m_jit.storePtr(scratchGPR1, MacroAssembler::Address(resultGPR, Arguments::offsetOfCallee()));
}
132
// Registers an OSR exit taken when jumpToFail fires: records the jump plus the
// metadata (exit kind, value source, profiling hook, event-stream position)
// needed to reconstruct bytecode state at exit time. No-op once compilation
// has already been abandoned.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
141
// Same as the single-Jump overload, but all jumps in the list target the same
// OSR exit record.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
150
// Creates an OSR exit with no jump attached yet, returning a placeholder that
// identifies the exit by its index so the caller can link jumps to it later.
// Returns a default (invalid) placeholder if compilation was abandoned.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    // Capture the index before appending: it is the slot the new exit occupies.
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}
161
162 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
163 {
164 ASSERT(m_isCheckingArgumentTypes || m_canExit);
165 return speculationCheck(kind, jsValueSource, nodeUse.node());
166 }
167
168 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
169 {
170 ASSERT(m_isCheckingArgumentTypes || m_canExit);
171 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
172 }
173
174 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
175 {
176 ASSERT(m_isCheckingArgumentTypes || m_canExit);
177 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
178 }
179
// Registers an OSR exit that additionally carries a SpeculationRecovery — a
// recipe for undoing partially-committed work (e.g. an in-place arithmetic op)
// before the exit's value reconstruction runs.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}
189
190 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
191 {
192 ASSERT(m_isCheckingArgumentTypes || m_canExit);
193 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
194 }
195
// Emits an invalidation point: an OSR exit with no jumps that instead records
// a watchpoint label. If the code is later invalidated, the instructions at
// that label are overwritten with a jump to the exit.
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    // The watchpoint label marks where a jump will be patched in on
    // invalidation.
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}
210
// Emits an unconditional OSR exit and marks the remainder of this compilation
// as dead: all subsequent speculationCheck/terminate calls become no-ops via
// m_compileOkay.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
}
219
220 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
221 {
222 ASSERT(m_isCheckingArgumentTypes || m_canExit);
223 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
224 }
225
// Emits a BadType speculation check for the given edge and tells the abstract
// interpreter that only typesPassedThrough survive past this point. The caller
// must have verified that the check is actually needed.
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(BadType, source, edge.node(), jumpToFail);
}
232
233 RegisterSet SpeculativeJIT::usedRegisters()
234 {
235 RegisterSet result;
236
237 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
238 GPRReg gpr = GPRInfo::toRegister(i);
239 if (m_gprs.isInUse(gpr))
240 result.set(gpr);
241 }
242 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
243 FPRReg fpr = FPRInfo::toRegister(i);
244 if (m_fprs.isInUse(fpr))
245 result.set(fpr);
246 }
247
248 result.merge(RegisterSet::specialRegisters());
249
250 return result;
251 }
252
// Takes ownership of a slow-path generator; its code is emitted later by
// runSlowPathGenerators, after the fast paths.
void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(slowPathGenerator);
}
257
258 void SpeculativeJIT::runSlowPathGenerators()
259 {
260 for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
261 m_slowPathGenerators[i]->generate(this);
262 }
263
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code),
// so a wrapper with the JIT_OPERATION annotation is used there too.
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif
274
275 void SpeculativeJIT::clearGenerationInfo()
276 {
277 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
278 m_generationInfo[i] = GenerationInfo();
279 m_gprs = RegisterBank<GPRInfo>();
280 m_fprs = RegisterBank<FPRInfo>();
281 }
282
283 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
284 {
285 GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
286 Node* node = info.node();
287 DataFormat registerFormat = info.registerFormat();
288 ASSERT(registerFormat != DataFormatNone);
289 ASSERT(registerFormat != DataFormatDouble);
290
291 SilentSpillAction spillAction;
292 SilentFillAction fillAction;
293
294 if (!info.needsSpill())
295 spillAction = DoNothingForSpill;
296 else {
297 #if USE(JSVALUE64)
298 ASSERT(info.gpr() == source);
299 if (registerFormat == DataFormatInt32)
300 spillAction = Store32Payload;
301 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
302 spillAction = StorePtr;
303 else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
304 spillAction = Store64;
305 else {
306 ASSERT(registerFormat & DataFormatJS);
307 spillAction = Store64;
308 }
309 #elif USE(JSVALUE32_64)
310 if (registerFormat & DataFormatJS) {
311 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
312 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
313 } else {
314 ASSERT(info.gpr() == source);
315 spillAction = Store32Payload;
316 }
317 #endif
318 }
319
320 if (registerFormat == DataFormatInt32) {
321 ASSERT(info.gpr() == source);
322 ASSERT(isJSInt32(info.registerFormat()));
323 if (node->hasConstant()) {
324 ASSERT(isInt32Constant(node));
325 fillAction = SetInt32Constant;
326 } else
327 fillAction = Load32Payload;
328 } else if (registerFormat == DataFormatBoolean) {
329 #if USE(JSVALUE64)
330 RELEASE_ASSERT_NOT_REACHED();
331 fillAction = DoNothingForFill;
332 #elif USE(JSVALUE32_64)
333 ASSERT(info.gpr() == source);
334 if (node->hasConstant()) {
335 ASSERT(isBooleanConstant(node));
336 fillAction = SetBooleanConstant;
337 } else
338 fillAction = Load32Payload;
339 #endif
340 } else if (registerFormat == DataFormatCell) {
341 ASSERT(info.gpr() == source);
342 if (node->hasConstant()) {
343 JSValue value = valueOfJSConstant(node);
344 ASSERT_UNUSED(value, value.isCell());
345 fillAction = SetCellConstant;
346 } else {
347 #if USE(JSVALUE64)
348 fillAction = LoadPtr;
349 #else
350 fillAction = Load32Payload;
351 #endif
352 }
353 } else if (registerFormat == DataFormatStorage) {
354 ASSERT(info.gpr() == source);
355 fillAction = LoadPtr;
356 } else if (registerFormat == DataFormatInt52) {
357 if (node->hasConstant())
358 fillAction = SetInt52Constant;
359 else if (info.spillFormat() == DataFormatInt52)
360 fillAction = Load64;
361 else if (info.spillFormat() == DataFormatStrictInt52)
362 fillAction = Load64ShiftInt52Left;
363 else if (info.spillFormat() == DataFormatNone)
364 fillAction = Load64;
365 else {
366 RELEASE_ASSERT_NOT_REACHED();
367 fillAction = Load64; // Make GCC happy.
368 }
369 } else if (registerFormat == DataFormatStrictInt52) {
370 if (node->hasConstant())
371 fillAction = SetStrictInt52Constant;
372 else if (info.spillFormat() == DataFormatInt52)
373 fillAction = Load64ShiftInt52Right;
374 else if (info.spillFormat() == DataFormatStrictInt52)
375 fillAction = Load64;
376 else if (info.spillFormat() == DataFormatNone)
377 fillAction = Load64;
378 else {
379 RELEASE_ASSERT_NOT_REACHED();
380 fillAction = Load64; // Make GCC happy.
381 }
382 } else {
383 ASSERT(registerFormat & DataFormatJS);
384 #if USE(JSVALUE64)
385 ASSERT(info.gpr() == source);
386 if (node->hasConstant()) {
387 if (valueOfJSConstant(node).isCell())
388 fillAction = SetTrustedJSConstant;
389 fillAction = SetJSConstant;
390 } else if (info.spillFormat() == DataFormatInt32) {
391 ASSERT(registerFormat == DataFormatJSInt32);
392 fillAction = Load32PayloadBoxInt;
393 } else
394 fillAction = Load64;
395 #else
396 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
397 if (node->hasConstant())
398 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
399 else if (info.payloadGPR() == source)
400 fillAction = Load32Payload;
401 else { // Fill the Tag
402 switch (info.spillFormat()) {
403 case DataFormatInt32:
404 ASSERT(registerFormat == DataFormatJSInt32);
405 fillAction = SetInt32Tag;
406 break;
407 case DataFormatCell:
408 ASSERT(registerFormat == DataFormatJSCell);
409 fillAction = SetCellTag;
410 break;
411 case DataFormatBoolean:
412 ASSERT(registerFormat == DataFormatJSBoolean);
413 fillAction = SetBooleanTag;
414 break;
415 default:
416 fillAction = Load32Tag;
417 break;
418 }
419 }
420 #endif
421 }
422
423 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
424 }
425
// Builds the silent spill/fill plan for a double value held in an FPR.
// Constants are rematerialized on fill; other values are stored to and
// reloaded from the virtual register's stack slot.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        // A spillable double must be a non-constant that has never been
        // spilled before.
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
463
// Executes the spill half of a silent save plan: stores the planned register
// into the node's stack slot with the width/half the plan dictates.
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
490
// Executes the fill half of a silent save plan: rematerializes a constant or
// reloads the node's stack slot into the planned register. canTrample is a GPR
// that may be clobbered (used only on 64-bit to stage a double constant's
// bits).
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        // Int52 is stored left-shifted by the shift amount.
        m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        // Stage the raw bits in a GPR, then move them into the FPR.
        m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        // Reload the raw int32 and OR in the number-tag bits to re-box it.
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        // Spilled as Int52 (shifted); convert to strict form on reload.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        // Spilled as strict Int52; convert to shifted form on reload.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
589
// Emits a test of the indexing-type byte (already loaded into tempGPR,
// which is clobbered) against the expected shape, honoring the array mode's
// class requirement (must-be-array, must-not-be-array, or either). Returns the
// jump taken when the object does NOT match.
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        // Callers never ask for OriginalArray here; structure checks handle it.
        CRASH();
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
    }

    case Array::Array:
        // Must have both the IsArray bit and the exact shape.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        // Must have the shape with the IsArray bit clear.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        // Shape must match; IsArray bit is ignored.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}
618
// Emits the full indexing-type check for an array mode, given the indexing
// byte in tempGPR (clobbered). Simple shapes delegate to the single-shape
// helper; (SlowPut)ArrayStorage uses a subtract-and-compare to accept the
// shape range [ArrayStorageShape, SlowPutArrayStorageShape] when slow-put is
// allowed. Returns the jumps taken when the object does NOT match.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                // Must be an array...
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                // ...whose shape is within [ArrayStorage, SlowPutArrayStorage]
                // (unsigned range check after biasing by ArrayStorageShape).
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            // Plain ArrayStorage: exact match including the IsArray bit.
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        // Not required to be a JSArray: only the shape matters.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}
675
// Compiles a CheckArray node: verifies that the base cell matches the node's
// (already specific, non-converting) array mode, exiting via OSR on mismatch.
// Skips the check entirely when abstract interpretation proves it redundant.
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // Indexed modes: test the indexing-type byte.
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        // Arguments objects are identified by their JSType byte.
        speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node,
            m_jit.branch8(
                MacroAssembler::NotEqual,
                MacroAssembler::Address(baseReg, JSCell::typeInfoTypeOffset()),
                MacroAssembler::TrustedImm32(ArgumentsType)));

        noResult(m_currentNode);
        return;
    default:
        // Remaining modes are typed arrays; check the JSType byte.
        speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node,
            m_jit.branch8(
                MacroAssembler::NotEqual,
                MacroAssembler::Address(baseReg, JSCell::typeInfoTypeOffset()),
                MacroAssembler::TrustedImm32(typeForTypedArrayType(node->arrayMode().typedArrayType()))));
        noResult(m_currentNode);
        return;
    }

    // NOTE(review): every case above either returns or crashes, and
    // expectedClassInfo is never set to non-null, so the classInfo comparison
    // below appears to be dead code kept for a removed mode — confirm before
    // relying on it.
    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}
743
// Compiles an Arrayify/ArrayifyToStructure node: fast-path checks whether the
// base already has the desired structure (ArrayifyToStructure) or indexing
// shape (Arrayify), and defers the actual conversion to an
// ArrayifySlowPathGenerator. propertyReg may be InvalidGPRReg when no index is
// involved.
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    // Only plain Arrayify needs a scratch register for the structure; adopt it
    // lazily so ArrayifyToStructure doesn't burn a register.
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));

    noResult(m_currentNode);
}
779
780 void SpeculativeJIT::arrayify(Node* node)
781 {
782 ASSERT(node->arrayMode().isSpecific());
783
784 SpeculateCellOperand base(this, node->child1());
785
786 if (!node->child2()) {
787 arrayify(node, base.gpr(), InvalidGPRReg);
788 return;
789 }
790
791 SpeculateInt32Operand property(this, node->child2());
792
793 arrayify(node, base.gpr(), property.gpr());
794 }
795
// Materializes a storage-pointer edge into a GPR and returns it (locked).
// Handles the spilled-storage case directly; anything else is treated as a
// cell and filled through fillSpeculateCell.
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        // Already in a register; just lock it for the caller.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}
825
826 void SpeculativeJIT::useChildren(Node* node)
827 {
828 if (node->flags() & NodeHasVarArgs) {
829 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
830 if (!!m_jit.graph().m_varArgChildren[childIdx])
831 use(m_jit.graph().m_varArgChildren[childIdx]);
832 }
833 } else {
834 Edge child1 = node->child1();
835 if (!child1) {
836 ASSERT(!node->child2() && !node->child3());
837 return;
838 }
839 use(child1);
840
841 Edge child2 = node->child2();
842 if (!child2) {
843 ASSERT(!node->child3());
844 return;
845 }
846 use(child2);
847
848 Edge child3 = node->child3();
849 if (!child3)
850 return;
851 use(child3);
852 }
853 }
854
// Compiles an In node (the JavaScript 'in' operator). When the property name
// is a constant atomic string, emits a patchable inline cache backed by
// operationInOptimize; otherwise falls back to a plain call to
// operationGenericIn.
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (isConstant(node->child1().node())) {
        JSString* string =
            jsDynamicCast<JSString*>(valueOfJSConstant(node->child1().node()));
        // Only atomic (interned) strings are eligible for the cached path.
        if (string && string->tryGetValueImpl()
            && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            // The patchable jump initially targets the slow path; repatching
            // later redirects it into generated stub code. 'done' is where the
            // stub (or slow path) rejoins the main flow.
            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                string->tryGetValueImpl());

            // Record which registers the repatching code may use.
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.usedRegisters = usedRegisters();
            stubInfo->patch.spillMode = NeedToSpill;

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(slowPath.release());

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    // Generic path: boxed key, call out to the runtime.
    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
911
912 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
913 {
914 unsigned branchIndexInBlock = detectPeepHoleBranch();
915 if (branchIndexInBlock != UINT_MAX) {
916 Node* branchNode = m_block->at(branchIndexInBlock);
917
918 ASSERT(node->adjustedRefCount() == 1);
919
920 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
921
922 m_indexInBlock = branchIndexInBlock;
923 m_currentNode = branchNode;
924
925 return true;
926 }
927
928 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
929
930 return false;
931 }
932
933 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
934 {
935 unsigned branchIndexInBlock = detectPeepHoleBranch();
936 if (branchIndexInBlock != UINT_MAX) {
937 Node* branchNode = m_block->at(branchIndexInBlock);
938
939 ASSERT(node->adjustedRefCount() == 1);
940
941 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
942
943 m_indexInBlock = branchIndexInBlock;
944 m_currentNode = branchNode;
945
946 return true;
947 }
948
949 nonSpeculativeNonPeepholeStrictEq(node, invert);
950
951 return false;
952 }
953
954 static const char* dataFormatString(DataFormat format)
955 {
956 // These values correspond to the DataFormat enum.
957 const char* strings[] = {
958 "[ ]",
959 "[ i]",
960 "[ d]",
961 "[ c]",
962 "Err!",
963 "Err!",
964 "Err!",
965 "Err!",
966 "[J ]",
967 "[Ji]",
968 "[Jd]",
969 "[Jc]",
970 "Err!",
971 "Err!",
972 "Err!",
973 "Err!",
974 };
975 return strings[format];
976 }
977
// Dumps the GPR/FPR register banks and the per-virtual-register generation
// info to the data log, optionally wrapped in <label>...</label> tags.
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        // Print register format and spill format side by side; dead entries
        // show as [__][__].
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            // On 32-bit, JS formats occupy a register pair; skip the single-GPR print.
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
1009
// Creates an unbound temporary that holds no register; a register may be
// transferred in later via adopt().
GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}
1015
1016 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1017 : m_jit(jit)
1018 , m_gpr(InvalidGPRReg)
1019 {
1020 m_gpr = m_jit->allocate();
1021 }
1022
1023 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1024 : m_jit(jit)
1025 , m_gpr(InvalidGPRReg)
1026 {
1027 m_gpr = m_jit->allocate(specific);
1028 }
1029
1030 #if USE(JSVALUE32_64)
1031 GPRTemporary::GPRTemporary(
1032 SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1033 : m_jit(jit)
1034 , m_gpr(InvalidGPRReg)
1035 {
1036 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1037 m_gpr = m_jit->reuse(op1.gpr(which));
1038 else
1039 m_gpr = m_jit->allocate();
1040 }
1041 #endif // USE(JSVALUE32_64)
1042
// Creates an empty temporary; the underlying GPRTemporary members stay unbound.
JSValueRegsTemporary::JSValueRegsTemporary() { }
1044
// Allocates the register(s) needed to hold a JSValue: a single GPR on 64-bit
// platforms, a tag/payload GPR pair on 32-bit platforms.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}
1054
// Out-of-line destructor; member GPRTemporary destructors release the registers.
JSValueRegsTemporary::~JSValueRegsTemporary() { }
1056
// Returns the held register(s) packaged as a JSValueRegs.
JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}
1065
1066 void GPRTemporary::adopt(GPRTemporary& other)
1067 {
1068 ASSERT(!m_jit);
1069 ASSERT(m_gpr == InvalidGPRReg);
1070 ASSERT(other.m_jit);
1071 ASSERT(other.m_gpr != InvalidGPRReg);
1072 m_jit = other.m_jit;
1073 m_gpr = other.m_gpr;
1074 other.m_jit = 0;
1075 other.m_gpr = InvalidGPRReg;
1076 }
1077
1078 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1079 : m_jit(jit)
1080 , m_fpr(InvalidFPRReg)
1081 {
1082 m_fpr = m_jit->fprAllocate();
1083 }
1084
1085 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1086 : m_jit(jit)
1087 , m_fpr(InvalidFPRReg)
1088 {
1089 if (m_jit->canReuse(op1.node()))
1090 m_fpr = m_jit->reuse(op1.fpr());
1091 else
1092 m_fpr = m_jit->fprAllocate();
1093 }
1094
1095 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1096 : m_jit(jit)
1097 , m_fpr(InvalidFPRReg)
1098 {
1099 if (m_jit->canReuse(op1.node()))
1100 m_fpr = m_jit->reuse(op1.fpr());
1101 else if (m_jit->canReuse(op2.node()))
1102 m_fpr = m_jit->reuse(op2.fpr());
1103 else
1104 m_fpr = m_jit->fprAllocate();
1105 }
1106
1107 #if USE(JSVALUE32_64)
1108 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1109 : m_jit(jit)
1110 , m_fpr(InvalidFPRReg)
1111 {
1112 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1113 m_fpr = m_jit->reuse(op1.fpr());
1114 else
1115 m_fpr = m_jit->fprAllocate();
1116 }
1117 #endif
1118
1119 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1120 {
1121 BasicBlock* taken = branchNode->branchData()->taken.block;
1122 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1123
1124 SpeculateDoubleOperand op1(this, node->child1());
1125 SpeculateDoubleOperand op2(this, node->child2());
1126
1127 branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1128 jump(notTaken);
1129 }
1130
// Emits a fused object-equality compare-and-branch. Both children are
// speculated to be objects. When the masquerades-as-undefined watchpoint is
// still valid we only need to rule out strings; otherwise we must also check
// that neither cell has the MasqueradesAsUndefined flag set.
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    // If the taken block is the fall-through block, branch on the inverted
    // condition so the taken path falls through.
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // Fast case: only speculate that neither operand is a string. Each
        // check is skipped when CFA already proved the operand is an object.
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchStructurePtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op1GPR, JSCell::structureIDOffset()),
                    m_jit.vm()->stringStructure.get()));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchStructurePtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op2GPR, JSCell::structureIDOffset()),
                    m_jit.vm()->stringStructure.get()));
        }
    } else {
        // Slow case: load each operand's structure to rule out strings, and
        // also check the type-info flags for MasqueradesAsUndefined.
        GPRTemporary structure(this);
        GPRTemporary temp(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.emitLoadStructure(op1GPR, structureGPR, temp.gpr());
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        // structureGPR is reused for the second operand's structure.
        m_jit.emitLoadStructure(op2GPR, structureGPR, temp.gpr());
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // Objects compare equal iff they are the same cell.
    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
1207
1208 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1209 {
1210 BasicBlock* taken = branchNode->branchData()->taken.block;
1211 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1212
1213 // The branch instruction will branch to the taken block.
1214 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1215 if (taken == nextBlock()) {
1216 condition = JITCompiler::invert(condition);
1217 BasicBlock* tmp = taken;
1218 taken = notTaken;
1219 notTaken = tmp;
1220 }
1221
1222 if (isBooleanConstant(node->child1().node())) {
1223 bool imm = valueOfBooleanConstant(node->child1().node());
1224 SpeculateBooleanOperand op2(this, node->child2());
1225 branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
1226 } else if (isBooleanConstant(node->child2().node())) {
1227 SpeculateBooleanOperand op1(this, node->child1());
1228 bool imm = valueOfBooleanConstant(node->child2().node());
1229 branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
1230 } else {
1231 SpeculateBooleanOperand op1(this, node->child1());
1232 SpeculateBooleanOperand op2(this, node->child2());
1233 branch32(condition, op1.gpr(), op2.gpr(), taken);
1234 }
1235
1236 jump(notTaken);
1237 }
1238
1239 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1240 {
1241 BasicBlock* taken = branchNode->branchData()->taken.block;
1242 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1243
1244 // The branch instruction will branch to the taken block.
1245 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1246 if (taken == nextBlock()) {
1247 condition = JITCompiler::invert(condition);
1248 BasicBlock* tmp = taken;
1249 taken = notTaken;
1250 notTaken = tmp;
1251 }
1252
1253 if (isInt32Constant(node->child1().node())) {
1254 int32_t imm = valueOfInt32Constant(node->child1().node());
1255 SpeculateInt32Operand op2(this, node->child2());
1256 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1257 } else if (isInt32Constant(node->child2().node())) {
1258 SpeculateInt32Operand op1(this, node->child1());
1259 int32_t imm = valueOfInt32Constant(node->child2().node());
1260 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1261 } else {
1262 SpeculateInt32Operand op1(this, node->child1());
1263 SpeculateInt32Operand op2(this, node->child2());
1264 branch32(condition, op1.gpr(), op2.gpr(), taken);
1265 }
1266
1267 jump(notTaken);
1268 }
1269
// Returns true if the compare is fused with a subsequent branch.
// Dispatches on the compare's speculated use kinds to the appropriate fused
// compare-and-branch emitter, or to the generic non-speculative path.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                // Generic fused path; it consumes the children itself.
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            // Generic fused path; it consumes the children itself.
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        // The speculative emitters above do not consume the children, so do
        // it here, then skip ahead past the fused branch node.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
1320
1321 void SpeculativeJIT::noticeOSRBirth(Node* node)
1322 {
1323 if (!node->hasVirtualRegister())
1324 return;
1325
1326 VirtualRegister virtualRegister = node->virtualRegister();
1327 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1328
1329 info.noticeOSRBirth(*m_stream, node, virtualRegister);
1330 }
1331
1332 void SpeculativeJIT::compileMovHint(Node* node)
1333 {
1334 ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1335
1336 Node* child = node->child1().node();
1337 noticeOSRBirth(child);
1338
1339 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1340 }
1341
// Aborts code generation at the current node by planting a crash with the
// given reason, then clears all generation info.
void SpeculativeJIT::bail(AbortReason reason)
{
    // NOTE(review): setting m_compileOkay back to true here looks surprising
    // but appears deliberate — it allows compilation of subsequent blocks to
    // proceed after the abort breakpoint has been planted. Confirm against
    // callers before changing.
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}
1348
// Generates code for m_block: records the block head label, replays the
// variables live at the head into the variable event stream, then compiles
// each node in order while keeping the abstract interpreter state in sync.
void SpeculativeJIT::compileCurrentBlock()
{
    ASSERT(m_compileOkay);

    if (!m_block)
        return;

    ASSERT(m_block->isReachable);

    m_jit.blockHeads()[m_block->index] = m_jit.label();

    if (!m_block->cfaHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.abortWithReason(DFGUnreachableBasicBlock);
        return;
    }

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();
    m_jit.jitAssertTagsInPlace();
    m_jit.jitAssertArgumentCountSane();

    m_state.reset();
    m_state.beginBasicBlock(m_block);

    // Tell the variable event stream where each live variable resides at the
    // head of this block.
    for (size_t i = m_block->variablesAtHead.size(); i--;) {
        int operand = m_block->variablesAtHead.operandForIndex(i);
        Node* node = m_block->variablesAtHead[i];
        if (!node)
            continue; // No need to record dead SetLocal's.

        VariableAccessData* variable = node->variableAccessData();
        DataFormat format;
        if (!node->refCount())
            continue; // No need to record dead SetLocal's.
        format = dataFormatFor(variable->flushFormat());
        m_stream->appendAndLog(
            VariableEvent::setLocal(
                VirtualRegister(operand),
                variable->machineLocal(),
                format));
    }

    m_codeOriginForExitTarget = CodeOrigin();
    m_codeOriginForExitProfile = CodeOrigin();

    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);

        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail(DFGBailedAtTopOfBlock);
            return;
        }

        m_canExit = m_currentNode->canExit();
        bool shouldExecuteEffects = m_interpreter.startExecuting(m_currentNode);
        m_jit.setForNode(m_currentNode);
        m_codeOriginForExitTarget = m_currentNode->origin.forExit;
        m_codeOriginForExitProfile = m_currentNode->origin.semantic;
        m_lastGeneratedNode = m_currentNode->op();
        if (!m_currentNode->shouldGenerate()) {
            // Dead nodes still need bookkeeping for OSR exit and the minified graph.
            switch (m_currentNode->op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case WeakJSConstant:
                m_jit.addWeakReference(m_currentNode->weakConstant());
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;

            case SetLocal:
                // Dead SetLocals should have been eliminated before this phase.
                RELEASE_ASSERT_NOT_REACHED();
                break;

            case MovHint:
                compileMovHint(m_currentNode);
                break;

            case ZombieHint: {
                recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
                break;
            }

            default:
                if (belongsInMinifiedGraph(m_currentNode->op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
            }
        } else {

            if (verboseCompilationEnabled()) {
                dataLogF(
                    "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                    (int)m_currentNode->index(),
                    m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
                dataLog("\n");
            }

            compile(m_currentNode);

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            m_jit.clearRegisterAllocationOffsets();
#endif

            if (!m_compileOkay) {
                bail(DFGBailedAtEndOfNode);
                return;
            }

            if (belongsInMinifiedGraph(m_currentNode->op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                noticeOSRBirth(m_currentNode);
            }
        }

        // Make sure that the abstract state is rematerialized for the next node.
        if (shouldExecuteEffects)
            m_interpreter.executeEffects(m_indexInBlock);
    }

    // Perform the most basic verification that children have been used correctly.
    if (!ASSERT_DISABLED) {
        for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
            GenerationInfo& info = m_generationInfo[index];
            RELEASE_ASSERT(!info.alive());
        }
    }
}
1483
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
// Emits a speculation check per flushed argument whose format is narrower
// than FlushedJSValue; failure triggers an OSR exit at bytecode 0.
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    // Argument checks conceptually happen at the function's first bytecode.
    m_codeOriginForExitTarget = CodeOrigin(0);
    m_codeOriginForExitProfile = CodeOrigin(0);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        if (!node) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }

        ASSERT(node->op() == SetArgument);
        ASSERT(node->shouldGenerate());

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();

        // FlushedJSValue accepts any value, so no check is needed.
        if (format == FlushedJSValue)
            continue;

        VirtualRegister virtualRegister = variableAccessData->local();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));

#if USE(JSVALUE64)
        switch (format) {
        case FlushedInt32: {
            // Boxed int32s are >= the tag-type-number value; anything below is not an int32.
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
            break;
        }
        case FlushedBoolean: {
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            // XOR with ValueFalse maps {false, true} to {0, 1}; any remaining
            // high bits mean the value is not a boolean.
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            // Cell pointers have no tag bits set.
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        // On 32-bit, the tag word identifies the type directly.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }
    m_isCheckingArgumentTypes = false;
}
1556
1557 bool SpeculativeJIT::compile()
1558 {
1559 checkArgumentTypes();
1560
1561 ASSERT(!m_currentNode);
1562 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1563 m_jit.setForBlockIndex(blockIndex);
1564 m_block = m_jit.graph().block(blockIndex);
1565 compileCurrentBlock();
1566 }
1567 linkBranches();
1568 return true;
1569 }
1570
1571 void SpeculativeJIT::createOSREntries()
1572 {
1573 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1574 BasicBlock* block = m_jit.graph().block(blockIndex);
1575 if (!block)
1576 continue;
1577 if (!block->isOSRTarget)
1578 continue;
1579
1580 // Currently we don't have OSR entry trampolines. We could add them
1581 // here if need be.
1582 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1583 }
1584 }
1585
1586 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1587 {
1588 unsigned osrEntryIndex = 0;
1589 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1590 BasicBlock* block = m_jit.graph().block(blockIndex);
1591 if (!block)
1592 continue;
1593 if (!block->isOSRTarget)
1594 continue;
1595 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1596 }
1597 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1598 }
1599
1600 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1601 {
1602 Edge child3 = m_jit.graph().varArgChild(node, 2);
1603 Edge child4 = m_jit.graph().varArgChild(node, 3);
1604
1605 ArrayMode arrayMode = node->arrayMode();
1606
1607 GPRReg baseReg = base.gpr();
1608 GPRReg propertyReg = property.gpr();
1609
1610 SpeculateDoubleOperand value(this, child3);
1611
1612 FPRReg valueReg = value.fpr();
1613
1614 DFG_TYPE_CHECK(
1615 JSValueRegs(), child3, SpecFullRealNumber,
1616 m_jit.branchDouble(
1617 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1618
1619 if (!m_compileOkay)
1620 return;
1621
1622 StorageOperand storage(this, child4);
1623 GPRReg storageReg = storage.gpr();
1624
1625 if (node->op() == PutByValAlias) {
1626 // Store the value to the array.
1627 GPRReg propertyReg = property.gpr();
1628 FPRReg valueReg = value.fpr();
1629 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1630
1631 noResult(m_currentNode);
1632 return;
1633 }
1634
1635 GPRTemporary temporary;
1636 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1637
1638 MacroAssembler::Jump slowCase;
1639
1640 if (arrayMode.isInBounds()) {
1641 speculationCheck(
1642 OutOfBounds, JSValueRegs(), 0,
1643 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1644 } else {
1645 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1646
1647 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1648
1649 if (!arrayMode.isOutOfBounds())
1650 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1651
1652 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1653 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1654
1655 inBounds.link(&m_jit);
1656 }
1657
1658 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1659
1660 base.use();
1661 property.use();
1662 value.use();
1663 storage.use();
1664
1665 if (arrayMode.isOutOfBounds()) {
1666 addSlowPathGenerator(
1667 slowPathCall(
1668 slowCase, this,
1669 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1670 NoResult, baseReg, propertyReg, valueReg));
1671 }
1672
1673 noResult(m_currentNode, UseChildrenCalledExplicitly);
1674 }
1675
// Compiles StringCharCodeAt: loads the character code at the given index of
// a string whose backing storage child has already been resolved, speculating
// that the index is within the string's length.
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    // 8-bit path: one byte per character.
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    // 16-bit path: two bytes per character.
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    int32Result(scratchReg, m_currentNode);
}
1710
// Compiles GetByVal on a JSString: produces the single-character string at
// the given index, fetched from the VM's small-strings table. In-bounds
// accesses are inline; out-of-bounds accesses go to a slow path that either
// returns undefined (when the String prototype chain is known sane) or falls
// back to a generic string get-by-val operation.
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    // On 32-bit, the out-of-bounds slow path may produce a non-cell value, so
    // a tag register is only allocated (via adopt) when it can be needed.
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);

    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // Characters >= 0x100 are not in the single-character table; call out.
    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Index into the single-character strings table (scale by pointer size).
    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
    m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);

    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));

    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif

        // When the String prototype chain is sane, out-of-bounds reads can
        // simply produce undefined; otherwise a generic lookup is required.
        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
        if (globalObject->stringPrototypeChainIsSane()) {
#if USE(JSVALUE64)
            addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
                outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg)));
#else
            addSlowPathGenerator(adoptPtr(new SaneStringGetByValSlowPathGenerator(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
                baseReg, propertyReg)));
#endif
        } else {
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    resultTagReg, scratchReg, baseReg, propertyReg));
#endif
        }

#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}
1804
// Compiles StringFromCharCode: maps an int32 char code to a JSString. Codes
// below 0xff are looked up in the VM's single-character strings table; larger
// codes, or table misses (null entry), take the slow path call.
void SpeculativeJIT::compileFromCharCode(Node* node)
{
    SpeculateStrictInt32Operand property(this, node->child1());
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();

    JITCompiler::JumpList slowCases;
    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);

    // A null table entry means the string has not been materialized yet.
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
    cellResult(scratchReg, m_currentNode);
}
1823
1824 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
1825 {
1826 VirtualRegister virtualRegister = node->virtualRegister();
1827 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1828
1829 switch (info.registerFormat()) {
1830 case DataFormatStorage:
1831 RELEASE_ASSERT_NOT_REACHED();
1832
1833 case DataFormatBoolean:
1834 case DataFormatCell:
1835 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1836 return GeneratedOperandTypeUnknown;
1837
1838 case DataFormatNone:
1839 case DataFormatJSCell:
1840 case DataFormatJS:
1841 case DataFormatJSBoolean:
1842 case DataFormatJSDouble:
1843 return GeneratedOperandJSValue;
1844
1845 case DataFormatJSInt32:
1846 case DataFormatInt32:
1847 return GeneratedOperandInteger;
1848
1849 default:
1850 RELEASE_ASSERT_NOT_REACHED();
1851 return GeneratedOperandTypeUnknown;
1852 }
1853 }
1854
// Compile ValueToInt32: convert the child to an int32 per the JS ToInt32
// semantics appropriate for its use kind. Int52 and double inputs truncate;
// generic JSValue inputs branch on their current representation.
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case Int52RepUse: {
        // Int52: ToInt32 is just taking the low 32 bits.
        SpeculateStrictInt52Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
        int32Result(resultGPR, node, DataFormatInt32);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        // Double: try a hardware truncation; fall back to the C++ toInt32
        // helper when the truncation fails (NaN, out of range, etc.).
        GPRTemporary result(this);
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
        JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);

        addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));

        int32Result(gpr, node);
        return;
    }

    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            // Already an int32: just copy it.
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            int32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            // Values >= the tag-type-number constant are boxed int32s.
            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                // NumberUse: anything without the number tag bits is a
                // speculation failure.
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                // NotCellUse: non-numbers are allowed as long as they are not
                // cells; booleans convert to 0/1, other non-cells to 0.
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);

                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell, branchIsCell(JSValueRegs(gpr)));

                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());

                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            // Call the C++ ToInt32 helper; registers must be spilled around
            // the call.
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();

            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                // 32_64 encoding: dispatch on the tag word.
                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                if (node->child1().useKind() == NumberUse) {
                    // Tags >= LowestTag are non-double, non-int32 values:
                    // speculation failure for NumberUse.
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));

                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), ~SpecCell,
                        branchIsCell(op1.jsValueRegs()));

                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());

                    isBoolean.link(&m_jit);
                    // A boolean payload is already 0 or 1.
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());

                    isNumber.link(&m_jit);
                }

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            int32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            // checkGeneratedTypeForToInt32 already terminated compilation.
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
2015
// Compile UInt32ToNumber: reinterpret an int32 as an unsigned 32-bit number.
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (doesOverflow(node->arithMode())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.

        SpeculateInt32Operand op1(this, node->child1());
        FPRTemporary result(this);

        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();

        m_jit.convertInt32ToDouble(inputGPR, outputFPR);

        // If the signed value was negative, the unsigned interpretation is
        // value + 2^32.
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);

        doubleResult(outputFPR, node);
        return;
    }

    RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);

    // Otherwise speculate that the value fits in int32: a set sign bit means
    // the unsigned value exceeds INT32_MAX, which is an overflow here.
    SpeculateInt32Operand op1(this, node->child1());
    GPRTemporary result(this);

    m_jit.move(op1.gpr(), result.gpr());

    speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));

    int32Result(result.gpr(), node, op1.format());
}
2049
// Compile DoubleAsInt32: convert a double to int32, OSR-exiting when the
// conversion is lossy (and, per the arith mode, when the input is -0).
void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);

    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList failureCases;
    // This node only exists in overflow-checking form.
    RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
    m_jit.branchConvertDoubleToInt32(
        valueFPR, resultGPR, failureCases, scratchFPR,
        shouldCheckNegativeZero(node->arithMode()));
    speculationCheck(Overflow, JSValueRegs(), 0, failureCases);

    int32Result(resultGPR, node);
}
2069
// Compile DoubleRep: produce an unboxed double from the child value.
void SpeculativeJIT::compileDoubleRep(Node* node)
{
    switch (node->child1().useKind()) {
    case NumberUse: {
        ASSERT(!isNumberConstant(node->child1().node())); // This should have been constant folded.

        // If abstract interpretation proved the value is an int32, a plain
        // int-to-double conversion suffices.
        if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            FPRTemporary result(this);
            m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
            doubleResult(result.fpr(), node);
            return;
        }

        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

#if USE(JSVALUE64)
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        FPRReg resultFPR = result.fpr();

        // Values >= the tag-type-number constant are boxed int32s.
        JITCompiler::Jump isInteger = m_jit.branch64(
            MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);

        if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }

        // Boxed double: strip the tag and move into an FPR.
        m_jit.move(op1GPR, tempGPR);
        unboxDouble(tempGPR, resultFPR);
        JITCompiler::Jump done = m_jit.jump();

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1GPR, resultFPR);
        done.link(&m_jit);
#else // USE(JSVALUE64) -> this is the 32_64 case
        FPRTemporary temp(this);

        GPRReg op1TagGPR = op1.tagGPR();
        GPRReg op1PayloadGPR = op1.payloadGPR();
        FPRReg tempFPR = temp.fpr();
        FPRReg resultFPR = result.fpr();

        JITCompiler::Jump isInteger = m_jit.branch32(
            MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));

        if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            // Tags >= LowestTag denote non-number values.
            typeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }

        unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
        JITCompiler::Jump done = m_jit.jump();

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
        done.link(&m_jit);
#endif // USE(JSVALUE64)

        doubleResult(resultFPR, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        // Int52 always fits in a double exactly.
        SpeculateStrictInt52Operand value(this, node->child1());
        FPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        FPRReg resultFPR = result.fpr();

        m_jit.convertInt64ToDouble(valueGPR, resultFPR);

        doubleResult(resultFPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2159
// Compile ValueRep: box the child back into a generic JSValue representation.
void SpeculativeJIT::compileValueRep(Node* node)
{
    switch (node->child1().useKind()) {
    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        JSValueRegsTemporary result(this);

        FPRReg valueFPR = value.fpr();
        JSValueRegs resultRegs = result.regs();

        // It's very tempting to in-place filter the value to indicate that it's not impure NaN
        // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
        // subject to a prior SetLocal, filtering the value would imply that the corresponding
        // local was purified.
        if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
            m_jit.purifyNaN(valueFPR);

#if CPU(X86)
        // boxDouble() on X86 clobbers the source, so we need to copy.
        // FIXME: Don't do that! https://bugs.webkit.org/show_bug.cgi?id=131690
        FPRTemporary temp(this);
        m_jit.moveDouble(valueFPR, temp.fpr());
        valueFPR = temp.fpr();
#endif

        boxDouble(valueFPR, resultRegs);

        jsValueResult(resultRegs, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();

        boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);

        jsValueResult(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2211
// Clamp a double into the byte range [0, 255], mapping NaN and negatives to 0.
// The returned value still carries the +0.5 rounding bias; callers truncate it
// (so e.g. 10.0 yields 10.5, which truncates to 10... wait, rounds to 10).
// More precisely: the caller's truncation of (d + 0.5) rounds halves upward.
static double clampDoubleToByte(double d)
{
    const double biased = d + 0.5;
    if (!(biased > 0))
        return 0; // Also covers NaN, since every comparison with NaN is false.
    if (biased > 255)
        return 255;
    return biased;
}
2221
// Emit code into |jit| that clamps the int32 in |result| to [0, 255] in place.
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    // Unsigned compare: any in-range value (0..255) skips the clamping; negative
    // values fail it because they look like huge unsigned numbers.
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    // Signed compare splits the remaining cases: greater than 255 vs. negative.
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    jit.xorPtr(result, result); // Negative: clamp to 0.
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result); // Too big: clamp to 255.
    clamped.link(&jit);
    inBounds.link(&jit);
}
2233
// Emit code into |jit| that clamps the double in |source| to a byte in
// |result|: NaN and values <= 0 produce 0, values > 255 produce 255, and
// in-range values are rounded by adding 0.5 and truncating. |scratch| is
// clobbered.
// NOTE(review): +0.5-then-truncate rounds halves upward, whereas the typed
// array ToUint8Clamp operation specifies round-half-to-even -- confirm this
// matches the engine's intended semantics (see the FIXME below).
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);

    jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);
    MacroAssembler::Jump truncatedInt = jit.jump();

    tooSmall.link(&jit);
    jit.xorPtr(result, result); // <= 0 or NaN: clamp to 0.
    MacroAssembler::Jump zeroed = jit.jump();

    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result); // > 255: clamp to 255.

    truncatedInt.link(&jit);
    zeroed.link(&jit);

}
2263
2264 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2265 {
2266 if (node->op() == PutByValAlias)
2267 return JITCompiler::Jump();
2268 if (JSArrayBufferView* view = m_jit.graph().tryGetFoldableViewForChild1(node)) {
2269 uint32_t length = view->length();
2270 Node* indexNode = m_jit.graph().child(node, 1).node();
2271 if (m_jit.graph().isInt32Constant(indexNode) && static_cast<uint32_t>(m_jit.graph().valueOfInt32Constant(indexNode)) < length)
2272 return JITCompiler::Jump();
2273 return m_jit.branch32(
2274 MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2275 }
2276 return m_jit.branch32(
2277 MacroAssembler::AboveOrEqual, indexGPR,
2278 MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2279 }
2280
2281 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2282 {
2283 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2284 if (!jump.isSet())
2285 return;
2286 speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2287 }
2288
// Compile GetByVal on an integer-typed array: bounds-check, load the element
// with the right width/signedness, and pick a result representation (int32,
// int52, or double) based on how the node is used.
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));

    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (isSigned(type))
            m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    // Narrow or signed loads always fit in an int32.
    if (elementSize(type) < 4 || isSigned(type)) {
        int32Result(resultReg, node);
        return;
    }

    // Uint32 loads may exceed INT32_MAX; choose a representation.
    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32()) {
        // Speculate the high bit is clear; OSR-exit otherwise.
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
        int32Result(resultReg, node);
        return;
    }

#if USE(JSVALUE64)
    if (node->shouldSpeculateMachineInt()) {
        // Any uint32 fits in int52 after zero-extension.
        m_jit.zeroExtend32ToPtr(resultReg, resultReg);
        strictInt52Result(resultReg, node);
        return;
    }
#endif

    // Fall back to a double result, adding 2^32 when the value read as a
    // negative int32 (i.e. its unsigned value exceeded INT32_MAX).
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
2353
// Compile PutByVal into an integer-typed array. The value child may be a
// constant, an int32, an int52, or a double; each case converts (and, for
// clamped arrays, clamps to [0, 255]) into a GPR before the store. Stores for
// out-of-bounds array modes are skipped rather than trapping.
void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));

    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();

    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    // Adopted later from whichever temporary materializes the value.
    GPRTemporary value;
    GPRReg valueGPR = InvalidGPRReg;

    if (valueUse->isConstant()) {
        JSValue jsValue = valueOfJSConstant(valueUse.node());
        if (!jsValue.isNumber()) {
            // Storing a non-number constant: this speculation always fails.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        // Constant value: fold the clamping/conversion at compile time.
        double d = jsValue.asNumber();
        if (isClamped(type)) {
            ASSERT(elementSize(type) == 1);
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }

#if USE(JSVALUE64)
        case Int52RepUse: {
            SpeculateStrictInt52Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                // 64-bit analogue of compileClampIntegerToByte: unsigned
                // compare skips in-range values, signed compare separates
                // too-big from negative.
                MacroAssembler::Jump inBounds = m_jit.branch64(
                    MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
                MacroAssembler::Jump tooBig = m_jit.branch64(
                    MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
                m_jit.move(TrustedImm32(0), scratchReg);
                MacroAssembler::Jump clamped = m_jit.jump();
                tooBig.link(&m_jit);
                m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
                clamped.link(&m_jit);
                inBounds.link(&m_jit);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }
#endif // USE(JSVALUE64)

        case DoubleRepUse: {
            if (isClamped(type)) {
                ASSERT(elementSize(type) == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                // NaN converts to 0; handle it up front since truncation of
                // NaN is unspecified at the machine level.
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);

                // Try a hardware truncation; fall back to the C++ toInt32
                // helper when it fails (out-of-range doubles).
                MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
                    fpr, gpr, MacroAssembler::BranchIfTruncateFailed);

                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));

                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        // In-bounds mode: out-of-bounds is a speculation failure, not a skip.
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    // Out-of-bounds stores (when allowed by the array mode) silently skip.
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2489
// Compile GetByVal on a float-typed array: bounds-check, then load either a
// float (widened to double) or a double, always producing a double result.
void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));

    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    switch (elementSize(type)) {
    case 4:
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8: {
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    doubleResult(resultReg, node);
}
2522
// Compile PutByVal into a float-typed array: narrow the double to a float for
// 4-byte elements, store as-is for 8-byte elements. Out-of-bounds stores are
// either speculation failures (in-bounds mode) or silently skipped.
void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
{
    ASSERT(isFloat(type));

    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();

    Edge baseUse = m_jit.graph().varArgChild(node, 0);
    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    SpeculateDoubleOperand valueOp(this, valueUse);
    FPRTemporary scratch(this);
    FPRReg valueFPR = valueOp.fpr();
    FPRReg scratchFPR = scratch.fpr();

    ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));

    MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
    if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
        speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
        outOfBounds = MacroAssembler::Jump();
    }

    switch (elementSize(type)) {
    case 4: {
        // NOTE(review): the moveDouble looks redundant -- convertDoubleToFloat
        // writes scratchFPR from valueFPR anyway; confirm no target's
        // convertDoubleToFloat relies on the destination being pre-seeded.
        m_jit.moveDouble(valueFPR, scratchFPR);
        m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
        m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    }
    case 8:
        m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
    // Out-of-bounds stores (when allowed by the array mode) silently skip.
    if (outOfBounds.isSet())
        outOfBounds.link(&m_jit);
    noResult(node);
}
2563
// Emit the prototype-chain walk for instanceof, given a cell value. On exit,
// scratchReg holds the boxed boolean result (JSVALUE64) or a 0/1 payload
// (32_64). scratch2Reg is clobbered; valueReg is only read.
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
{
    // Check that prototype is an object.
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfCellNotObject(prototypeReg));

    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);

    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
#if USE(JSVALUE64)
    // Keep walking while the prototype is still a cell (chain ends at null).
    branchIsCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
#else
    // 32_64: the payload of the terminating null prototype is zero.
    m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif

    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();

    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif

    putResult.link(&m_jit);
}
2600
// Compile InstanceOf. The untyped path handles non-cell left-hand sides
// (which are never instances); the typed path speculates a cell and goes
// straight to the prototype-chain walk.
void SpeculativeJIT::compileInstanceOf(Node* node)
{
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.

        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);
        GPRTemporary scratch2(this);

        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();
        GPRReg scratch2Reg = scratch2.gpr();

        MacroAssembler::Jump isCell = branchIsCell(value.jsValueRegs());
        GPRReg valueReg = value.jsValueRegs().payloadGPR();
        // Non-cell LHS: "x instanceof y" is always false.
        moveFalseTo(scratchReg);

        MacroAssembler::Jump done = m_jit.jump();

        isCell.link(&m_jit);

        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

        done.link(&m_jit);

        blessedBooleanResult(scratchReg, node);
        return;
    }

    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);

    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();
    GPRReg scratch2Reg = scratch2.gpr();

    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);

    blessedBooleanResult(scratchReg, node);
}
2648
// Compile ValueAdd/ArithAdd for numeric use kinds. Int32 addition specializes
// constant operands and emits overflow checks (with speculation recovery)
// per the arith mode; Int52 elides the overflow check when abstract
// interpretation proves both inputs fit; doubles add directly.
void SpeculativeJIT::compileAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        if (isInt32Constant(node->child1().node())) {
            // Constant LHS: fold it into an immediate add.
            int32_t imm1 = valueOfInt32Constant(node->child1().node());
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        if (isInt32Constant(node->child2().node())) {
            // Constant RHS: same immediate-add specialization.
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = valueOfInt32Constant(node->child2().node());
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            int32Result(result.gpr(), node);
            return;
        }

        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (!shouldCheckOverflow(node->arithMode())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);

            // If the result reused an input register, record a recovery so the
            // OSR exit machinery can reconstruct the clobbered operand.
            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }

        int32Result(gprResult, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.add64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        // Double addition never overflows; no checks needed.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
2763
// Builds a JSRopeString from two or three child strings by allocating the rope
// cell inline. All children are KnownStringUse, so no type speculation is
// emitted here. The rope's flags are the AND of the fibers' flags (so Is8Bit
// survives only if every fiber is 8-bit) and its length is the sum of the
// fibers' lengths; a length-sum overflow triggers an OSR exit. If inline
// allocation fails we fall back to operationMakeRope2/operationMakeRope3.
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
    
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    // NOTE(review): constructed even when child3 is empty — presumably
    // SpeculateCellOperand tolerates an empty edge; confirm.
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);
    
    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();
    
    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
    
    // The new string starts out with a null value (it has no resolved buffer).
    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    // Store the fibers and clear any unused fiber slots.
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // Accumulate flags in scratchGPR and length in allocatorGPR. The
    // allocator register is free to be reused once the cell is allocated.
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    if (!ASSERT_DISABLED) {
        // A string must never have a negative length.
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        // OSR exit if the combined length overflows int32.
        speculationCheck(
            Uncountable, JSValueSource(), nullptr,
            m_jit.branchAdd32(
                JITCompiler::Overflow,
                JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
    }
    // Only the Is8Bit bit of the combined flags is kept for the rope.
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    if (!ASSERT_DISABLED) {
        // Re-check after summing: the total length must still be non-negative.
        JITCompiler::Jump ok = m_jit.branch32(
            JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
    
    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    cellResult(resultGPR, node);
}
2844
// Emits code for ArithSub. Int32Use has special paths for a constant right or
// left operand; overflow checks are emitted only when the node's arith mode
// requires them. Int52RepUse (64-bit only) skips the overflow check when
// abstract interpretation proves neither input can be a full Int52.
// DoubleRepUse is a plain double subtraction.
void SpeculativeJIT::compileArithSub(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));
        
        if (isNumberConstant(node->child2().node())) {
            // op1 - constant.
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = valueOfInt32Constant(node->child2().node());
            GPRTemporary result(this);
            
            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.sub32(Imm32(imm2), result.gpr());
            } else {
                // The scratch register lets branchSub32 materialize the
                // immediate on targets that need a register for it.
                GPRTemporary scratch(this);
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
            }
            
            int32Result(result.gpr(), node);
            return;
        }
        
        if (isNumberConstant(node->child1().node())) {
            // constant - op2.
            int32_t imm1 = valueOfInt32Constant(node->child1().node());
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);
            
            m_jit.move(Imm32(imm1), result.gpr());
            if (!shouldCheckOverflow(node->arithMode()))
                m_jit.sub32(op2.gpr(), result.gpr());
            else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
            
            int32Result(result.gpr(), node);
            return;
        }
        
        // Fully variable operands.
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);
        
        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub32(op2.gpr(), result.gpr());
        } else
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
        
        int32Result(result.gpr(), node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));
        
        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            // Force op2 into the same shift format as op1 so the raw sub64 is valid.
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        // 64-bit overflow of the shifted operands signals Int52 overflow.
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)
    
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.subDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
2945
// Emits code for ArithNegate (unary minus).
// - Int32Use: negate with optional overflow check. When negative zero also
//   matters, a single up-front test of (value & 0x7fffffff) == 0 catches both
//   bad inputs at once: 0 (negating yields -0) and INT_MIN/0x80000000
//   (negating overflows).
// - Int52RepUse (64-bit only): negates in a 64-bit register; the overflow
//   check is skipped when abstract interpretation proves the input cannot be
//   a full Int52. A zero result deopts when negative zero matters (the input
//   was 0, so the true result would be -0).
// - DoubleRepUse: plain double negation.
void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);
        
        m_jit.move(op1.gpr(), result.gpr());
        
        // Note: there is no notion of being not used as a number, but someone
        // caring about negative zero.
        
        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.neg32(result.gpr());
        else if (!shouldCheckNegativeZero(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            // Combined check: masks off the sign bit, so the test fires for
            // both 0 and INT_MIN.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }
        
        int32Result(result.gpr(), node);
        return;
    }
        
#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // No overflow check needed if the input is provably not a full Int52.
        if (!m_state.forNode(node->child1()).couldBeType(SpecInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            GPRTemporary result(this);
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();
            m_jit.move(op1GPR, resultGPR);
            m_jit.neg64(resultGPR);
            if (shouldCheckNegativeZero(node->arithMode())) {
                speculationCheck(
                    NegativeZero, JSValueRegs(), 0,
                    m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
            }
            int52Result(resultGPR, node, op1.format());
            return;
        }
        
        SpeculateInt52Operand op1(this, node->child1());
        GPRTemporary result(this);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
        if (shouldCheckNegativeZero(node->arithMode())) {
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
        }
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);
        
        m_jit.negateDouble(op1.fpr(), result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
// Emits code for ArithMul. Int32Use does a truncated or overflow-checked
// 32-bit multiply; Int52RepUse (64-bit only) uses a shifted-operand 64-bit
// multiply so that the hardware's int64 overflow flag doubles as the int52
// overflow check (see the long comment below); DoubleRepUse is a plain
// double multiply. For the integer paths, a zero result with either operand
// negative deopts when negative zero matters, since -0 is not representable
// as an integer.
void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);
        
        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();
        
        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }
        
        // Check for negative zero, if the users of this node care about such things.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }
        
        int32Result(result.gpr(), node);
        return;
    }
    
#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        
        // This is super clever. We want to do an int52 multiplication and check the
        // int52 overflow bit. There is no direct hardware support for this, but we do
        // have the ability to do an int64 multiplication and check the int64 overflow
        // bit. We leverage that. Consider that a, b are int52 numbers inside int64
        // registers, with the high 12 bits being sign-extended. We can do:
        //
        //     (a * (b << 12))
        //
        // This will give us a left-shifted int52 (value is in high 52 bits, low 16
        // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
        // multiplication overflows is identical to whether the 'a * b' 52-bit
        // multiplication overflows.
        //
        // In our nomenclature, this is:
        //
        //     strictInt52(a) * int52(b) => int52
        //
        // That is "strictInt52" means unshifted and "int52" means left-shifted by 16
        // bits.
        //
        // We don't care which of op1 or op2 serves as the left-shifted operand, so
        // we just do whatever is more convenient for op1 and have op2 do the
        // opposite. This ensures that we do at most one shift.
        
        SpeculateWhicheverInt52Operand op1(this, node->child1());
        SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
        GPRTemporary result(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();
        
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), 0,
            m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));
        
        // Same negative-zero protocol as the Int32 path, but on 64-bit values.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
                MacroAssembler::NonZero, resultGPR);
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm64(0)));
            speculationCheck(
                NegativeZero, JSValueRegs(), 0,
                m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm64(0)));
            resultNonZero.link(&m_jit);
        }
        
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        
        m_jit.mulDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        return;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3136
// Emits code for ArithDiv. The Int32Use path relies on hardware division:
// idiv on x86 (which requires the operands in edx:eax and traps on division
// by zero and on -2^31 / -1), or sdiv on ARM64/ARMv7s. If the arith mode
// checks overflow, a non-zero remainder deopts, since the true result would
// then be fractional. DoubleRepUse is a plain double division.
void SpeculativeJIT::compileArithDiv(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
#if CPU(X86) || CPU(X86_64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        // idiv reads the dividend from edx:eax and writes quotient to eax,
        // remainder to edx, so both registers are pinned here.
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        
        GPRReg op2TempGPR;
        GPRReg temp;
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }
        
        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);
        
        // temp = op2 + 1; unsigned (temp > 1) excludes exactly op2 == 0 and
        // op2 == -1 — the only denominators for which idiv can fault.
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
        
        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
        
        JITCompiler::JumpList done;
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
            // are happy to fall through to a normal division, since we're just dividing
            // something by negative 1.
            
            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), eax.gpr());
            done.append(m_jit.jump());
            
            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.zeroExtend32ToPtr(op1GPR, eax.gpr());
            done.append(m_jit.jump());
            
            notNeg2ToThe31.link(&m_jit);
        }
        
        safeDenominator.link(&m_jit);
        
        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }
        
        // Move the denominator out of eax/edx if it lives there, since idiv
        // is about to clobber both.
        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }
        
        m_jit.move(op1GPR, eax.gpr());
        // cdq sign-extends eax into edx:eax for the signed division.
        m_jit.assembler().cdq();
        m_jit.assembler().idivl_r(op2GPR);
        
        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);
        
        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
        
        done.link(&m_jit);
        int32Result(eax.gpr(), node);
#elif CPU(APPLE_ARMV7S) || CPU(ARM64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRTemporary quotient(this);
        GPRTemporary multiplyAnswer(this);
        
        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }
        
        m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
        
        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode())) {
            // quotient * divisor must reproduce the dividend exactly.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
        }
        
        int32Result(quotient.gpr(), node);
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
        break;
    }
        
    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);
        
        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.divDouble(reg1, reg2, result.fpr());
        
        doubleResult(result.fpr(), node);
        break;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
3273
3274 void SpeculativeJIT::compileArithMod(Node* node)
3275 {
3276 switch (node->binaryUseKind()) {
3277 case Int32Use: {
3278 // In the fast path, the dividend value could be the final result
3279 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
3280 SpeculateStrictInt32Operand op1(this, node->child1());
3281
3282 if (isInt32Constant(node->child2().node())) {
3283 int32_t divisor = valueOfInt32Constant(node->child2().node());
3284 if (divisor > 1 && hasOneBitSet(divisor)) {
3285 unsigned logarithm = WTF::fastLog2(divisor);
3286 GPRReg dividendGPR = op1.gpr();
3287 GPRTemporary result(this);
3288 GPRReg resultGPR = result.gpr();
3289
3290 // This is what LLVM generates. It's pretty crazy. Here's my
3291 // attempt at understanding it.
3292
3293 // First, compute either divisor - 1, or 0, depending on whether
3294 // the dividend is negative:
3295 //
3296 // If dividend < 0: resultGPR = divisor - 1
3297 // If dividend >= 0: resultGPR = 0
3298 m_jit.move(dividendGPR, resultGPR);
3299 m_jit.rshift32(TrustedImm32(31), resultGPR);
3300 m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);
3301
3302 // Add in the dividend, so that:
3303 //
3304 // If dividend < 0: resultGPR = dividend + divisor - 1
3305 // If dividend >= 0: resultGPR = dividend
3306 m_jit.add32(dividendGPR, resultGPR);
3307
3308 // Mask so as to only get the *high* bits. This rounds down
3309 // (towards negative infinity) resultGPR to the nearest multiple
3310 // of divisor, so that:
3311 //
3312 // If dividend < 0: resultGPR = floor((dividend + divisor - 1) / divisor)
3313 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3314 //
3315 // Note that this can be simplified to:
3316 //
3317 // If dividend < 0: resultGPR = ceil(dividend / divisor)
3318 // If dividend >= 0: resultGPR = floor(dividend / divisor)
3319 //
3320 // Note that if the dividend is negative, resultGPR will also be negative.
3321 // Regardless of the sign of dividend, resultGPR will be rounded towards
3322 // zero, because of how things are conditionalized.
3323 m_jit.and32(TrustedImm32(-divisor), resultGPR);
3324
3325 // Subtract resultGPR from dividendGPR, which yields the remainder:
3326 //
3327 // resultGPR = dividendGPR - resultGPR
3328 m_jit.neg32(resultGPR);
3329 m_jit.add32(dividendGPR, resultGPR);
3330
3331 if (shouldCheckNegativeZero(node->arithMode())) {
3332 // Check that we're not about to create negative zero.
3333 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3334 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
3335 numeratorPositive.link(&m_jit);
3336 }
3337
3338 int32Result(resultGPR, node);
3339 return;
3340 }
3341 }
3342
3343 #if CPU(X86) || CPU(X86_64)
3344 if (isInt32Constant(node->child2().node())) {
3345 int32_t divisor = valueOfInt32Constant(node->child2().node());
3346 if (divisor && divisor != -1) {
3347 GPRReg op1Gpr = op1.gpr();
3348
3349 GPRTemporary eax(this, X86Registers::eax);
3350 GPRTemporary edx(this, X86Registers::edx);
3351 GPRTemporary scratch(this);
3352 GPRReg scratchGPR = scratch.gpr();
3353
3354 GPRReg op1SaveGPR;
3355 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
3356 op1SaveGPR = allocate();
3357 ASSERT(op1Gpr != op1SaveGPR);
3358 m_jit.move(op1Gpr, op1SaveGPR);
3359 } else
3360 op1SaveGPR = op1Gpr;
3361 ASSERT(op1SaveGPR != X86Registers::eax);
3362 ASSERT(op1SaveGPR != X86Registers::edx);
3363
3364 m_jit.move(op1Gpr, eax.gpr());
3365 m_jit.move(TrustedImm32(divisor), scratchGPR);
3366 m_jit.assembler().cdq();
3367 m_jit.assembler().idivl_r(scratchGPR);
3368 if (shouldCheckNegativeZero(node->arithMode())) {
3369 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3370 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3371 numeratorPositive.link(&m_jit);
3372 }
3373
3374 if (op1SaveGPR != op1Gpr)
3375 unlock(op1SaveGPR);
3376
3377 int32Result(edx.gpr(), node);
3378 return;
3379 }
3380 }
3381 #endif
3382
3383 SpeculateInt32Operand op2(this, node->child2());
3384 #if CPU(X86) || CPU(X86_64)
3385 GPRTemporary eax(this, X86Registers::eax);
3386 GPRTemporary edx(this, X86Registers::edx);
3387 GPRReg op1GPR = op1.gpr();
3388 GPRReg op2GPR = op2.gpr();
3389
3390 GPRReg op2TempGPR;
3391 GPRReg temp;
3392 GPRReg op1SaveGPR;
3393
3394 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
3395 op2TempGPR = allocate();
3396 temp = op2TempGPR;
3397 } else {
3398 op2TempGPR = InvalidGPRReg;
3399 if (op1GPR == X86Registers::eax)
3400 temp = X86Registers::edx;
3401 else
3402 temp = X86Registers::eax;
3403 }
3404
3405 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3406 op1SaveGPR = allocate();
3407 ASSERT(op1GPR != op1SaveGPR);
3408 m_jit.move(op1GPR, op1SaveGPR);
3409 } else
3410 op1SaveGPR = op1GPR;
3411
3412 ASSERT(temp != op1GPR);
3413 ASSERT(temp != op2GPR);
3414 ASSERT(op1SaveGPR != X86Registers::eax);
3415 ASSERT(op1SaveGPR != X86Registers::edx);
3416
3417 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3418
3419 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3420
3421 JITCompiler::JumpList done;
3422
3423 // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
3424 // separate case for that. But it probably doesn't matter so much.
3425 if (shouldCheckOverflow(node->arithMode())) {
3426 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3427 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3428 } else {
3429 // This is the case where we convert the result to an int after we're done, and we
3430 // already know that the denominator is either -1 or 0. So, if the denominator is
3431 // zero, then the result should be zero. If the denominator is not zero (i.e. it's
3432 // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
3433 // happy to fall through to a normal division, since we're just dividing something
3434 // by negative 1.
3435
3436 JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
3437 m_jit.move(TrustedImm32(0), edx.gpr());
3438 done.append(m_jit.jump());
3439
3440 notZero.link(&m_jit);
3441 JITCompiler::Jump notNeg2ToThe31 =
3442 m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
3443 m_jit.move(TrustedImm32(0), edx.gpr());
3444 done.append(m_jit.jump());
3445
3446 notNeg2ToThe31.link(&m_jit);
3447 }
3448
3449 safeDenominator.link(&m_jit);
3450
3451 if (op2TempGPR != InvalidGPRReg) {
3452 m_jit.move(op2GPR, op2TempGPR);
3453 op2GPR = op2TempGPR;
3454 }
3455
3456 m_jit.move(op1GPR, eax.gpr());
3457 m_jit.assembler().cdq();
3458 m_jit.assembler().idivl_r(op2GPR);
3459
3460 if (op2TempGPR != InvalidGPRReg)
3461 unlock(op2TempGPR);
3462
3463 // Check that we're not about to create negative zero.
3464 if (shouldCheckNegativeZero(node->arithMode())) {
3465 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3466 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3467 numeratorPositive.link(&m_jit);
3468 }
3469
3470 if (op1SaveGPR != op1GPR)
3471 unlock(op1SaveGPR);
3472
3473 done.link(&m_jit);
3474 int32Result(edx.gpr(), node);
3475
3476 #elif CPU(ARM64) || CPU(APPLE_ARMV7S)
3477 GPRTemporary temp(this);
3478 GPRTemporary quotientThenRemainder(this);
3479 GPRTemporary multiplyAnswer(this);
3480 GPRReg dividendGPR = op1.gpr();
3481 GPRReg divisorGPR = op2.gpr();
3482 GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3483 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3484
3485 JITCompiler::JumpList done;
3486
3487 if (shouldCheckOverflow(node->arithMode()))
3488 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
3489 else {
3490 JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
3491 m_jit.move(divisorGPR, quotientThenRemainderGPR);
3492 done.append(m_jit.jump());
3493 denominatorNotZero.link(&m_jit);
3494 }
3495
3496 m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3497 // FIXME: It seems like there are cases where we don't need this? What if we have
3498 // arithMode() == Arith::Unchecked?
3499 // https://bugs.webkit.org/show_bug.cgi?id=126444
3500 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3501 #if CPU(APPLE_ARMV7S)
3502 m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3503 #else
3504 m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3505 #endif
3506
3507 // If the user cares about negative zero, then speculate that we're not about
3508 // to produce negative zero.
3509 if (shouldCheckNegativeZero(node->arithMode())) {
3510 // Check that we're not about to create negative zero.
3511 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3512 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3513 numeratorPositive.link(&m_jit);
3514 }
3515
3516 done.link(&m_jit);
3517
3518 int32Result(quotientThenRemainderGPR, node);
3519 #else // not architecture that can do integer division
3520 RELEASE_ASSERT_NOT_REACHED();
3521 #endif
3522 return;
3523 }
3524
3525 case DoubleRepUse: {
3526 SpeculateDoubleOperand op1(this, node->child1());
3527 SpeculateDoubleOperand op2(this, node->child2());
3528
3529 FPRReg op1FPR = op1.fpr();
3530 FPRReg op2FPR = op2.fpr();
3531
3532 flushRegisters();
3533
3534 FPRResult result(this);
3535
3536 callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
3537
3538 doubleResult(result.fpr(), node);
3539 return;
3540 }
3541
3542 default:
3543 RELEASE_ASSERT_NOT_REACHED();
3544 return;
3545 }
3546 }
3547
// Returns true if the compare is fused with a subsequent branch.
// Generic dispatcher for relational/equality compare nodes. It first tries to
// fuse the compare with an immediately following Branch (peephole); failing
// that, it selects a specialized comparison from the node's binary use kind.
// The dispatch order is significant: the numeric kinds are checked before the
// CompareEq-only specializations (strings, booleans, objects, and the
// object-vs-object-or-other mixes), and anything unhandled falls back to the
// generic non-speculative compare, which may invoke `operation`.
bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
{
    if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
        return true;
    
    if (node->isBinaryUseKind(Int32Use)) {
        compileInt32Compare(node, condition);
        return false;
    }
    
#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        compileInt52Compare(node, condition);
        return false;
    }
#endif // USE(JSVALUE64)
    
    if (node->isBinaryUseKind(DoubleRepUse)) {
        compileDoubleCompare(node, doubleCondition);
        return false;
    }
    
    // Equality-only specializations.
    if (node->op() == CompareEq) {
        if (node->isBinaryUseKind(StringUse)) {
            compileStringEquality(node);
            return false;
        }
        
        if (node->isBinaryUseKind(BooleanUse)) {
            compileBooleanCompare(node, condition);
            return false;
        }
        
        if (node->isBinaryUseKind(StringIdentUse)) {
            compileStringIdentEquality(node);
            return false;
        }
        
        if (node->isBinaryUseKind(ObjectUse)) {
            compileObjectEquality(node);
            return false;
        }
        
        // Mixed object / object-or-other kinds; the two orderings share one
        // helper with the children swapped.
        if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
            return false;
        }
        
        if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
            return false;
        }
    }
    
    nonSpeculativeNonPeepholeCompare(node, condition, operation);
    return false;
}
3606
// Compiles a StrictEqual (===) node by dispatching on the proven use kinds of
// its two children and selecting a specialized comparison. For several use
// kinds we first try to fuse with an immediately following Branch node (a
// "peephole" compare-and-branch).
//
// Returns true if the node was fused with a following Branch — in that case
// the branch node has been consumed and m_indexInBlock/m_currentNode were
// advanced so the branch is not compiled again. Returns false if a boolean
// result was materialized normally.
bool SpeculativeJIT::compileStrictEq(Node* node)
{
    if (node->isBinaryUseKind(BooleanUse)) {
        // detectPeepHoleBranch() returns UINT_MAX when the next node in the
        // block is not a fusable Branch on this compare.
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
            // Mark both children used since the fused path bypasses the
            // normal result-producing mechanism.
            use(node->child1());
            use(node->child2());
            // Skip ahead past the branch node we just consumed.
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBooleanCompare(node, MacroAssembler::Equal);
        return false;
    }

    if (node->isBinaryUseKind(Int32Use)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt32Compare(node, MacroAssembler::Equal);
        return false;
    }

#if USE(JSVALUE64)
    // Int52 representation only exists on 64-bit ports.
    if (node->isBinaryUseKind(Int52RepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt52Compare(node, MacroAssembler::Equal);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileDoubleCompare(node, MacroAssembler::DoubleEqual);
        return false;
    }

    if (node->isBinaryUseKind(StringUse)) {
        compileStringEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse)) {
        compileStringIdentEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectEquality(node);
        return false;
    }

    // The remaining specializations are asymmetric; each appears in both
    // child orders, with the children swapped accordingly.
    if (node->isBinaryUseKind(MiscUse, UntypedUse)
        || node->isBinaryUseKind(UntypedUse, MiscUse)) {
        compileMiscStrictEq(node);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
        return false;
    }

    if (node->isBinaryUseKind(StringUse, UntypedUse)) {
        compileStringToUntypedEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, StringUse)) {
        compileStringToUntypedEquality(node, node->child2(), node->child1());
        return false;
    }

    // Fully generic fallback: both children must be UntypedUse at this point.
    RELEASE_ASSERT(node->isBinaryUseKind(UntypedUse));
    return nonSpeculativeStrictEq(node);
}
3725
3726 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
3727 {
3728 SpeculateBooleanOperand op1(this, node->child1());
3729 SpeculateBooleanOperand op2(this, node->child2());
3730 GPRTemporary result(this);
3731
3732 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
3733
3734 unblessedBooleanResult(result.gpr(), node);
3735 }
3736
// Shared tail for string equality. Emits an inline character-by-character
// comparison of the two JSStrings in leftGPR/rightGPR. The caller passes in
// fastTrue/fastFalse — jumps it already emitted that should land on the
// true/false results. The inline path only handles flat (non-rope) 8-bit
// strings; ropes and 16-bit strings defer to operationCompareStringEq via a
// slow path. The boolean result is produced in leftTempGPR.
void SpeculativeJIT::compileStringEquality(
    Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
    GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
    JITCompiler::JumpList fastTrue, JITCompiler::JumpList fastFalse)
{
    JITCompiler::JumpList trueCase;
    JITCompiler::JumpList falseCase;
    JITCompiler::JumpList slowCase;

    trueCase.append(fastTrue);
    falseCase.append(fastFalse);

    m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);

    // Different lengths can never be equal.
    falseCase.append(m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
        lengthGPR));

    // Two empty strings are trivially equal.
    trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));

    m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);

    // A null value pointer means the string is a rope — handle on the slow path.
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));

    // The inline loop below loads bytes, so both impls must be 8-bit.
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));

    // Replace the impl pointers with pointers to their character buffers.
    m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);

    // Compare one byte per iteration, walking the index down from length-1 to 0.
    MacroAssembler::Label loop = m_jit.label();

    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // This isn't going to generate the best code on x86. But that's OK, it's still better
    // than not inlining.
    m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
    m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
    falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));

    m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);

    trueCase.link(&m_jit);
    moveTrueTo(leftTempGPR);

    JITCompiler::Jump done = m_jit.jump();

    falseCase.link(&m_jit);
    moveFalseTo(leftTempGPR);

    done.link(&m_jit);
    // Slow path computes the result with the generic runtime comparison,
    // writing it into leftTempGPR like the fast paths do.
    addSlowPathGenerator(
        slowPathCall(
            slowCase, this, operationCompareStringEq, leftTempGPR, leftGPR, rightGPR));

    blessedBooleanResult(leftTempGPR, node);
}
3803
// String === String: allocates registers, speculates both children to be
// strings, and delegates the actual comparison to the register-level
// compileStringEquality overload above.
void SpeculativeJIT::compileStringEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this, Reuse, right);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(node->child1(), leftGPR);

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    JITCompiler::Jump fastTrue = m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR);

    speculateString(node->child2(), rightGPR);

    // No fast-false jumps from this path, hence the empty Jump.
    compileStringEquality(
        node, leftGPR, rightGPR, lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, JITCompiler::Jump());
}
3834
// String === Untyped: the string side is speculated; the untyped side is
// filtered dynamically. Non-cells and non-string cells compare false without
// any speculation failure, after which the string-vs-string tail is shared
// with compileStringEquality.
void SpeculativeJIT::compileStringToUntypedEquality(Node* node, Edge stringEdge, Edge untypedEdge)
{
    SpeculateCellOperand left(this, stringEdge);
    // ManualOperandSpeculation: we do the type filtering ourselves below.
    JSValueOperand right(this, untypedEdge, ManualOperandSpeculation);
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this);

    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(stringEdge, leftGPR);

    JITCompiler::JumpList fastTrue;
    JITCompiler::JumpList fastFalse;

    // A non-cell can never strict-equal a string.
    fastFalse.append(branchNotCell(rightRegs));

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    fastTrue.append(m_jit.branchPtr(
        MacroAssembler::Equal, leftGPR, rightRegs.payloadGPR()));

    // A cell that is not a string compares false.
    fastFalse.append(m_jit.branchStructurePtr(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightRegs.payloadGPR(), JSCell::structureIDOffset()),
        m_jit.vm()->stringStructure.get()));

    compileStringEquality(
        node, leftGPR, rightRegs.payloadGPR(), lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, fastFalse);
}
3874
3875 void SpeculativeJIT::compileStringIdentEquality(Node* node)
3876 {
3877 SpeculateCellOperand left(this, node->child1());
3878 SpeculateCellOperand right(this, node->child2());
3879 GPRTemporary leftTemp(this);
3880 GPRTemporary rightTemp(this);
3881
3882 GPRReg leftGPR = left.gpr();
3883 GPRReg rightGPR = right.gpr();
3884 GPRReg leftTempGPR = leftTemp.gpr();
3885 GPRReg rightTempGPR = rightTemp.gpr();
3886
3887 speculateString(node->child1(), leftGPR);
3888 speculateString(node->child2(), rightGPR);
3889
3890 speculateStringIdentAndLoadStorage(node->child1(), leftGPR, leftTempGPR);
3891 speculateStringIdentAndLoadStorage(node->child2(), rightGPR, rightTempGPR);
3892
3893 m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, leftTempGPR);
3894
3895 unblessedBooleanResult(leftTempGPR, node);
3896 }
3897
// StringIdent === NotStringVar: the left side is a string with an identifier
// impl; the right side is only known to not be a string variable. If the
// right value turns out not to be a string cell, the answer is false;
// otherwise both impls are compared by pointer. Result lands in rightTempGPR.
void SpeculativeJIT::compileStringIdentToNotStringVarEquality(
    Node* node, Edge stringEdge, Edge notStringVarEdge)
{
    SpeculateCellOperand left(this, stringEdge);
    // ManualOperandSpeculation: the not-a-string cases are filtered inline below.
    JSValueOperand right(this, notStringVarEdge, ManualOperandSpeculation);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();

    speculateString(stringEdge, leftGPR);
    speculateStringIdentAndLoadStorage(stringEdge, leftGPR, leftTempGPR);

    // Preload "false" so the notString jumps below can go straight to the result.
    moveFalseTo(rightTempGPR);
    JITCompiler::JumpList notString;
    notString.append(branchNotCell(rightRegs));
    notString.append(m_jit.branchStructurePtr(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightRegs.payloadGPR(), JSCell::structureIDOffset()),
        m_jit.vm()->stringStructure.get()));

    speculateStringIdentAndLoadStorage(notStringVarEdge, rightRegs.payloadGPR(), rightTempGPR);

    // Identifier impls are unique, so pointer equality decides string equality.
    m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, rightTempGPR);
    notString.link(&m_jit);

    unblessedBooleanResult(rightTempGPR, node);
}
3928
3929 void SpeculativeJIT::compileStringZeroLength(Node* node)
3930 {
3931 SpeculateCellOperand str(this, node->child1());
3932 GPRReg strGPR = str.gpr();
3933
3934 // Make sure that this is a string.
3935 speculateString(node->child1(), strGPR);
3936
3937 GPRTemporary eq(this);
3938 GPRReg eqGPR = eq.gpr();
3939
3940 // Fetch the length field from the string object.
3941 m_jit.test32(MacroAssembler::Zero, MacroAssembler::Address(strGPR, JSString::offsetOfLength()), MacroAssembler::TrustedImm32(-1), eqGPR);
3942
3943 unblessedBooleanResult(eqGPR, node);
3944 }
3945
3946 void SpeculativeJIT::compileConstantStoragePointer(Node* node)
3947 {
3948 GPRTemporary storage(this);
3949 GPRReg storageGPR = storage.gpr();
3950 m_jit.move(TrustedImmPtr(node->storagePointer()), storageGPR);
3951 storageResult(storageGPR, node);
3952 }
3953
3954 void SpeculativeJIT::compileGetIndexedPropertyStorage(Node* node)
3955 {
3956 SpeculateCellOperand base(this, node->child1());
3957 GPRReg baseReg = base.gpr();
3958
3959 GPRTemporary storage(this);
3960 GPRReg storageReg = storage.gpr();
3961
3962 switch (node->arrayMode().type()) {
3963 case Array::String:
3964 m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);
3965
3966 addSlowPathGenerator(
3967 slowPathCall(
3968 m_jit.branchTest32(MacroAssembler::Zero, storageReg),
3969 this, operationResolveRope, storageReg, baseReg));
3970
3971 m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
3972 break;
3973
3974 default:
3975 ASSERT(isTypedView(node->arrayMode().typedArrayType()));
3976 m_jit.loadPtr(
3977 MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfVector()),
3978 storageReg);
3979 break;
3980 }
3981
3982 storageResult(storageReg, node);
3983 }
3984
// Computes a typed array view's byteOffset. Only views in WastefulTypedArray
// mode carry a real offset into a shared ArrayBuffer; for every other mode
// the offset is 0. For the wasteful case the offset is recovered as
// (view's vector pointer) - (backing ArrayBuffer's data pointer).
void SpeculativeJIT::compileGetTypedArrayByteOffset(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRTemporary vector(this);
    GPRTemporary data(this);

    GPRReg baseGPR = base.gpr();
    GPRReg vectorGPR = vector.gpr();
    GPRReg dataGPR = data.gpr();

    JITCompiler::Jump emptyByteOffset = m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(WastefulTypedArray));

    // Chase butterfly -> ArrayBuffer -> data pointer, then subtract it from
    // the view's vector pointer to get the byte offset.
    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), dataGPR);
    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), vectorGPR);
    m_jit.loadPtr(MacroAssembler::Address(dataGPR, Butterfly::offsetOfArrayBuffer()), dataGPR);
    m_jit.loadPtr(MacroAssembler::Address(dataGPR, ArrayBuffer::offsetOfData()), dataGPR);
    m_jit.subPtr(dataGPR, vectorGPR);

    JITCompiler::Jump done = m_jit.jump();

    // Non-wasteful views: byteOffset is 0.
    emptyByteOffset.link(&m_jit);
    m_jit.move(TrustedImmPtr(0), vectorGPR);

    done.link(&m_jit);

    int32Result(vectorGPR, node);
}
4015
// GetByVal on an Arguments object. The fast path loads directly out of the
// register file slice the Arguments object aliases; it bails out when the
// index is out of bounds or when the object carries slow argument data
// (which means arguments may live elsewhere).
void SpeculativeJIT::compileGetByValOnArguments(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    GPRTemporary result(this);
#if USE(JSVALUE32_64)
    // 32-bit ports need a separate register for the tag half of the JSValue.
    GPRTemporary resultTag(this);
#endif
    GPRTemporary scratch(this);

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg resultReg = result.gpr();
#if USE(JSVALUE32_64)
    GPRReg resultTagReg = resultTag.gpr();
#endif
    GPRReg scratchReg = scratch.gpr();

    if (!m_compileOkay)
        return;

    ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // Two really lame checks.
    // 1) Index must be below the argument count.
    speculationCheck(
        Uncountable, JSValueSource(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg,
            MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments())));
    // 2) No slow argument data may be present, or the direct load below
    //    would read the wrong location.
    speculationCheck(
        Uncountable, JSValueSource(), 0,
        m_jit.branchTestPtr(
            MacroAssembler::NonZero,
            MacroAssembler::Address(
                baseReg, Arguments::offsetOfSlowArgumentData())));

    // Sign-extend the index so BaseIndex addressing is correct on 64-bit.
    m_jit.move(propertyReg, resultReg);
    m_jit.signExtend32ToPtr(resultReg, resultReg);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, Arguments::offsetOfRegisters()),
        scratchReg);

    // Argument 0 lives one Register slot past |this|, hence the extra
    // sizeof(Register) in the displacement.
#if USE(JSVALUE32_64)
    m_jit.load32(
        MacroAssembler::BaseIndex(
            scratchReg, resultReg, MacroAssembler::TimesEight,
            CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register) +
            OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
        resultTagReg);
    m_jit.load32(
        MacroAssembler::BaseIndex(
            scratchReg, resultReg, MacroAssembler::TimesEight,
            CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register) +
            OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
        resultReg);
    jsValueResult(resultTagReg, resultReg, node);
#else
    m_jit.load64(
        MacroAssembler::BaseIndex(
            scratchReg, resultReg, MacroAssembler::TimesEight,
            CallFrame::thisArgumentOffset() * sizeof(Register) + sizeof(Register)),
        resultReg);
    jsValueResult(resultReg, node);
#endif
}
4081
4082 void SpeculativeJIT::compileGetArgumentsLength(Node* node)
4083 {
4084 SpeculateCellOperand base(this, node->child1());
4085 GPRTemporary result(this, Reuse, base);
4086
4087 GPRReg baseReg = base.gpr();
4088 GPRReg resultReg = result.gpr();
4089
4090 if (!m_compileOkay)
4091 return;
4092
4093 ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
4094
4095 speculationCheck(
4096 Uncountable, JSValueSource(), 0,
4097 m_jit.branchTest8(
4098 MacroAssembler::NonZero,
4099 MacroAssembler::Address(baseReg, Arguments::offsetOfOverrodeLength())));
4100
4101 m_jit.load32(
4102 MacroAssembler::Address(baseReg, Arguments::offsetOfNumArguments()),
4103 resultReg);
4104 int32Result(resultReg, node);
4105 }
4106
// Loads the length of an array-like object, dispatching on the node's proven
// array mode. Contiguous modes read the butterfly's public length, strings
// read the JSString length, Arguments delegates to compileGetArgumentsLength,
// and typed arrays read the view's length field.
void SpeculativeJIT::compileGetArrayLength(Node* node)
{
    switch (node->arrayMode().type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        int32Result(resultReg, node);
        break;
    }
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        // The length must be representable as a non-negative int32; bail if
        // the loaded value has the sign bit set.
        speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));

        int32Result(resultReg, node);
        break;
    }
    case Array::String: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
        int32Result(resultGPR, node);
        break;
    }
    case Array::Arguments: {
        compileGetArgumentsLength(node);
        break;
    }
    default: {
        // Anything else must be a typed array view.
        ASSERT(isTypedView(node->arrayMode().typedArrayType()));
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()), resultGPR);
        int32Result(resultGPR, node);
        break;
    } }
}
4159
4160 void SpeculativeJIT::compileNewFunctionNoCheck(Node* node)
4161 {
4162 GPRResult result(this);
4163 GPRReg resultGPR = result.gpr();
4164 flushRegisters();
4165 callOperation(
4166 operationNewFunctionNoCheck, resultGPR, m_jit.codeBlock()->functionDecl(node->functionDeclIndex()));
4167 cellResult(resultGPR, node);
4168 }
4169
4170 void SpeculativeJIT::compileNewFunctionExpression(Node* node)
4171 {
4172 GPRResult result(this);
4173 GPRReg resultGPR = result.gpr();
4174 flushRegisters();
4175 callOperation(
4176 operationNewFunctionNoCheck,
4177 resultGPR,
4178 m_jit.codeBlock()->functionExpr(node->functionExprIndex()));
4179 cellResult(resultGPR, node);
4180 }
4181
// Attempts to fuse a RegExpExec whose only consumer is the immediately
// following Branch. In that case only the truthiness of the match matters,
// so the cheaper operationRegExpTest is called and its boolean result feeds
// the branch directly. Returns false (no code emitted) when no fusable
// branch follows; returns true after consuming the branch node.
bool SpeculativeJIT::compileRegExpExec(Node* node)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock == UINT_MAX)
        return false;
    Node* branchNode = m_block->at(branchIndexInBlock);
    // Fusion is only valid when the branch is the sole user of the result.
    ASSERT(node->adjustedRefCount() == 1);

    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // If the taken block is the fall-through block, swap the targets and
    // invert the test so we only emit one explicit jump.
    bool invert = false;
    if (taken == nextBlock()) {
        invert = true;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand base(this, node->child1());
    SpeculateCellOperand argument(this, node->child2());
    GPRReg baseGPR = base.gpr();
    GPRReg argumentGPR = argument.gpr();

    flushRegisters();
    GPRResult result(this);
    callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);

    branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
    jump(notTaken);

    use(node->child1());
    use(node->child2());
    // Skip past the branch node we just consumed.
    m_indexInBlock = branchIndexInBlock;
    m_currentNode = branchNode;

    return true;
}
4220
// Allocates the initial out-of-line property storage (butterfly) for an
// object transitioning from zero out-of-line capacity. If the previous
// structure could have an indexing header, the butterfly must be reallocated
// by the runtime; otherwise storage is bump-allocated inline with a slow
// path fallback.
void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
    if (node->structureTransitionData().previousStructure->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRResult result(this);
        callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR);

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    GPRTemporary scratch1(this);

    GPRReg baseGPR = base.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();

    // This fast path only applies to the 0 -> initialOutOfLineCapacity transition.
    ASSERT(!node->structureTransitionData().previousStructure->outOfLineCapacity());
    ASSERT(initialOutOfLineCapacity == node->structureTransitionData().newStructure->outOfLineCapacity());

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(
            TrustedImm32(initialOutOfLineCapacity * sizeof(JSValue)), scratchGPR1);

    // Advance past the indexing header so scratchGPR1 is the butterfly pointer.
    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorageWithInitialCapacity, scratchGPR1));

    m_jit.storePtr(scratchGPR1, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR1, node);
}
4259
// Grows an object's out-of-line property storage by outOfLineGrowthFactor.
// The indexing-header case goes through the runtime; otherwise a new chunk is
// bump-allocated inline and the existing out-of-line properties are copied
// over (they live at negative offsets from the butterfly pointer).
void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
{
    size_t oldSize = node->structureTransitionData().previousStructure->outOfLineCapacity() * sizeof(JSValue);
    size_t newSize = oldSize * outOfLineGrowthFactor;
    ASSERT(newSize == node->structureTransitionData().newStructure->outOfLineCapacity() * sizeof(JSValue));

    if (node->structureTransitionData().previousStructure->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRResult result(this);
        callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue));

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    StorageOperand oldStorage(this, node->child2());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg baseGPR = base.gpr();
    GPRReg oldStorageGPR = oldStorage.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(TrustedImm32(newSize), scratchGPR1);

    // Advance past the indexing header so scratchGPR1 is the butterfly pointer.
    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorage, scratchGPR1, newSize / sizeof(JSValue)));

    // We have scratchGPR1 = new storage, scratchGPR2 = scratch
    for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(oldSize); offset += sizeof(void*)) {
        m_jit.loadPtr(JITCompiler::Address(oldStorageGPR, -(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
        m_jit.storePtr(scratchGPR2, JITCompiler::Address(scratchGPR1, -(offset + sizeof(JSValue) + sizeof(void*))));
    }
    m_jit.storePtr(scratchGPR1, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR1, node);
}
4307
4308 GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode)
4309 {
4310 if (!putByValWillNeedExtraRegister(arrayMode))
4311 return InvalidGPRReg;
4312
4313 GPRTemporary realTemporary(this);
4314 temporary.adopt(realTemporary);
4315 return temporary.gpr();
4316 }
4317
// ToString on a cell, specialized by the child's use kind:
// - StringObjectUse: unwrap the StringObject's internal string value.
// - StringOrStringObjectUse: pass strings through as-is, unwrap StringObjects.
// - CellUse: generic cells — call the runtime, with an optional inline
//   pass-through when the value is predicted to possibly be a string.
void SpeculativeJIT::compileToStringOnCell(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    GPRReg op1GPR = op1.gpr();

    switch (node->child1().useKind()) {
    case StringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        speculateStringObject(node->child1(), op1GPR);
        // Record the proven type so later phases can rely on it.
        m_interpreter.filter(node->child1(), SpecStringObject);

        // The wrapped string is the StringObject's internal value.
        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
        cellResult(resultGPR, node);
        break;
    }

    case StringOrStringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        m_jit.load32(JITCompiler::Address(op1GPR, JSCell::structureIDOffset()), resultGPR);
        // If it's already a string, it is its own ToString result.
        JITCompiler::Jump isString = m_jit.branchStructurePtr(
            JITCompiler::Equal,
            resultGPR,
            m_jit.vm()->stringStructure.get());

        // Not a string: it must be a StringObject; unwrap it.
        speculateStringObjectForStructure(node->child1(), resultGPR);

        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);

        JITCompiler::Jump done = m_jit.jump();
        isString.link(&m_jit);
        m_jit.move(op1GPR, resultGPR);
        done.link(&m_jit);

        m_interpreter.filter(node->child1(), SpecString | SpecStringObject);

        cellResult(resultGPR, node);
        break;
    }

    case CellUse: {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();

        // We flush registers instead of silent spill/fill because in this mode we
        // believe that most likely the input is not a string, and we need to take
        // slow path.
        flushRegisters();
        JITCompiler::Jump done;
        if (node->child1()->prediction() & SpecString) {
            // Inline fast path: a string is its own ToString result.
            JITCompiler::Jump needCall = m_jit.branchStructurePtr(
                JITCompiler::NotEqual,
                JITCompiler::Address(op1GPR, JSCell::structureIDOffset()),
                m_jit.vm()->stringStructure.get());
            m_jit.move(op1GPR, resultGPR);
            done = m_jit.jump();
            needCall.link(&m_jit);
        }
        callOperation(operationToStringOnCell, resultGPR, op1GPR);
        if (done.isSet())
            done.link(&m_jit);
        cellResult(resultGPR, node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
4390
// Allocates a StringObject wrapping the child string. The object is
// allocated inline; allocation failure falls back to operationNewStringObject.
void SpeculativeJIT::compileNewStringObject(Node* node)
{
    SpeculateCellOperand operand(this, node->child1());

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg operandGPR = operand.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;

    // Inline-allocate the object shell with a null butterfly.
    emitAllocateJSObject<StringObject>(
        resultGPR, TrustedImmPtr(node->structure()), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
        slowPath);

    m_jit.storePtr(
        TrustedImmPtr(StringObject::info()),
        JITCompiler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
#if USE(JSVALUE64)
    // On 64-bit, a cell pointer is already a boxed JSValue.
    m_jit.store64(
        operandGPR, JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset()));
#else
    // On 32-bit, store the CellTag and payload halves separately.
    m_jit.store32(
        TrustedImm32(JSValue::CellTag),
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
    m_jit.store32(
        operandGPR,
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif

    addSlowPathGenerator(slowPathCall(
        slowPath, this, operationNewStringObject, resultGPR, operandGPR, node->structure()));

    cellResult(resultGPR, node);
}
4430
// Allocates a typed array of the node's element type with an int32 size.
// The fast path inline-allocates both the data vector and the view object;
// sizes that are zero or above the fast limit go to the runtime.
void SpeculativeJIT::compileNewTypedArray(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
    TypedArrayType type = node->typedArrayType();
    Structure* structure = globalObject->typedArrayStructure(type);

    SpeculateInt32Operand size(this, node->child1());
    GPRReg sizeGPR = size.gpr();

    GPRTemporary result(this);
    GPRTemporary storage(this);
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg storageGPR = storage.gpr();
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::JumpList slowCases;

    // Too-large and zero-length arrays are handled by the runtime call.
    slowCases.append(m_jit.branch32(
        MacroAssembler::Above, sizeGPR, TrustedImm32(JSArrayBufferView::fastSizeLimit)));
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, sizeGPR));

    // Byte size = elementCount << logElementSize, rounded up to a multiple
    // of 8 for sub-8-byte element types.
    m_jit.move(sizeGPR, scratchGPR);
    m_jit.lshift32(TrustedImm32(logElementSize(type)), scratchGPR);
    if (elementSize(type) < 8) {
        m_jit.add32(TrustedImm32(7), scratchGPR);
        m_jit.and32(TrustedImm32(~7), scratchGPR);
    }
    slowCases.append(
        emitAllocateBasicStorage(scratchGPR, storageGPR));

    // emitAllocateBasicStorage leaves the end of the allocation; back up to
    // its start.
    m_jit.subPtr(scratchGPR, storageGPR);

    emitAllocateJSObject<JSArrayBufferView>(
        resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, scratchGPR2,
        slowCases);

    m_jit.storePtr(
        storageGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfVector()));
    m_jit.store32(
        sizeGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfLength()));
    m_jit.store32(
        TrustedImm32(FastTypedArray),
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfMode()));

#if USE(JSVALUE32_64)
    // Zero-fill the vector one 32-bit word at a time; scratchGPR is set to
    // the number of words (element count scaled and rounded up to words).
    MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, sizeGPR);
    m_jit.move(sizeGPR, scratchGPR);
    if (elementSize(type) != 4) {
        if (elementSize(type) > 4)
            m_jit.lshift32(TrustedImm32(logElementSize(type) - 2), scratchGPR);
        else {
            if (elementSize(type) > 1)
                m_jit.lshift32(TrustedImm32(logElementSize(type)), scratchGPR);
            m_jit.add32(TrustedImm32(3), scratchGPR);
            m_jit.urshift32(TrustedImm32(2), scratchGPR);
        }
    }
    MacroAssembler::Label loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), scratchGPR);
    m_jit.store32(
        TrustedImm32(0),
        MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesFour));
    m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
    done.link(&m_jit);
#endif // USE(JSVALUE32_64)

    addSlowPathGenerator(slowPathCall(
        slowCases, this, operationNewTypedArrayWithSizeForType(type),
        resultGPR, structure, sizeGPR));

    cellResult(resultGPR, node);
}
4508
4509 void SpeculativeJIT::speculateInt32(Edge edge)
4510 {
4511 if (!needsTypeCheck(edge, SpecInt32))
4512 return;
4513
4514 (SpeculateInt32Operand(this, edge)).gpr();
4515 }
4516
// Emits a type check that the edge's value is a number (int32 or double),
// if one is still needed.
void SpeculativeJIT::speculateNumber(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBytecodeNumber))
        return;

    JSValueOperand value(this, edge, ManualOperandSpeculation);
#if USE(JSVALUE64)
    // On 64-bit, any numeric JSValue has at least one tagTypeNumber bit set;
    // a zero test against the tag register catches all non-numbers.
    GPRReg gpr = value.gpr();
    typeCheck(
        JSValueRegs(gpr), edge, SpecBytecodeNumber,
        m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
#else
    // On 32-bit: an Int32Tag passes immediately; otherwise the tag must be
    // below LowestTag, which identifies a double.
    GPRReg tagGPR = value.tagGPR();
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, ~SpecInt32,
        m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)));
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, SpecBytecodeNumber,
        m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
#endif
}
4538
// Speculate that the edge's double value is "real", i.e. not NaN. A NaN is
// detected by the self-comparison below: DoubleNotEqualOrUnordered against
// itself only fires for NaN.
void SpeculativeJIT::speculateDoubleReal(Edge edge)
{
    if (!needsTypeCheck(edge, SpecDoubleReal))
        return;
    
    SpeculateDoubleOperand operand(this, edge);
    FPRReg fpr = operand.fpr();
    typeCheck(
        JSValueRegs(), edge, SpecDoubleReal,
        m_jit.branchDouble(
            MacroAssembler::DoubleNotEqualOrUnordered, fpr, fpr));
}
4551
4552 void SpeculativeJIT::speculateBoolean(Edge edge)
4553 {
4554 if (!needsTypeCheck(edge, SpecBoolean))
4555 return;
4556
4557 (SpeculateBooleanOperand(this, edge)).gpr();
4558 }
4559
4560 void SpeculativeJIT::speculateCell(Edge edge)
4561 {
4562 if (!needsTypeCheck(edge, SpecCell))
4563 return;
4564
4565 (SpeculateCellOperand(this, edge)).gpr();
4566 }
4567
// Speculate that the edge holds an object. A cell qualifies as an object here
// iff its structure is not the VM's string structure; the check fails (OSR
// exits) when the structure matches stringStructure.
void SpeculativeJIT::speculateObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecObject))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchStructurePtr(
            MacroAssembler::Equal,
            MacroAssembler::Address(gpr, JSCell::structureIDOffset()),
            m_jit.vm()->stringStructure.get()));
}
4581
// Speculate that the edge holds a final object (plain JS object), by checking
// the cell's type-info type byte against FinalObjectType.
void SpeculativeJIT::speculateFinalObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecFinalObject))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(gpr), edge, SpecFinalObject, m_jit.branch8(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(gpr, JSCell::typeInfoTypeOffset()),
            TrustedImm32(FinalObjectType)));
}
4595
// Speculate that the edge holds either an object or an "other" value
// (e.g. undefined/null). Cells must not be strings; non-cells must pass the
// not-other check only when the edge's proof still requires it.
void SpeculativeJIT::speculateObjectOrOther(Edge edge)
{
    if (!needsTypeCheck(edge, SpecObject | SpecOther))
        return;
    
    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    MacroAssembler::Jump notCell = branchNotCell(operand.jsValueRegs());
    GPRReg gpr = operand.jsValueRegs().payloadGPR();
    // Cell path: fail if the cell is a string (structure == stringStructure).
    DFG_TYPE_CHECK(
        operand.jsValueRegs(), edge, (~SpecCell) | SpecObject, m_jit.branchStructurePtr(
            MacroAssembler::Equal,
            MacroAssembler::Address(gpr, JSCell::structureIDOffset()),
            m_jit.vm()->stringStructure.get()));
    MacroAssembler::Jump done = m_jit.jump();
    notCell.link(&m_jit);
    // Non-cell path: only emit the "is other" check if the abstract value
    // doesn't already prove it.
    if (needsTypeCheck(edge, SpecCell | SpecOther)) {
        typeCheck(
            operand.jsValueRegs(), edge, SpecCell | SpecOther,
            branchNotOther(operand.jsValueRegs(), tempGPR));
    }
    done.link(&m_jit);
}
4620
// Speculate that the already-loaded cell in |cell| is a JSString, by comparing
// its structure against the VM's string structure. The filter is
// SpecString | ~SpecCell because this check only refines knowledge about the
// cell case.
void SpeculativeJIT::speculateString(Edge edge, GPRReg cell)
{
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(cell), edge, SpecString | ~SpecCell,
        m_jit.branchStructurePtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(cell, JSCell::structureIDOffset()),
            m_jit.vm()->stringStructure.get()));
}
4630
// Speculate that |string| (known to be a JSString) is backed by an atomic
// (identifier) StringImpl, and leave that StringImpl pointer in |storage|.
// Fails if the string is a rope (null impl pointer) or its impl lacks the
// atomic flag.
void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage)
{
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), storage);
    
    if (!needsTypeCheck(edge, SpecStringIdent | ~SpecString))
        return;
    
    // Rope strings have a null StringImpl; they cannot be identifiers.
    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge,
        m_jit.branchTestPtr(MacroAssembler::Zero, storage));
    // Require the atomic flag, which identifies interned (identifier) strings.
    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge, m_jit.branchTest32(
            MacroAssembler::Zero,
            MacroAssembler::Address(storage, StringImpl::flagsOffset()),
            MacroAssembler::TrustedImm32(StringImpl::flagIsAtomic())));
    
    // Record the refinement in the abstract interpreter state.
    m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
}
4649
4650 void SpeculativeJIT::speculateStringIdent(Edge edge, GPRReg string)
4651 {
4652 if (!needsTypeCheck(edge, SpecStringIdent))
4653 return;
4654
4655 GPRTemporary temp(this);
4656 speculateStringIdentAndLoadStorage(edge, string, temp.gpr());
4657 }
4658
// Speculate that the edge holds an identifier string: first prove it is a
// string, then prove the backing StringImpl is atomic.
void SpeculativeJIT::speculateStringIdent(Edge edge)
{
    if (!needsTypeCheck(edge, SpecStringIdent))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    speculateString(edge, gpr);
    speculateStringIdent(edge, gpr);
}
4669
4670 void SpeculativeJIT::speculateString(Edge edge)
4671 {
4672 if (!needsTypeCheck(edge, SpecString))
4673 return;
4674
4675 SpeculateCellOperand operand(this, edge);
4676 speculateString(edge, operand.gpr());
4677 }
4678
// Speculate that the cell in |gpr| is a StringObject, by checking its
// structure (read from the cell's structureID slot) via the shared helper.
void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg gpr)
{
    speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureIDOffset()));
}
4683
// Speculate that the edge holds a StringObject.
void SpeculativeJIT::speculateStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecStringObject))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    // Re-test after the fill: filling the cell operand may have refined the
    // proof, making the structure check unnecessary.
    if (!needsTypeCheck(edge, SpecStringObject))
        return;
    
    speculateStringObject(edge, gpr);
    m_interpreter.filter(edge, SpecStringObject);
}
4697
// Speculate that the edge holds either a JSString or a StringObject. Strings
// are admitted by structure comparison; everything else must pass the
// StringObject structure check.
void SpeculativeJIT::speculateStringOrStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;
    
    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    // Re-test after the fill: it may have refined the proof already.
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;
    
    GPRTemporary structureID(this);
    GPRReg structureIDGPR = structureID.gpr();

    m_jit.load32(JITCompiler::Address(gpr, JSCell::structureIDOffset()), structureIDGPR);
    JITCompiler::Jump isString = m_jit.branchStructurePtr(
        JITCompiler::Equal,
        structureIDGPR,
        m_jit.vm()->stringStructure.get());
    
    // Not a string: must be a string object, or we OSR exit.
    speculateStringObjectForStructure(edge, structureIDGPR);
    
    isString.link(&m_jit);
    
    m_interpreter.filter(edge, SpecString | SpecStringObject);
}
4723
// Speculate that the edge does not hold a non-identifier ("variable") string:
// non-cells and non-string cells pass through unchecked; strings must prove
// they are backed by an atomic StringImpl.
void SpeculativeJIT::speculateNotStringVar(Edge edge)
{
    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    
    JITCompiler::Jump notCell = branchNotCell(operand.jsValueRegs());
    GPRReg cell = operand.jsValueRegs().payloadGPR();
    
    JITCompiler::Jump notString = m_jit.branchStructurePtr(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(cell, JSCell::structureIDOffset()),
        m_jit.vm()->stringStructure.get());
    
    // It's a string; require it to be an identifier (atomic impl).
    speculateStringIdentAndLoadStorage(edge, cell, tempGPR);
    
    notString.link(&m_jit);
    notCell.link(&m_jit);
}
4743
4744 void SpeculativeJIT::speculateNotCell(Edge edge)
4745 {
4746 if (!needsTypeCheck(edge, ~SpecCell))
4747 return;
4748
4749 JSValueOperand operand(this, edge, ManualOperandSpeculation);
4750 typeCheck(operand.jsValueRegs(), edge, ~SpecCell, branchIsCell(operand.jsValueRegs()));
4751 }
4752
// Speculate that the edge holds an "other" value (per SpecOther), using the
// branchNotOther helper with a scratch register.
void SpeculativeJIT::speculateOther(Edge edge)
{
    if (!needsTypeCheck(edge, SpecOther))
        return;
    
    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    typeCheck(
        operand.jsValueRegs(), edge, SpecOther,
        branchNotOther(operand.jsValueRegs(), tempGPR));
}
4765
// Speculate that the value in |regs| is "misc" (per SpecMisc — the tag
// constants below cover boolean/undefined/other encodings).
void SpeculativeJIT::speculateMisc(Edge edge, JSValueRegs regs)
{
#if USE(JSVALUE64)
    // On 64-bit, misc values are encoded at or below the combined tag bits;
    // anything Above that range fails.
    DFG_TYPE_CHECK(
        regs, edge, SpecMisc,
        m_jit.branch64(MacroAssembler::Above, regs.gpr(), MacroAssembler::TrustedImm64(TagBitTypeOther | TagBitBool | TagBitUndefined)));
#else
    // On 32-bit, first exclude int32s, then require the tag to be at or above
    // UndefinedTag.
    DFG_TYPE_CHECK(
        regs, edge, ~SpecInt32,
        m_jit.branch32(MacroAssembler::Equal, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::Int32Tag)));
    DFG_TYPE_CHECK(
        regs, edge, SpecMisc,
        m_jit.branch32(MacroAssembler::Below, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::UndefinedTag)));
#endif
}
4781
4782 void SpeculativeJIT::speculateMisc(Edge edge)
4783 {
4784 if (!needsTypeCheck(edge, SpecMisc))
4785 return;
4786
4787 JSValueOperand operand(this, edge, ManualOperandSpeculation);
4788 speculateMisc(edge, operand.jsValueRegs());
4789 }
4790
// Central dispatcher: emit the speculation appropriate for the edge's use
// kind. "Known" use kinds assert that no check is needed (the proof must
// already exist); typed use kinds delegate to their speculate* helpers.
void SpeculativeJIT::speculate(Node*, Edge edge)
{
    switch (edge.useKind()) {
    case UntypedUse:
        break;
    case KnownInt32Use:
        ASSERT(!needsTypeCheck(edge, SpecInt32));
        break;
    case DoubleRepUse:
        ASSERT(!needsTypeCheck(edge, SpecFullDouble));
        break;
    case Int52RepUse:
        ASSERT(!needsTypeCheck(edge, SpecMachineInt));
        break;
    case KnownCellUse:
        ASSERT(!needsTypeCheck(edge, SpecCell));
        break;
    case KnownStringUse:
        ASSERT(!needsTypeCheck(edge, SpecString));
        break;
    case Int32Use:
        speculateInt32(edge);
        break;
    case NumberUse:
        speculateNumber(edge);
        break;
    case DoubleRepRealUse:
        speculateDoubleReal(edge);
        break;
#if USE(JSVALUE64)
    // Machine-int speculation only exists on 64-bit targets.
    case MachineIntUse:
        speculateMachineInt(edge);
        break;
    case DoubleRepMachineIntUse:
        speculateDoubleRepMachineInt(edge);
        break;
#endif
    case BooleanUse:
        speculateBoolean(edge);
        break;
    case CellUse:
        speculateCell(edge);
        break;
    case ObjectUse:
        speculateObject(edge);
        break;
    case FinalObjectUse:
        speculateFinalObject(edge);
        break;
    case ObjectOrOtherUse:
        speculateObjectOrOther(edge);
        break;
    case StringIdentUse:
        speculateStringIdent(edge);
        break;
    case StringUse:
        speculateString(edge);
        break;
    case StringObjectUse:
        speculateStringObject(edge);
        break;
    case StringOrStringObjectUse:
        speculateStringOrStringObject(edge);
        break;
    case NotStringVarUse:
        speculateNotStringVar(edge);
        break;
    case NotCellUse:
        speculateNotCell(edge);
        break;
    case OtherUse:
        speculateOther(edge);
        break;
    case MiscUse:
        speculateMisc(edge);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
4872
// Emit an indirect jump through the switch's CTI jump table. |value| is
// rebased against table.min and bounds-checked; out-of-range values go to
// the fall-through block. Clobbers both |value| and |scratch|.
void SpeculativeJIT::emitSwitchIntJump(
    SwitchData* data, GPRReg value, GPRReg scratch)
{
    SimpleJumpTable& table = m_jit.codeBlock()->switchJumpTable(data->switchTableIndex);
    table.ensureCTITable();
    m_jit.sub32(Imm32(table.min), value);
    // Unsigned compare handles both below-min (wraps to large) and above-max.
    addBranch(
        m_jit.branch32(JITCompiler::AboveOrEqual, value, Imm32(table.ctiOffsets.size())),
        data->fallThrough.block);
    m_jit.move(TrustedImmPtr(table.ctiOffsets.begin()), scratch);
    m_jit.loadPtr(JITCompiler::BaseIndex(scratch, value, JITCompiler::timesPtr()), scratch);
    m_jit.jump(scratch);
    data->didUseJumpTable = true;
}
4887
// Compile a SwitchImm. Int32Use takes the jump table directly; UntypedUse
// first tries the int32 fast path, falls through for non-numbers, and calls
// out to resolve double scrutinees against the table.
void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand value(this, node->child1());
        GPRTemporary temp(this);
        emitSwitchIntJump(data, value.gpr(), temp.gpr());
        noResult(node);
        break;
    }
        
    case UntypedUse: {
        JSValueOperand value(this, node->child1());
        GPRTemporary temp(this);
        JSValueRegs valueRegs = value.jsValueRegs();
        GPRReg scratch = temp.gpr();
        
        value.use();
        
#if USE(JSVALUE64)
        // Fast path: boxed int32s are at/above tagTypeNumberRegister.
        JITCompiler::Jump notInt = m_jit.branch64(
            JITCompiler::Below, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister);
        emitSwitchIntJump(data, valueRegs.gpr(), scratch);
        notInt.link(&m_jit);
        // Non-numbers (no number tag bits) go to fall-through.
        addBranch(
            m_jit.branchTest64(
                JITCompiler::Zero, valueRegs.gpr(), GPRInfo::tagTypeNumberRegister),
            data->fallThrough.block);
        // Remaining case: a double. Resolve the target via a C call.
        silentSpillAllRegisters(scratch);
        callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs.gpr(), data->switchTableIndex);
        silentFillAllRegisters(scratch);
        m_jit.jump(scratch);
#else
        JITCompiler::Jump notInt = m_jit.branch32(
            JITCompiler::NotEqual, valueRegs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
        emitSwitchIntJump(data, valueRegs.payloadGPR(), scratch);
        notInt.link(&m_jit);
        // Tags at/above LowestTag are not doubles; send them to fall-through.
        addBranch(
            m_jit.branch32(
                JITCompiler::AboveOrEqual, valueRegs.tagGPR(),
                TrustedImm32(JSValue::LowestTag)),
            data->fallThrough.block);
        silentSpillAllRegisters(scratch);
        callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs, data->switchTableIndex);
        silentFillAllRegisters(scratch);
        m_jit.jump(scratch);
#endif
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
4944
// Emit the per-character dispatch for SwitchChar: require a length-1 string,
// resolve ropes via a slow path, load the single character (8- or 16-bit),
// and jump through the switch's int jump table. Clobbers |value| and
// |scratch|.
void SpeculativeJIT::emitSwitchCharStringJump(
    SwitchData* data, GPRReg value, GPRReg scratch)
{
    // Only single-character strings can match; others fall through.
    addBranch(
        m_jit.branch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(value, JSString::offsetOfLength()),
            TrustedImm32(1)),
        data->fallThrough.block);
    
    m_jit.loadPtr(MacroAssembler::Address(value, JSString::offsetOfValue()), scratch);
    
    // Null StringImpl means a rope; resolve it out of line.
    addSlowPathGenerator(
        slowPathCall(
            m_jit.branchTestPtr(MacroAssembler::Zero, scratch),
            this, operationResolveRope, scratch, value));
    
    m_jit.loadPtr(MacroAssembler::Address(scratch, StringImpl::dataOffset()), value);
    
    JITCompiler::Jump is8Bit = m_jit.branchTest32(
        MacroAssembler::NonZero,
        MacroAssembler::Address(scratch, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit()));
    
    // 16-bit path.
    m_jit.load16(MacroAssembler::Address(value), scratch);
    
    JITCompiler::Jump ready = m_jit.jump();
    
    // 8-bit path.
    is8Bit.link(&m_jit);
    m_jit.load8(MacroAssembler::Address(value), scratch);
    
    ready.link(&m_jit);
    emitSwitchIntJump(data, scratch, value);
}
4979
// Compile a SwitchChar. StringUse speculates string-ness then dispatches;
// UntypedUse routes non-cells and non-strings to the fall-through block
// before dispatching on the character.
void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case StringUse: {
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        
        op1.use();
        
        speculateString(node->child1(), op1GPR);
        emitSwitchCharStringJump(data, op1GPR, tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
        
    case UntypedUse: {
        JSValueOperand op1(this, node->child1());
        GPRTemporary temp(this);
        
        JSValueRegs op1Regs = op1.jsValueRegs();
        GPRReg tempGPR = temp.gpr();
        
        op1.use();
        
        // Non-cells can't be strings; fall through.
        addBranch(branchNotCell(op1Regs), data->fallThrough.block);
        
        // Cells that aren't strings also fall through.
        addBranch(
            m_jit.branchStructurePtr(
                MacroAssembler::NotEqual,
                MacroAssembler::Address(op1Regs.payloadGPR(), JSCell::structureIDOffset()),
                m_jit.vm()->stringStructure.get()),
            data->fallThrough.block);
        
        emitSwitchCharStringJump(data, op1Regs.payloadGPR(), tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
5026
5027 bool SpeculativeJIT::StringSwitchCase::operator<(
5028 const SpeculativeJIT::StringSwitchCase& other) const
5029 {
5030 unsigned minLength = std::min(string->length(), other.string->length());
5031 for (unsigned i = 0; i < minLength; ++i) {
5032 if (string->at(i) == other.string->at(i))
5033 continue;
5034 return string->at(i) < other.string->at(i);
5035 }
5036 return string->length() < other.string->length();
5037 }
5038
namespace {

// One contiguous run [begin, end) of sorted string-switch cases that share
// the same character at the position currently being switched on.
struct CharacterCase {
    bool operator<(const CharacterCase& other) const
    {
        return character < other.character;
    }
    
    LChar character;  // The 8-bit character shared by cases in this run.
    unsigned begin;   // Index of the first case in the run (inclusive).
    unsigned end;     // Index one past the last case in the run (exclusive).
};

} // anonymous namespace
5053
// Recursively emit a binary-search switch over sorted 8-bit string cases
// [begin, end). |numChecked| characters are already known to match,
// |buffer| holds the scrutinee's character data, |length| its length, and
// |temp| is a scratch register. |alreadyCheckedLength| is the length bound
// already established; |checkedExactLength| means the exact length has been
// matched.
void SpeculativeJIT::emitBinarySwitchStringRecurse(
    SwitchData* data, const Vector<SpeculativeJIT::StringSwitchCase>& cases,
    unsigned numChecked, unsigned begin, unsigned end, GPRReg buffer, GPRReg length,
    GPRReg temp, unsigned alreadyCheckedLength, bool checkedExactLength)
{
    static const bool verbose = false;
    
    if (verbose) {
        dataLog("We're down to the following cases, alreadyCheckedLength = ", alreadyCheckedLength, ":\n");
        for (unsigned i = begin; i < end; ++i) {
            dataLog("    ", cases[i].string, "\n");
        }
    }
    
    // Empty range: nothing can match.
    if (begin == end) {
        jump(data->fallThrough.block, ForceJump);
        return;
    }
    
    // Compute the longest common prefix (beyond numChecked), the minimum
    // length, and whether all case lengths are equal.
    unsigned minLength = cases[begin].string->length();
    unsigned commonChars = minLength;
    bool allLengthsEqual = true;
    for (unsigned i = begin + 1; i < end; ++i) {
        unsigned myCommonChars = numChecked;
        for (unsigned j = numChecked;
            j < std::min(cases[begin].string->length(), cases[i].string->length());
            ++j) {
            if (cases[begin].string->at(j) != cases[i].string->at(j)) {
                if (verbose)
                    dataLog("string(", cases[i].string, ")[", j, "] != string(", cases[begin].string, ")[", j, "]\n");
                break;
            }
            myCommonChars++;
        }
        commonChars = std::min(commonChars, myCommonChars);
        if (minLength != cases[i].string->length())
            allLengthsEqual = false;
        minLength = std::min(minLength, cases[i].string->length());
    }
    
    if (checkedExactLength) {
        RELEASE_ASSERT(alreadyCheckedLength == minLength);
        RELEASE_ASSERT(allLengthsEqual);
    }
    
    RELEASE_ASSERT(minLength >= commonChars);
    
    if (verbose)
        dataLog("length = ", minLength, ", commonChars = ", commonChars, ", allLengthsEqual = ", allLengthsEqual, "\n");
    
    // Establish length bounds before reading characters, so the loads below
    // stay in range.
    if (!allLengthsEqual && alreadyCheckedLength < minLength)
        branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough.block);
    if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
        branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough.block);
    
    // Verify the common prefix characters not yet checked.
    for (unsigned i = numChecked; i < commonChars; ++i) {
        branch8(
            MacroAssembler::NotEqual, MacroAssembler::Address(buffer, i),
            TrustedImm32(cases[begin].string->at(i)), data->fallThrough.block);
    }
    
    if (minLength == commonChars) {
        // This is the case where one of the cases is a prefix of all of the other cases.
        // We've already checked that the input string is a prefix of all of the cases,
        // so we just check length to jump to that case.
        
        if (!ASSERT_DISABLED) {
            ASSERT(cases[begin].string->length() == commonChars);
            for (unsigned i = begin + 1; i < end; ++i)
                ASSERT(cases[i].string->length() > commonChars);
        }
        
        if (allLengthsEqual) {
            RELEASE_ASSERT(end == begin + 1);
            jump(cases[begin].target, ForceJump);
            return;
        }
        
        branch32(MacroAssembler::Equal, length, Imm32(commonChars), cases[begin].target);
        
        // We've checked if the length is >= minLength, and then we checked if the
        // length is == commonChars. We get to this point if it is >= minLength but not
        // == commonChars. Hence we know that it now must be > minLength, i.e., that
        // it's >= minLength + 1.
        emitBinarySwitchStringRecurse(
            data, cases, commonChars, begin + 1, end, buffer, length, temp, minLength + 1, false);
        return;
    }
    
    // At this point we know that the string is longer than commonChars, and we've only
    // verified commonChars. Use a binary switch on the next unchecked character, i.e.
    // string[commonChars].
    
    RELEASE_ASSERT(end >= begin + 2);
    
    m_jit.load8(MacroAssembler::Address(buffer, commonChars), temp);
    
    // Group adjacent cases by their character at position commonChars; the
    // input is sorted, so equal characters are contiguous.
    Vector<CharacterCase> characterCases;
    CharacterCase currentCase;
    currentCase.character = cases[begin].string->at(commonChars);
    currentCase.begin = begin;
    currentCase.end = begin + 1;
    for (unsigned i = begin + 1; i < end; ++i) {
        if (cases[i].string->at(commonChars) != currentCase.character) {
            if (verbose)
                dataLog("string(", cases[i].string, ")[", commonChars, "] != string(", cases[begin].string, ")[", commonChars, "]\n");
            currentCase.end = i;
            characterCases.append(currentCase);
            currentCase.character = cases[i].string->at(commonChars);
            currentCase.begin = i;
            currentCase.end = i + 1;
        } else
            currentCase.end = i + 1;
    }
    characterCases.append(currentCase);
    
    Vector<int64_t> characterCaseValues;
    for (unsigned i = 0; i < characterCases.size(); ++i)
        characterCaseValues.append(characterCases[i].character);
    
    // Binary-switch on the character, recursing into each group.
    BinarySwitch binarySwitch(temp, characterCaseValues, BinarySwitch::Int32);
    while (binarySwitch.advance(m_jit)) {
        const CharacterCase& myCase = characterCases[binarySwitch.caseIndex()];
        emitBinarySwitchStringRecurse(
            data, cases, commonChars + 1, myCase.begin, myCase.end, buffer, length,
            temp, minLength, allLengthsEqual);
    }
    
    addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
}
5184
// Dispatch a string switch on a known JSString in |string|. Uses an inline
// binary character switch when every case is 8-bit and within the configured
// size limits; otherwise (or for rope/16-bit scrutinees at runtime) calls
// operationSwitchString. Clobbers |string|.
void SpeculativeJIT::emitSwitchStringOnString(SwitchData* data, GPRReg string)
{
    data->didUseJumpTable = true;
    
    // Decide whether an inline binary switch is feasible for this case set.
    bool canDoBinarySwitch = true;
    unsigned totalLength = 0;
    
    for (unsigned i = data->cases.size(); i--;) {
        StringImpl* string = data->cases[i].value.stringImpl();
        if (!string->is8Bit()) {
            canDoBinarySwitch = false;
            break;
        }
        if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
            canDoBinarySwitch = false;
            break;
        }
        totalLength += string->length();
    }
    
    if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
        // Too big or non-8-bit: resolve the target in C++ and jump to it.
        flushRegisters();
        callOperation(
            operationSwitchString, string, data->switchTableIndex, string);
        m_jit.jump(string);
        return;
    }
    
    GPRTemporary length(this);
    GPRTemporary temp(this);
    
    GPRReg lengthGPR = length.gpr();
    GPRReg tempGPR = temp.gpr();
    
    m_jit.load32(MacroAssembler::Address(string, JSString::offsetOfLength()), lengthGPR);
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), tempGPR);
    
    // Slow cases: rope string (null impl) or 16-bit scrutinee.
    MacroAssembler::JumpList slowCases;
    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
    slowCases.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    
    m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), string);
    
    Vector<StringSwitchCase> cases;
    for (unsigned i = 0; i < data->cases.size(); ++i) {
        cases.append(
            StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target.block));
    }
    
    // The recursive emitter requires lexicographically sorted cases.
    std::sort(cases.begin(), cases.end());
    
    emitBinarySwitchStringRecurse(
        data, cases, 0, 0, cases.size(), string, lengthGPR, tempGPR, 0, false);
    
    slowCases.link(&m_jit);
    silentSpillAllRegisters(string);
    callOperation(operationSwitchString, string, data->switchTableIndex, string);
    silentFillAllRegisters(string);
    m_jit.jump(string);
}
5248
// Compile a SwitchString. StringIdentUse pointer-compares the atomic
// StringImpl against the case identifiers; StringUse/UntypedUse dispatch on
// string contents via emitSwitchStringOnString.
void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data)
{
    switch (node->child1().useKind()) {
    case StringIdentUse: {
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);
        
        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        
        speculateString(node->child1(), op1GPR);
        speculateStringIdentAndLoadStorage(node->child1(), op1GPR, tempGPR);
        
        // Identifier strings are unique, so pointer identity of the
        // StringImpl decides the case.
        Vector<int64_t> identifierCaseValues;
        for (unsigned i = 0; i < data->cases.size(); ++i) {
            identifierCaseValues.append(
                static_cast<int64_t>(bitwise_cast<intptr_t>(data->cases[i].value.stringImpl())));
        }
        
        BinarySwitch binarySwitch(tempGPR, identifierCaseValues, BinarySwitch::IntPtr);
        while (binarySwitch.advance(m_jit))
            jump(data->cases[binarySwitch.caseIndex()].target.block, ForceJump);
        addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
        
        noResult(node);
        break;
    }
        
    case StringUse: {
        SpeculateCellOperand op1(this, node->child1());
        
        GPRReg op1GPR = op1.gpr();
        
        op1.use();
        
        speculateString(node->child1(), op1GPR);
        emitSwitchStringOnString(data, op1GPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
        
    case UntypedUse: {
        JSValueOperand op1(this, node->child1());
        
        JSValueRegs op1Regs = op1.jsValueRegs();
        
        op1.use();
        
        // Non-cells and non-string cells take the fall-through block.
        addBranch(branchNotCell(op1Regs), data->fallThrough.block);
        
        addBranch(
            m_jit.branchStructurePtr(
                MacroAssembler::NotEqual,
                MacroAssembler::Address(op1Regs.payloadGPR(), JSCell::structureIDOffset()),
                m_jit.vm()->stringStructure.get()),
            data->fallThrough.block);
        
        emitSwitchStringOnString(data, op1Regs.payloadGPR());
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }
        
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
5316
5317 void SpeculativeJIT::emitSwitch(Node* node)
5318 {
5319 SwitchData* data = node->switchData();
5320 switch (data->kind) {
5321 case SwitchImm: {
5322 emitSwitchImm(node, data);
5323 return;
5324 }
5325 case SwitchChar: {
5326 emitSwitchChar(node, data);
5327 return;
5328 }
5329 case SwitchString: {
5330 emitSwitchString(node, data);
5331 return;
5332 } }
5333 RELEASE_ASSERT_NOT_REACHED();
5334 }
5335
5336 void SpeculativeJIT::addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination)
5337 {
5338 for (unsigned i = jump.jumps().size(); i--;)
5339 addBranch(jump.jumps()[i], destination);
5340 }
5341
5342 void SpeculativeJIT::linkBranches()
5343 {
5344 for (size_t i = 0; i < m_branches.size(); ++i) {
5345 BranchRecord& branch = m_branches[i];
5346 branch.jump.linkTo(m_jit.blockHeads()[branch.destination->index], &m_jit);
5347 }
5348 }
5349
5350 #if ENABLE(GGC)
// Compile a store barrier node (GGC builds). StoreBarrierWithNullCheck
// additionally skips the barrier when the base is the empty value.
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
    switch (node->op()) {
    case StoreBarrier: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary scratch1(this);
        GPRTemporary scratch2(this);
    
        writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr());
        break;
    }

    case StoreBarrierWithNullCheck: {
        JSValueOperand base(this, node->child1());
        GPRTemporary scratch1(this);
        GPRTemporary scratch2(this);
    
#if USE(JSVALUE64)
        // Empty value encodes as all-zero bits on 64-bit.
        JITCompiler::Jump isNull = m_jit.branchTest64(JITCompiler::Zero, base.gpr());
        writeBarrier(base.gpr(), scratch1.gpr(), scratch2.gpr());
#else
        JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, base.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
        writeBarrier(base.payloadGPR(), scratch1.gpr(), scratch2.gpr());
#endif
        isNull.link(&m_jit);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    noResult(node);
}
5386
// Append the cell in |cell| to the VM's write-barrier buffer, flushing via
// a C call if the buffer is full. Both scratch registers are clobbered.
void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2)
{
    ASSERT(scratch1 != scratch2);
    WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer;
    m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1);
    m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
    JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));

    m_jit.add32(TrustedImm32(1), scratch2);
    m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));

    m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
    // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
    m_jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));

    JITCompiler::Jump done = m_jit.jump();
    needToFlush.link(&m_jit);

    // Buffer full: flush it in C++ with the cell as argument.
    silentSpillAllRegisters(InvalidGPRReg);
    callOperation(operationFlushWriteBarrierBuffer, cell);
    silentFillAllRegisters(InvalidGPRReg);

    done.link(&m_jit);
}
5411
// Same as the GPR overload, but for a compile-time-known cell pointer, which
// is stored as an immediate. Both scratch registers are clobbered.
void SpeculativeJIT::storeToWriteBarrierBuffer(JSCell* cell, GPRReg scratch1, GPRReg scratch2)
{
    ASSERT(scratch1 != scratch2);
    WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer;
    m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1);
    m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
    JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));

    m_jit.add32(TrustedImm32(1), scratch2);
    m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));

    m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
    // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
    m_jit.storePtr(TrustedImmPtr(cell), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));

    JITCompiler::Jump done = m_jit.jump();
    needToFlush.link(&m_jit);

    // Call C slow path
    silentSpillAllRegisters(InvalidGPRReg);
    callOperation(operationFlushWriteBarrierBuffer, cell);
    silentFillAllRegisters(InvalidGPRReg);

    done.link(&m_jit);
}
5437
// Emit a write barrier for storing the known cell |value| into the owner in
// |ownerGPR|. Elided entirely at compile time if the value is already marked;
// otherwise the owner's mark byte is checked before buffering.
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, GPRReg scratch1, GPRReg scratch2)
{
    if (Heap::isMarked(value))
        return;

    JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered = m_jit.checkMarkByte(ownerGPR);
    storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
    ownerNotMarkedOrAlreadyRemembered.link(&m_jit);
}
5447
// Emit an unconditional write barrier for the owner in |ownerGPR|: if its
// mark byte indicates it needs remembering, append it to the barrier buffer.
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2)
{
    JITCompiler::Jump ownerNotMarkedOrAlreadyRemembered = m_jit.checkMarkByte(ownerGPR);
    storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
    ownerNotMarkedOrAlreadyRemembered.link(&m_jit);
}
5454 #else
// Non-GGC builds: no barrier is emitted; just perform the children's
// speculations and produce no result.
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
    DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
    noResult(node);
}
5460 #endif // ENABLE(GGC)
5461
5462 } } // namespace JSC::DFG
5463
5464 #endif