]> git.saurik.com Git - apple/javascriptcore.git/blob - dfg/DFGSpeculativeJIT.cpp
JavaScriptCore-1218.33.tar.gz
[apple/javascriptcore.git] / dfg / DFGSpeculativeJIT.cpp
1 /*
2 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "Arguments.h"
32 #include "DFGArrayifySlowPathGenerator.h"
33 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
34 #include "DFGSlowPathGenerator.h"
35 #include "JSCJSValueInlines.h"
36 #include "LinkBuffer.h"
37 #include <wtf/MathExtras.h>
38
39 namespace JSC { namespace DFG {
40
// Constructs the speculative-path code generator for one DFG compilation.
// All bookkeeping containers are sized up front from the JITCompiler's code
// block and graph: generation info per callee register, one head label per
// basic block, and per-argument / per-local slots.
41 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
42 : m_compileOkay(true)
43 , m_jit(jit)
44 , m_currentNode(0)
45 , m_indexInBlock(0)
46 , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
47 , m_blockHeads(jit.graph().m_blocks.size())
48 , m_arguments(jit.codeBlock()->numParameters())
49 , m_variables(jit.graph().m_localVars)
50 , m_lastSetOperand(std::numeric_limits<int>::max()) // sentinel: no operand set yet
51 , m_state(m_jit.graph())
52 , m_stream(&jit.codeBlock()->variableEventStream())
53 , m_minifiedGraph(&jit.codeBlock()->minifiedDFG())
54 , m_isCheckingArgumentTypes(false)
55 {
56 }
57
// Out-of-line destructor; intentionally empty — all members release their
// resources through their own destructors.
58 SpeculativeJIT::~SpeculativeJIT()
59 {
60 }
61
// Emits inline code that allocates a JSArray with the given structure and at
// least BASE_VECTOR_LEN vector slots, leaving the cell in resultGPR and the
// butterfly (storage) pointer in storageGPR. Any fast-path failure falls back
// to operationNewArrayWithSize via a custom slow-path generator.
62 void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
63 {
64 ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
65
66 GPRTemporary scratch(this);
67 GPRTemporary scratch2(this);
68 GPRReg scratchGPR = scratch.gpr();
69 GPRReg scratch2GPR = scratch2.gpr();
70
71 unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
72
73 JITCompiler::JumpList slowCases;
74
// Allocate the butterfly (header + vector), then rewind storageGPR so it
// points at the indexing header rather than past the vector.
75 slowCases.append(
76 emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
77 m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
78 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
79
80 m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
81 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
82
// Double-shape arrays represent holes as quiet NaN, so the unused tail of the
// vector must be filled with QNaN rather than left as garbage.
83 if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
84 #if USE(JSVALUE64)
85 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
86 for (unsigned i = numElements; i < vectorLength; ++i)
87 m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
88 #else
89 EncodedValueDescriptor value;
90 value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
91 for (unsigned i = numElements; i < vectorLength; ++i) {
92 m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
93 m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
94 }
95 #endif
96 }
97
98 // I want a slow path that also loads out the storage pointer, and that's
99 // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
100 // of work for a very small piece of functionality. :-/
101 addSlowPathGenerator(adoptPtr(
102 new CallArrayAllocatorSlowPathGenerator(
103 slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
104 structure, numElements)));
105 }
106
// Registers a backward OSR exit for 'node': records jumpToFail as the exit
// trigger and appends a matching OSRExit entry (tagged with the current
// variable-event-stream position) to the code block. No-op once compilation
// has already been abandoned.
107 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
108 {
109 if (!m_compileOkay)
110 return;
111 ASSERT(m_isCheckingArgumentTypes || m_canExit);
112 m_jit.appendExitInfo(jumpToFail);
113 m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
114 }
115
// Same as the single-Jump overload above, but the exit is taken if any jump
// in 'jumpsToFail' fires.
116 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
117 {
118 if (!m_compileOkay)
119 return;
120 ASSERT(m_isCheckingArgumentTypes || m_canExit);
121 m_jit.appendExitInfo(jumpsToFail);
122 m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
123 }
124
// Direction-aware speculation check: records a backward exit, then rewrites
// it into a forward exit if the current speculation direction demands it.
125 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
126 {
127 if (!m_compileOkay)
128 return;
129 backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
130 if (m_speculationDirection == ForwardSpeculation)
131 convertLastOSRExitToForward();
132 }
133
// Edge convenience wrapper: forwards to the Node* overload.
134 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
135 {
136 ASSERT(m_isCheckingArgumentTypes || m_canExit);
137 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
138 }
139
// Registers a backward OSR exit with no jump attached yet; the returned
// placeholder (indexing the new OSRExit) lets the caller wire in the failing
// jump later. Returns an empty placeholder if compilation already failed.
140 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
141 {
142 if (!m_compileOkay)
143 return OSRExitJumpPlaceholder();
144 ASSERT(m_isCheckingArgumentTypes || m_canExit);
145 unsigned index = m_jit.codeBlock()->numberOfOSRExits();
146 m_jit.appendExitInfo();
147 m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
148 return OSRExitJumpPlaceholder(index);
149 }
150
// Edge convenience wrapper for the placeholder-returning overload above.
151 OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
152 {
153 ASSERT(m_isCheckingArgumentTypes || m_canExit);
154 return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
155 }
156
// Direction-aware speculation check for a JumpList of failure jumps.
157 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
158 {
159 if (!m_compileOkay)
160 return;
161 backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
162 if (m_speculationDirection == ForwardSpeculation)
163 convertLastOSRExitToForward();
164 }
165
// Edge convenience wrapper for the JumpList overload above.
166 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
167 {
168 ASSERT(m_isCheckingArgumentTypes || m_canExit);
169 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
170 }
171
// Backward OSR exit with an attached SpeculationRecovery: the recovery is
// appended first, and the exit record stores the (1-based) recovery count so
// the exit stub knows which recovery to run before reconstructing state.
172 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
173 {
174 if (!m_compileOkay)
175 return;
176 ASSERT(m_isCheckingArgumentTypes || m_canExit);
177 m_jit.codeBlock()->appendSpeculationRecovery(recovery);
178 m_jit.appendExitInfo(jumpToFail);
179 m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), m_jit.codeBlock()->numberOfSpeculationRecoveries()));
180 }
181
// Edge convenience wrapper for the recovery-carrying overload above.
182 void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
183 {
184 ASSERT(m_isCheckingArgumentTypes || m_canExit);
185 backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
186 }
187
// Direction-aware speculation check carrying a SpeculationRecovery.
188 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
189 {
190 if (!m_compileOkay)
191 return;
192 backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
193 if (m_speculationDirection == ForwardSpeculation)
194 convertLastOSRExitToForward();
195 }
196
// Edge convenience wrapper for the recovery-carrying speculationCheck.
197 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
198 {
199 speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
200 }
201
// Creates an OSR exit that is triggered by a jump-replacement watchpoint
// rather than an inline branch: the exit is appended with an empty jump list,
// a watchpoint is planted at the current label, and the two are linked via
// m_watchpointIndex. Returns the watchpoint (or 0 if compilation failed).
202 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
203 {
204 if (!m_compileOkay)
205 return 0;
206 ASSERT(m_isCheckingArgumentTypes || m_canExit);
207 m_jit.appendExitInfo(JITCompiler::JumpList());
208 OSRExit& exit = m_jit.codeBlock()->osrExit(
209 m_jit.codeBlock()->appendOSRExit(OSRExit(
210 kind, jsValueSource,
211 m_jit.graph().methodOfGettingAValueProfileFor(node),
212 this, m_stream->size())));
213 exit.m_watchpointIndex = m_jit.codeBlock()->appendWatchpoint(
214 JumpReplacementWatchpoint(m_jit.watchpointLabel()));
215 if (m_speculationDirection == ForwardSpeculation)
216 convertLastOSRExitToForward();
217 return &m_jit.codeBlock()->watchpoint(exit.m_watchpointIndex);
218 }
219
// Convenience form: a watchpoint exit with no value source and no node.
220 JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
221 {
222 return speculationWatchpoint(kind, JSValueSource(), 0);
223 }
224
// Rewrites the most recently appended OSR exit so that it resumes at the NEXT
// bytecode origin (a "forward" exit) instead of re-executing the current one.
// Without a ValueRecovery, it simply retargets the exit's code origin to the
// first following node with a different origin. With a ValueRecovery, it also
// locates the upcoming SetLocal (skipping ForwardInt32ToDouble / Flush /
// Phantom), and installs a ValueRecoveryOverride so the exit can materialize
// the not-yet-stored value into that local.
225 void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
226 {
227 if (!valueRecovery) {
228 // Check that either the current node is a SetLocal, or the preceding node was a
229 // SetLocal with the same code origin.
230 if (!m_currentNode->containsMovHint()) {
231 Node* setLocal = m_jit.graph().m_blocks[m_block]->at(m_indexInBlock - 1);
232 ASSERT_UNUSED(setLocal, setLocal->containsMovHint());
233 ASSERT_UNUSED(setLocal, setLocal->codeOrigin == m_currentNode->codeOrigin);
234 }
235
236 // Find the next node.
237 unsigned indexInBlock = m_indexInBlock + 1;
238 Node* node = 0;
239 for (;;) {
240 if (indexInBlock == m_jit.graph().m_blocks[m_block]->size()) {
241 // This is an inline return. Give up and do a backwards speculation. This is safe
242 // because an inline return has its own bytecode index and it's always safe to
243 // reexecute that bytecode.
244 ASSERT(node->op() == Jump); // NOTE(review): if the loop ends on its very first iteration, 'node' is still 0 here and this would dereference null in a debug build — presumably upstream guarantees at least one node after m_indexInBlock; confirm.
245 return;
246 }
247 node = m_jit.graph().m_blocks[m_block]->at(indexInBlock);
248 if (node->codeOrigin != m_currentNode->codeOrigin)
249 break;
250 indexInBlock++;
251 }
252
253 ASSERT(node->codeOrigin != m_currentNode->codeOrigin);
254 OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
255 exit.m_codeOrigin = node->codeOrigin;
256 return;
257 }
258
// ValueRecovery supplied: find the SetLocal that consumes m_currentNode.
259 unsigned setLocalIndexInBlock = m_indexInBlock + 1;
260
261 Node* setLocal = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock);
262 bool hadInt32ToDouble = false;
263
264 if (setLocal->op() == ForwardInt32ToDouble) {
265 setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
266 hadInt32ToDouble = true;
267 }
268 if (setLocal->op() == Flush || setLocal->op() == Phantom)
269 setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
270
// If a conversion node was interposed, the SetLocal's child is the conversion,
// whose child is the current node; otherwise the SetLocal feeds directly.
271 if (hadInt32ToDouble)
272 ASSERT(setLocal->child1()->child1() == m_currentNode);
273 else
274 ASSERT(setLocal->child1() == m_currentNode);
275 ASSERT(setLocal->containsMovHint());
276 ASSERT(setLocal->codeOrigin == m_currentNode->codeOrigin);
277
278 Node* nextNode = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1);
279 if (nextNode->op() == Jump && nextNode->codeOrigin == m_currentNode->codeOrigin) {
280 // We're at an inlined return. Use a backward speculation instead.
281 return;
282 }
283 ASSERT(nextNode->codeOrigin != m_currentNode->codeOrigin);
284
285 OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
286 exit.m_codeOrigin = nextNode->codeOrigin;
287
288 exit.m_lastSetOperand = setLocal->local();
289 exit.m_valueRecoveryOverride = adoptRef(
290 new ValueRecoveryOverride(setLocal->local(), valueRecovery));
291 }
292
// Explicitly-forward speculation check: records a backward exit, then
// unconditionally converts it to forward with the given value recovery.
293 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
294 {
295 ASSERT(m_isCheckingArgumentTypes || m_canExit);
296 backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
297 convertLastOSRExitToForward(valueRecovery);
298 }
299
// JumpList variant of the explicitly-forward speculation check above.
300 void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
301 {
302 ASSERT(m_isCheckingArgumentTypes || m_canExit);
303 backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
304 convertLastOSRExitToForward(valueRecovery);
305 }
306
// Gives up on speculative compilation at this point: emits an unconditional
// jump to an OSR exit and marks the compilation as no longer okay, so
// subsequent checks become no-ops.
307 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
308 {
309 ASSERT(m_isCheckingArgumentTypes || m_canExit);
310 #if DFG_ENABLE(DEBUG_VERBOSE)
311 dataLogF("SpeculativeJIT was terminated.\n");
312 #endif
313 if (!m_compileOkay)
314 return;
315 speculationCheck(kind, jsValueRegs, node, m_jit.jump());
316 m_compileOkay = false;
317 }
318
// Edge convenience wrapper for terminateSpeculativeExecution.
319 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
320 {
321 ASSERT(m_isCheckingArgumentTypes || m_canExit);
322 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
323 }
324
// Backward type check: narrows the abstract value for 'edge' to the types
// that pass through, then records a BadType backward exit on jumpToFail.
325 void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
326 {
327 ASSERT(needsTypeCheck(edge, typesPassedThrough));
328 m_state.forNode(edge).filter(typesPassedThrough);
329 backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
330 }
331
// Direction-aware type check: backward check plus forward conversion when
// the current speculation direction is forward.
332 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
333 {
334 backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
335 if (m_speculationDirection == ForwardSpeculation)
336 convertLastOSRExitToForward();
337 }
338
// Explicitly-forward type check with a value recovery for the pending local.
339 void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
340 {
341 backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
342 convertLastOSRExitToForward(valueRecovery);
343 }
344
// Queues a slow-path generator; ownership transfers to m_slowPathGenerators,
// and the code is emitted later by runSlowPathGenerators().
345 void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
346 {
347 m_slowPathGenerators.append(slowPathGenerator);
348 }
349
// Emits all queued slow paths, in the order they were added.
350 void SpeculativeJIT::runSlowPathGenerators()
351 {
352 #if DFG_ENABLE(DEBUG_VERBOSE)
353 dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
354 #endif
355 for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
356 m_slowPathGenerators[i]->generate(this);
357 }
358
359 // On Windows we need to wrap fmod; on other platforms we can call it directly.
360 // On ARMv7 we assert that all function pointers have the low bit set (point to thumb code),
// so a direct pointer to the C library's fmod cannot be used and a local wrapper is required.
361 #if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
362 static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
363 {
364 return fmod(x, y);
365 }
366 #else
367 #define fmodAsDFGOperation fmod
368 #endif
369
// Resets all per-register generation info and both register banks to their
// default (empty) state, e.g. between basic blocks.
370 void SpeculativeJIT::clearGenerationInfo()
371 {
372 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
373 m_generationInfo[i] = GenerationInfo();
374 m_gprs = RegisterBank<GPRInfo>();
375 m_fprs = RegisterBank<FPRInfo>();
376 }
377
// Computes a plan for "silently" saving the GPR 'source' (holding the value
// of virtual register 'spillMe') around a call, and for refilling it
// afterwards. The plan records a spill action (how to write the value to the
// stack slot, if it needs spilling at all) and a fill action (how to
// reconstitute it: from a constant, from its spill slot, possibly with
// boxing/unboxing). Doubles are handled by silentSavePlanForFPR instead.
378 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
379 {
380 GenerationInfo& info = m_generationInfo[spillMe];
381 Node* node = info.node();
382 DataFormat registerFormat = info.registerFormat();
383 ASSERT(registerFormat != DataFormatNone);
384 ASSERT(registerFormat != DataFormatDouble);
385
386 SilentSpillAction spillAction;
387 SilentFillAction fillAction;
388
// Choose the spill action based on how the value is represented in the GPR.
389 if (!info.needsSpill())
390 spillAction = DoNothingForSpill;
391 else {
392 #if USE(JSVALUE64)
393 ASSERT(info.gpr() == source);
394 if (registerFormat == DataFormatInteger)
395 spillAction = Store32Payload;
396 else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
397 spillAction = StorePtr;
398 else {
399 ASSERT(registerFormat & DataFormatJS);
400 spillAction = Store64;
401 }
402 #elif USE(JSVALUE32_64)
403 if (registerFormat & DataFormatJS) {
// On 32-bit, a JS value occupies two GPRs; 'source' is either the tag half or the payload half.
404 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
405 spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
406 } else {
407 ASSERT(info.gpr() == source);
408 spillAction = Store32Payload;
409 }
410 #endif
411 }
412
// Choose the fill action: constants are rematerialized, everything else is
// reloaded from the spill slot in the format it was spilled in.
413 if (registerFormat == DataFormatInteger) {
414 ASSERT(info.gpr() == source);
415 ASSERT(isJSInteger(info.registerFormat()));
416 if (node->hasConstant()) {
417 ASSERT(isInt32Constant(node));
418 fillAction = SetInt32Constant;
419 } else
420 fillAction = Load32Payload;
421 } else if (registerFormat == DataFormatBoolean) {
422 #if USE(JSVALUE64)
// Booleans are never held in DataFormatBoolean on 64-bit in this code path.
423 RELEASE_ASSERT_NOT_REACHED();
424 fillAction = DoNothingForFill;
425 #elif USE(JSVALUE32_64)
426 ASSERT(info.gpr() == source);
427 if (node->hasConstant()) {
428 ASSERT(isBooleanConstant(node));
429 fillAction = SetBooleanConstant;
430 } else
431 fillAction = Load32Payload;
432 #endif
433 } else if (registerFormat == DataFormatCell) {
434 ASSERT(info.gpr() == source);
435 if (node->hasConstant()) {
436 JSValue value = valueOfJSConstant(node);
437 ASSERT_UNUSED(value, value.isCell());
438 fillAction = SetCellConstant;
439 } else {
440 #if USE(JSVALUE64)
441 fillAction = LoadPtr;
442 #else
443 fillAction = Load32Payload;
444 #endif
445 }
446 } else if (registerFormat == DataFormatStorage) {
447 ASSERT(info.gpr() == source);
448 fillAction = LoadPtr;
449 } else {
450 ASSERT(registerFormat & DataFormatJS);
451 #if USE(JSVALUE64)
452 ASSERT(info.gpr() == source);
453 if (node->hasConstant()) {
454 if (valueOfJSConstant(node).isCell())
455 fillAction = SetTrustedJSConstant;
456 else
457 fillAction = SetJSConstant;
458 } else if (info.spillFormat() == DataFormatInteger) {
// Spilled as a raw int32; refilling as a JS value requires re-boxing.
459 ASSERT(registerFormat == DataFormatJSInteger);
460 fillAction = Load32PayloadBoxInt;
461 } else if (info.spillFormat() == DataFormatDouble) {
462 ASSERT(registerFormat == DataFormatJSDouble);
463 fillAction = LoadDoubleBoxDouble;
464 } else
465 fillAction = Load64;
466 #else
467 ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
468 if (node->hasConstant())
469 fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
470 else if (info.payloadGPR() == source)
471 fillAction = Load32Payload;
472 else { // Fill the Tag
473 switch (info.spillFormat()) {
474 case DataFormatInteger:
475 ASSERT(registerFormat == DataFormatJSInteger);
476 fillAction = SetInt32Tag;
477 break;
478 case DataFormatCell:
479 ASSERT(registerFormat == DataFormatJSCell);
480 fillAction = SetCellTag;
481 break;
482 case DataFormatBoolean:
483 ASSERT(registerFormat == DataFormatJSBoolean);
484 fillAction = SetBooleanTag;
485 break;
486 default:
487 fillAction = Load32Tag;
488 break;
489 }
490 }
491 #endif
492 }
493
494 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
495 }
496
// FPR counterpart of silentSavePlanForGPR: plans the save/restore of a
// double-format value held in 'source' across a call. Constants are
// rematerialized on fill; previously JS-boxed spills are unboxed on reload.
497 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
498 {
499 GenerationInfo& info = m_generationInfo[spillMe];
500 Node* node = info.node();
501 ASSERT(info.registerFormat() == DataFormatDouble);
502
503 SilentSpillAction spillAction;
504 SilentFillAction fillAction;
505
506 if (!info.needsSpill())
507 spillAction = DoNothingForSpill;
508 else {
509 ASSERT(!node->hasConstant());
510 ASSERT(info.spillFormat() == DataFormatNone);
511 ASSERT(info.fpr() == source);
512 spillAction = StoreDouble;
513 }
514
515 #if USE(JSVALUE64)
516 if (node->hasConstant()) {
517 ASSERT(isNumberConstant(node));
518 fillAction = SetDoubleConstant;
519 } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
520 // it was already spilled previously and not as a double, which means we need unboxing.
521 ASSERT(info.spillFormat() & DataFormatJS);
522 fillAction = LoadJSUnboxDouble;
523 } else
524 fillAction = LoadDouble;
525 #elif USE(JSVALUE32_64)
526 ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
527 if (node->hasConstant()) {
528 ASSERT(isNumberConstant(node));
529 fillAction = SetDoubleConstant;
530 } else
531 fillAction = LoadDouble;
532 #endif
533
534 return SilentRegisterSavePlan(spillAction, fillAction, node, source);
535 }
536
// Executes the spill half of a SilentRegisterSavePlan: stores the register
// into the node's stack slot using the store width chosen by the planner.
537 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
538 {
539 switch (plan.spillAction()) {
540 case DoNothingForSpill:
541 break;
542 case Store32Tag:
543 m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
544 break;
545 case Store32Payload:
546 m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
547 break;
548 case StorePtr:
549 m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
550 break;
551 #if USE(JSVALUE64)
552 case Store64:
553 m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
554 break;
555 #endif
556 case StoreDouble:
557 m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
558 break;
559 default:
560 RELEASE_ASSERT_NOT_REACHED();
561 }
562 }
563
// Executes the fill half of a SilentRegisterSavePlan: rematerializes a
// constant or reloads the spilled value (boxing/unboxing where the planner
// said so). 'canTrample' is a scratch GPR used only on 64-bit for the
// constant-double and unbox-double paths.
564 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
565 {
566 #if USE(JSVALUE32_64)
567 UNUSED_PARAM(canTrample);
568 #endif
569 switch (plan.fillAction()) {
570 case DoNothingForFill:
571 break;
572 case SetInt32Constant:
573 m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
574 break;
575 case SetBooleanConstant:
576 m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
577 break;
578 case SetCellConstant:
579 m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
580 break;
581 #if USE(JSVALUE64)
582 case SetTrustedJSConstant:
583 m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
584 break;
585 case SetJSConstant:
586 m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
587 break;
588 case SetDoubleConstant:
589 m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
590 m_jit.move64ToDouble(canTrample, plan.fpr());
591 break;
592 case Load32PayloadBoxInt:
// Reload raw int32 and re-box it as a JS integer by or-ing in the number tag.
593 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
594 m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
595 break;
596 case LoadDoubleBoxDouble:
597 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
598 m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
599 break;
600 case LoadJSUnboxDouble:
601 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
602 unboxDouble(canTrample, plan.fpr());
603 break;
604 #else
605 case SetJSConstantTag:
606 m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
607 break;
608 case SetJSConstantPayload:
609 m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
610 break;
611 case SetInt32Tag:
612 m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
613 break;
614 case SetCellTag:
615 m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
616 break;
617 case SetBooleanTag:
618 m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
619 break;
620 case SetDoubleConstant:
621 m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
622 break;
623 #endif
624 case Load32Tag:
625 m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
626 break;
627 case Load32Payload:
628 m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
629 break;
630 case LoadPtr:
631 m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
632 break;
633 #if USE(JSVALUE64)
634 case Load64:
635 m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
636 break;
637 #endif
638 case LoadDouble:
639 m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
640 break;
641 default:
642 RELEASE_ASSERT_NOT_REACHED();
643 }
644 }
645
// Maps a typed-array ArrayMode to the VM's descriptor for that view type.
// Returns 0 for any non-typed-array mode — callers must only dereference the
// result on typed-array paths.
646 const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode)
647 {
648 switch (arrayMode.type()) {
649 case Array::Int8Array:
650 return &m_jit.vm()->int8ArrayDescriptor();
651 case Array::Int16Array:
652 return &m_jit.vm()->int16ArrayDescriptor();
653 case Array::Int32Array:
654 return &m_jit.vm()->int32ArrayDescriptor();
655 case Array::Uint8Array:
656 return &m_jit.vm()->uint8ArrayDescriptor();
657 case Array::Uint8ClampedArray:
658 return &m_jit.vm()->uint8ClampedArrayDescriptor();
659 case Array::Uint16Array:
660 return &m_jit.vm()->uint16ArrayDescriptor();
661 case Array::Uint32Array:
662 return &m_jit.vm()->uint32ArrayDescriptor();
663 case Array::Float32Array:
664 return &m_jit.vm()->float32ArrayDescriptor();
665 case Array::Float64Array:
666 return &m_jit.vm()->float64ArrayDescriptor();
667 default:
668 return 0;
669 }
670 }
671
// Given tempGPR holding an object's indexing type byte, emits a branch that
// is taken when the object does NOT match the wanted shape. For Array::Array
// the IsArray bit must also be set; tempGPR is clobbered by the mask.
672 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
673 {
674 switch (arrayMode.arrayClass()) {
675 case Array::OriginalArray: {
676 CRASH();
677 JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
678 return result;
679 }
680
681 case Array::Array:
682 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
683 return m_jit.branch32(
684 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
685
686 default:
687 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
688 return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
689 }
690 }
691
// JumpList form: emits the shape checks for an entire ArrayMode. Int32 /
// Double / Contiguous reduce to the single-shape helper above. The
// (SlowPut)ArrayStorage cases accept a range of shapes — plain ArrayStorage,
// or [ArrayStorage, SlowPutArrayStorage] when slow-put is allowed — using a
// sub + unsigned-Above range check, with an extra IsArray-bit test when the
// mode requires a JSArray. tempGPR holds the indexing type and is clobbered.
692 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
693 {
694 JITCompiler::JumpList result;
695
696 switch (arrayMode.type()) {
697 case Array::Int32:
698 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
699
700 case Array::Double:
701 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
702
703 case Array::Contiguous:
704 return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
705
706 case Array::ArrayStorage:
707 case Array::SlowPutArrayStorage: {
708 ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
709
710 if (arrayMode.isJSArray()) {
711 if (arrayMode.isSlowPut()) {
712 result.append(
713 m_jit.branchTest32(
714 MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
715 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
716 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
717 result.append(
718 m_jit.branch32(
719 MacroAssembler::Above, tempGPR,
720 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
721 break;
722 }
723 m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
724 result.append(
725 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
726 break;
727 }
728 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
729 if (arrayMode.isSlowPut()) {
730 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
731 result.append(
732 m_jit.branch32(
733 MacroAssembler::Above, tempGPR,
734 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
735 break;
736 }
737 result.append(
738 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
739 break;
740 }
741 default:
742 CRASH();
743 break;
744 }
745
746 return result;
747 }
748
// Emits the runtime check for a CheckArray node: verifies the base cell
// matches the node's (non-converting) ArrayMode. Indexed storage modes check
// the structure's indexing-type byte; all other modes compare the
// structure's ClassInfo against the expected class. Skips the check entirely
// if abstract interpretation has already proven it.
749 void SpeculativeJIT::checkArray(Node* node)
750 {
751 ASSERT(node->arrayMode().isSpecific());
752 ASSERT(!node->arrayMode().doesConversion());
753
754 SpeculateCellOperand base(this, node->child1());
755 GPRReg baseReg = base.gpr();
756
// 'result' is null unless the mode is a typed array; only the typed-array
// switch cases below dereference it.
757 const TypedArrayDescriptor* result = typedArrayDescriptor(node->arrayMode());
758
759 if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
760 noResult(m_currentNode);
761 return;
762 }
763
764 const ClassInfo* expectedClassInfo = 0;
765
766 switch (node->arrayMode().type()) {
767 case Array::String:
768 expectedClassInfo = &JSString::s_info;
769 break;
770 case Array::Int32:
771 case Array::Double:
772 case Array::Contiguous:
773 case Array::ArrayStorage:
774 case Array::SlowPutArrayStorage: {
// Indexed-storage modes: load structure, then its indexing type, and exit
// if the shape doesn't match.
775 GPRTemporary temp(this);
776 GPRReg tempGPR = temp.gpr();
777 m_jit.loadPtr(
778 MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
779 m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
780 speculationCheck(
781 BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
782 jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
783
784 noResult(m_currentNode);
785 return;
786 }
787 case Array::Arguments:
788 expectedClassInfo = &Arguments::s_info;
789 break;
790 case Array::Int8Array:
791 case Array::Int16Array:
792 case Array::Int32Array:
793 case Array::Uint8Array:
794 case Array::Uint8ClampedArray:
795 case Array::Uint16Array:
796 case Array::Uint32Array:
797 case Array::Float32Array:
798 case Array::Float64Array:
799 expectedClassInfo = result->m_classInfo;
800 break;
801 default:
802 RELEASE_ASSERT_NOT_REACHED();
803 break;
804 }
805
// ClassInfo-based modes: exit unless the structure's class matches.
806 GPRTemporary temp(this);
807 m_jit.loadPtr(
808 MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
809 speculationCheck(
810 Uncountable, JSValueRegs(), 0,
811 m_jit.branchPtr(
812 MacroAssembler::NotEqual,
813 MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
814 MacroAssembler::TrustedImmPtr(expectedClassInfo)));
815
816 noResult(m_currentNode);
817 }
818
// Emits the fast-path check for an Arrayify(ToStructure) node and queues a
// slow path that performs the actual storage conversion when the check
// fails. ArrayifyToStructure compares the whole structure pointer (weakly);
// other arrayify ops check only the indexing shape, which needs an extra
// temporary to hold the structure. propertyReg may be InvalidGPRReg.
819 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
820 {
821 ASSERT(node->arrayMode().doesConversion());
822
823 GPRTemporary temp(this);
824 GPRTemporary structure;
825 GPRReg tempGPR = temp.gpr();
826 GPRReg structureGPR = InvalidGPRReg;
827
828 if (node->op() != ArrayifyToStructure) {
829 GPRTemporary realStructure(this);
830 structure.adopt(realStructure);
831 structureGPR = structure.gpr();
832 }
833
834 // We can skip all that comes next if we already have array storage.
835 MacroAssembler::JumpList slowPath;
836
837 if (node->op() == ArrayifyToStructure) {
838 slowPath.append(m_jit.branchWeakPtr(
839 JITCompiler::NotEqual,
840 JITCompiler::Address(baseReg, JSCell::structureOffset()),
841 node->structure()));
842 } else {
843 m_jit.loadPtr(
844 MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
845
846 m_jit.load8(
847 MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
848
849 slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
850 }
851
852 addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
853 slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
854
855 noResult(m_currentNode);
856 }
857
858 void SpeculativeJIT::arrayify(Node* node)
859 {
860 ASSERT(node->arrayMode().isSpecific());
861
862 SpeculateCellOperand base(this, node->child1());
863
864 if (!node->child2()) {
865 arrayify(node, base.gpr(), InvalidGPRReg);
866 return;
867 }
868
869 SpeculateIntegerOperand property(this, node->child2());
870
871 arrayify(node, base.gpr(), property.gpr());
872 }
873
// Ensures the value for this edge is available in a GPR in storage-pointer
// form and returns that register. Spilled storage is reloaded from the stack;
// anything not already in storage format is filled as a cell.
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];
    
    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Not in a register. If it was spilled as storage, reload it directly.
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }
        
        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }
        
    case DataFormatStorage: {
        // Already in a register in storage format; just lock it for this use.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }
        
    default:
        // Any other register format: treat as a cell.
        return fillSpeculateCell(edge);
    }
}
903
904 void SpeculativeJIT::useChildren(Node* node)
905 {
906 if (node->flags() & NodeHasVarArgs) {
907 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
908 if (!!m_jit.graph().m_varArgChildren[childIdx])
909 use(m_jit.graph().m_varArgChildren[childIdx]);
910 }
911 } else {
912 Edge child1 = node->child1();
913 if (!child1) {
914 ASSERT(!node->child2() && !node->child3());
915 return;
916 }
917 use(child1);
918
919 Edge child2 = node->child2();
920 if (!child2) {
921 ASSERT(!node->child3());
922 return;
923 }
924 use(child2);
925
926 Edge child3 = node->child3();
927 if (!child3)
928 return;
929 use(child3);
930 }
931 }
932
933 void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
934 {
935 UNUSED_PARAM(jit);
936 UNUSED_PARAM(owner);
937 UNUSED_PARAM(scratch1);
938 UNUSED_PARAM(scratch2);
939 UNUSED_PARAM(useKind);
940 ASSERT(owner != scratch1);
941 ASSERT(owner != scratch2);
942 ASSERT(scratch1 != scratch2);
943
944 #if ENABLE(WRITE_BARRIER_PROFILING)
945 JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
946 #endif
947 }
948
949 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
950 {
951 UNUSED_PARAM(ownerGPR);
952 UNUSED_PARAM(valueGPR);
953 UNUSED_PARAM(scratch1);
954 UNUSED_PARAM(scratch2);
955 UNUSED_PARAM(useKind);
956
957 if (isKnownNotCell(valueUse.node()))
958 return;
959
960 #if ENABLE(WRITE_BARRIER_PROFILING)
961 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
962 #endif
963 }
964
965 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
966 {
967 UNUSED_PARAM(ownerGPR);
968 UNUSED_PARAM(value);
969 UNUSED_PARAM(scratch1);
970 UNUSED_PARAM(scratch2);
971 UNUSED_PARAM(useKind);
972
973 if (Heap::isMarked(value))
974 return;
975
976 #if ENABLE(WRITE_BARRIER_PROFILING)
977 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
978 #endif
979 }
980
981 void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
982 {
983 UNUSED_PARAM(owner);
984 UNUSED_PARAM(valueGPR);
985 UNUSED_PARAM(scratch);
986 UNUSED_PARAM(useKind);
987
988 if (isKnownNotCell(valueUse.node()))
989 return;
990
991 #if ENABLE(WRITE_BARRIER_PROFILING)
992 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
993 #endif
994 }
995
996 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
997 {
998 unsigned branchIndexInBlock = detectPeepHoleBranch();
999 if (branchIndexInBlock != UINT_MAX) {
1000 Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
1001
1002 ASSERT(node->adjustedRefCount() == 1);
1003
1004 nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1005
1006 m_indexInBlock = branchIndexInBlock;
1007 m_currentNode = branchNode;
1008
1009 return true;
1010 }
1011
1012 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1013
1014 return false;
1015 }
1016
1017 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1018 {
1019 unsigned branchIndexInBlock = detectPeepHoleBranch();
1020 if (branchIndexInBlock != UINT_MAX) {
1021 Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
1022
1023 ASSERT(node->adjustedRefCount() == 1);
1024
1025 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1026
1027 m_indexInBlock = branchIndexInBlock;
1028 m_currentNode = branchNode;
1029
1030 return true;
1031 }
1032
1033 nonSpeculativeNonPeepholeStrictEq(node, invert);
1034
1035 return false;
1036 }
1037
1038 #ifndef NDEBUG
1039 static const char* dataFormatString(DataFormat format)
1040 {
1041 // These values correspond to the DataFormat enum.
1042 const char* strings[] = {
1043 "[ ]",
1044 "[ i]",
1045 "[ d]",
1046 "[ c]",
1047 "Err!",
1048 "Err!",
1049 "Err!",
1050 "Err!",
1051 "[J ]",
1052 "[Ji]",
1053 "[Jd]",
1054 "[Jc]",
1055 "Err!",
1056 "Err!",
1057 "Err!",
1058 "Err!",
1059 };
1060 return strings[format];
1061 }
1062
// Debug-only dump of the current register-allocation state: both register
// banks, then one line per live virtual register showing its register format,
// spill format, and backing register (if any).
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF(" gprs:\n");
    m_gprs.dump();
    dataLogF(" fprs:\n");
    m_fprs.dump();
    dataLogF(" VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF(" % 3d:[__][__]", i);
        // Append the backing register: an FPR for doubles, otherwise a GPR
        // (on 32-bit, boxed values use a register pair and are not shown).
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
1094 #endif
1095
1096
1097 #if DFG_ENABLE(CONSISTENCY_CHECK)
// Debug-only cross-check that the register banks and the per-virtual-register
// generation info agree in both directions, and that no register is left
// locked between nodes. Dumps state and crashes on any mismatch.
void SpeculativeJIT::checkConsistency()
{
    bool failed = false;

    // No register may remain locked once a node has finished generating.
    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }
    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }

    // Forward direction: every live value claiming a register must be named
    // by the corresponding register bank.
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        VirtualRegister virtualRegister = (VirtualRegister)i;
        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (!info.alive())
            continue;
        switch (info.registerFormat()) {
        case DataFormatNone:
            break;
        case DataFormatJS:
        case DataFormatJSInteger:
        case DataFormatJSDouble:
        case DataFormatJSCell:
        case DataFormatJSBoolean:
#if USE(JSVALUE32_64)
            // On 32-bit, boxed values occupy a tag/payload pair and are not
            // checked here; on 64-bit they fall through to the GPR check.
            break;
#endif
        case DataFormatInteger:
        case DataFormatCell:
        case DataFormatBoolean:
        case DataFormatStorage: {
            GPRReg gpr = info.gpr();
            ASSERT(gpr != InvalidGPRReg);
            if (m_gprs.name(gpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
                failed = true;
            }
            break;
        }
        case DataFormatDouble: {
            FPRReg fpr = info.fpr();
            ASSERT(fpr != InvalidFPRReg);
            if (m_fprs.name(fpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
                failed = true;
            }
            break;
        }
        case DataFormatOSRMarker:
        case DataFormatDead:
        case DataFormatArguments:
            // Marker formats never describe a live, register-resident value.
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Reverse direction: every named register must point back at generation
    // info that is actually backed by it.
    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = m_generationInfo[virtualRegister];
#if USE(JSVALUE64)
        if (iter.regID() != info.gpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
#else
        if (!(info.registerFormat() & DataFormatJS)) {
            if (iter.regID() != info.gpr()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        } else {
            // A boxed 32-bit value may be backed by either its tag or its
            // payload register.
            if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        }
#endif
    }

    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;

        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (iter.regID() != info.fpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
    }

    if (failed) {
        dump();
        CRASH();
    }
}
1203 #endif
1204
// Creates an empty temporary that owns no register; only useful as the
// target of a later adopt() (see adopt(), which asserts this state).
GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}
1210
1211 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1212 : m_jit(jit)
1213 , m_gpr(InvalidGPRReg)
1214 {
1215 m_gpr = m_jit->allocate();
1216 }
1217
1218 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1219 : m_jit(jit)
1220 , m_gpr(InvalidGPRReg)
1221 {
1222 m_gpr = m_jit->allocate(specific);
1223 }
1224
1225 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
1226 : m_jit(jit)
1227 , m_gpr(InvalidGPRReg)
1228 {
1229 if (m_jit->canReuse(op1.node()))
1230 m_gpr = m_jit->reuse(op1.gpr());
1231 else
1232 m_gpr = m_jit->allocate();
1233 }
1234
1235 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
1236 : m_jit(jit)
1237 , m_gpr(InvalidGPRReg)
1238 {
1239 if (m_jit->canReuse(op1.node()))
1240 m_gpr = m_jit->reuse(op1.gpr());
1241 else if (m_jit->canReuse(op2.node()))
1242 m_gpr = m_jit->reuse(op2.gpr());
1243 else
1244 m_gpr = m_jit->allocate();
1245 }
1246
1247 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
1248 : m_jit(jit)
1249 , m_gpr(InvalidGPRReg)
1250 {
1251 if (m_jit->canReuse(op1.node()))
1252 m_gpr = m_jit->reuse(op1.gpr());
1253 else
1254 m_gpr = m_jit->allocate();
1255 }
1256
1257 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
1258 : m_jit(jit)
1259 , m_gpr(InvalidGPRReg)
1260 {
1261 if (m_jit->canReuse(op1.node()))
1262 m_gpr = m_jit->reuse(op1.gpr());
1263 else
1264 m_gpr = m_jit->allocate();
1265 }
1266
1267 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
1268 : m_jit(jit)
1269 , m_gpr(InvalidGPRReg)
1270 {
1271 if (m_jit->canReuse(op1.node()))
1272 m_gpr = m_jit->reuse(op1.gpr());
1273 else if (m_jit->canReuse(op2.node()))
1274 m_gpr = m_jit->reuse(op2.gpr());
1275 else
1276 m_gpr = m_jit->allocate();
1277 }
1278
1279 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
1280 : m_jit(jit)
1281 , m_gpr(InvalidGPRReg)
1282 {
1283 if (m_jit->canReuse(op1.node()))
1284 m_gpr = m_jit->reuse(op1.gpr());
1285 else
1286 m_gpr = m_jit->allocate();
1287 }
1288
1289 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
1290 : m_jit(jit)
1291 , m_gpr(InvalidGPRReg)
1292 {
1293 if (m_jit->canReuse(op1.node()))
1294 m_gpr = m_jit->reuse(op1.gpr());
1295 else
1296 m_gpr = m_jit->allocate();
1297 }
1298
1299 #if USE(JSVALUE64)
1300 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1301 : m_jit(jit)
1302 , m_gpr(InvalidGPRReg)
1303 {
1304 if (m_jit->canReuse(op1.node()))
1305 m_gpr = m_jit->reuse(op1.gpr());
1306 else
1307 m_gpr = m_jit->allocate();
1308 }
1309 #else
1310 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
1311 : m_jit(jit)
1312 , m_gpr(InvalidGPRReg)
1313 {
1314 if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1315 m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
1316 else
1317 m_gpr = m_jit->allocate();
1318 }
1319 #endif
1320
1321 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
1322 : m_jit(jit)
1323 , m_gpr(InvalidGPRReg)
1324 {
1325 if (m_jit->canReuse(op1.node()))
1326 m_gpr = m_jit->reuse(op1.gpr());
1327 else
1328 m_gpr = m_jit->allocate();
1329 }
1330
// Transfers ownership of another temporary's register into this one. The
// receiver must be empty (default-constructed) and the donor must hold a
// valid register; the donor is left empty afterwards.
void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    // Clear the donor so its destructor does not release the register.
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}
1342
1343 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1344 : m_jit(jit)
1345 , m_fpr(InvalidFPRReg)
1346 {
1347 m_fpr = m_jit->fprAllocate();
1348 }
1349
1350 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1351 : m_jit(jit)
1352 , m_fpr(InvalidFPRReg)
1353 {
1354 if (m_jit->canReuse(op1.node()))
1355 m_fpr = m_jit->reuse(op1.fpr());
1356 else
1357 m_fpr = m_jit->fprAllocate();
1358 }
1359
1360 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1361 : m_jit(jit)
1362 , m_fpr(InvalidFPRReg)
1363 {
1364 if (m_jit->canReuse(op1.node()))
1365 m_fpr = m_jit->reuse(op1.fpr());
1366 else if (m_jit->canReuse(op2.node()))
1367 m_fpr = m_jit->reuse(op2.fpr());
1368 else
1369 m_fpr = m_jit->fprAllocate();
1370 }
1371
1372 #if USE(JSVALUE32_64)
1373 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1374 : m_jit(jit)
1375 , m_fpr(InvalidFPRReg)
1376 {
1377 if (op1.isDouble() && m_jit->canReuse(op1.node()))
1378 m_fpr = m_jit->reuse(op1.fpr());
1379 else
1380 m_fpr = m_jit->fprAllocate();
1381 }
1382 #endif
1383
// Emits a fused double compare-and-branch. Unlike the integer/boolean
// variants this does not swap taken/notTaken for fall-through —
// NOTE(review): presumably because double conditions do not invert cleanly
// in the presence of NaN; confirm.
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());
    
    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}
1395
// Emits a fused object-equality compare-and-branch, speculating that both
// children are objects. When the masquerades-as-undefined watchpoint is still
// valid we register it and only need to rule out strings; otherwise we also
// check the MasqueradesAsUndefined type-info flag on each operand.
void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
    
    // If taken is next, invert the condition and swap the targets so we can
    // fall through to it.
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    
    if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        // Only emit the string-structure check when abstract interpretation
        // cannot already prove the child is an object.
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
    } else {
        // No watchpoint cover: load each structure and additionally reject
        // objects whose type info says they masquerade as undefined.
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchPtr(
                    MacroAssembler::Equal,
                    structureGPR,
                    MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // Objects are equal iff they are the same cell: compare the pointers.
    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
1472
// Emits a fused boolean compare-and-branch, folding a boolean-constant child
// into an immediate (the low 32 bits of its encoded JSValue) when possible.
void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isBooleanConstant(node->child1().node())) {
        bool imm = valueOfBooleanConstant(node->child1().node());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
    } else if (isBooleanConstant(node->child2().node())) {
        SpeculateBooleanOperand op1(this, node->child1());
        bool imm = valueOfBooleanConstant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
    } else {
        // Neither side is constant: compare the two boolean registers.
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}
1503
// Emits a fused int32 compare-and-branch, folding an int32-constant child
// into an immediate when possible.
void SpeculativeJIT::compilePeepHoleIntegerBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isInt32Constant(node->child1().node())) {
        int32_t imm = valueOfInt32Constant(node->child1().node());
        SpeculateIntegerOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (isInt32Constant(node->child2().node())) {
        SpeculateIntegerOperand op1(this, node->child1());
        int32_t imm = valueOfInt32Constant(node->child2().node());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        // Neither side is constant: compare the two integer registers.
        SpeculateIntegerOperand op1(this, node->child1());
        SpeculateIntegerOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}
1534
// Returns true if the compare is fused with a subsequent branch.
// Dispatches to the specialized peephole compilers based on the compare's
// child use kinds, falling back to the non-speculative helper call otherwise.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare. 
        ASSERT(node->adjustedRefCount() == 1);
        
        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleIntegerBranch(node, branchNode, condition);
        else if (node->isBinaryUseKind(NumberUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(StringUse)) {
                // Use non-peephole comparison, for now.
                return false;
            }
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else {
                // Unknown use-kind combination; these helpers consume the
                // children themselves and we are done.
                nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        // The specialized paths above leave the children to be consumed here;
        // then skip ahead past the branch, since we generated it too.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
1581
1582 void SpeculativeJIT::noticeOSRBirth(Node* node)
1583 {
1584 if (!node->hasVirtualRegister())
1585 return;
1586
1587 VirtualRegister virtualRegister = node->virtualRegister();
1588 GenerationInfo& info = m_generationInfo[virtualRegister];
1589
1590 info.noticeOSRBirth(*m_stream, node, virtualRegister);
1591 }
1592
// Records a MovHint: the given local now corresponds to the child's value.
// Notes the child's OSR birth and logs the hint in the variable-event stream.
void SpeculativeJIT::compileMovHint(Node* node)
{
    ASSERT(node->containsMovHint() && node->op() != ZombieHint);
    
    m_lastSetOperand = node->local();

    Node* child = node->child1().node();
    noticeOSRBirth(child);
    
    // NOTE(review): UInt32ToNumber's own child is also given an OSR birth —
    // presumably because the conversion can act as a pass-through; confirm.
    if (child->op() == UInt32ToNumber)
        noticeOSRBirth(child->child1().node());
    
    m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
}
1607
// Compiles a MovHint and then performs the speculation implied by the node's
// first child. Produces no result value.
void SpeculativeJIT::compileMovHintAndCheck(Node* node)
{
    compileMovHint(node);
    speculate(node, node->child1());
    noResult(node);
}
1614
// Records, for every argument of an inlined call frame (including |this|),
// a ValueRecovery describing how to reconstruct the argument's value.
void SpeculativeJIT::compileInlineStart(Node* node)
{
    InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
    int argumentCountIncludingThis = inlineCallFrame->arguments.size();
    unsigned argumentPositionStart = node->argumentPositionStart();
    CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        ValueRecovery recovery;
        // Captured arguments are always kept flushed to the stack.
        if (codeBlock->isCaptured(argumentToOperand(i)))
            recovery = ValueRecovery::alreadyInJSStack();
        else {
            // Pick the ValueSource matching how this argument position was
            // speculated/unboxed, then derive the recovery from it.
            ArgumentPosition& argumentPosition =
                m_jit.graph().m_argumentPositions[argumentPositionStart + i];
            ValueSource valueSource;
            if (!argumentPosition.shouldUnboxIfPossible())
                valueSource = ValueSource(ValueInJSStack);
            else if (argumentPosition.shouldUseDoubleFormat())
                valueSource = ValueSource(DoubleInJSStack);
            else if (isInt32Speculation(argumentPosition.prediction()))
                valueSource = ValueSource(Int32InJSStack);
            else if (isCellSpeculation(argumentPosition.prediction()))
                valueSource = ValueSource(CellInJSStack);
            else if (isBooleanSpeculation(argumentPosition.prediction()))
                valueSource = ValueSource(BooleanInJSStack);
            else
                valueSource = ValueSource(ValueInJSStack);
            recovery = computeValueRecoveryFor(valueSource);
        }
        // The recovery should refer either to something that has already been
        // stored into the stack at the right place, or to a constant,
        // since the Arguments code isn't smart enough to handle anything else.
        // The exception is the this argument, which we don't really need to be
        // able to recover.
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\nRecovery for argument %d: ", i);
        recovery.dump(WTF::dataFile());
#endif
        inlineCallFrame->arguments[i] = recovery;
    }
}
1655
// Generates code for one basic block: sets up the per-block value sources and
// abstract state, then walks the block's nodes, generating code for live
// nodes and doing OSR/minified-graph bookkeeping for dead ones.
void SpeculativeJIT::compile(BasicBlock& block)
{
    ASSERT(m_compileOkay);
    
    if (!block.isReachable)
        return;
    
    if (!block.cfaHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
#if !ASSERT_DISABLED
        m_jit.breakpoint();
#endif
        return;
    }

    m_blockHeads[m_block] = m_jit.label();
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Setting up state for block #%u: ", m_block);
#endif
    
    m_stream->appendAndLog(VariableEvent::reset());
    
    m_jit.jitAssertHasValidCallFrame();

    // Arguments always enter the block flushed to the stack.
    ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
    for (size_t i = 0; i < m_arguments.size(); ++i) {
        ValueSource valueSource = ValueSource(ValueInJSStack);
        m_arguments[i] = valueSource;
        m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
    }
    
    m_state.reset();
    m_state.beginBasicBlock(&block);
    
    // Derive a ValueSource for each local from the variables live at the
    // head of the block.
    ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
    for (size_t i = 0; i < m_variables.size(); ++i) {
        Node* node = block.variablesAtHead.local(i);
        ValueSource valueSource;
        if (!node)
            valueSource = ValueSource(SourceIsDead);
        else if (node->variableAccessData()->isArgumentsAlias())
            valueSource = ValueSource(ArgumentsSource);
        else if (!node->refCount())
            valueSource = ValueSource(SourceIsDead);
        else if (!node->variableAccessData()->shouldUnboxIfPossible())
            valueSource = ValueSource(ValueInJSStack);
        else if (node->variableAccessData()->shouldUseDoubleFormat())
            valueSource = ValueSource(DoubleInJSStack);
        else
            valueSource = ValueSource::forSpeculation(node->variableAccessData()->argumentAwarePrediction());
        m_variables[i] = valueSource;
        // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
        m_stream->appendAndLog(VariableEvent::setLocal(i, valueSource.dataFormat()));
    }
    
    m_lastSetOperand = std::numeric_limits<int>::max();
    m_codeOriginForOSR = CodeOrigin();
    
    // Edge-code verification: the predecessor leaves its target block index
    // in regT0; trap if it does not match this block.
    if (DFG_ENABLE_EDGE_CODE_VERIFICATION) {
        JITCompiler::Jump verificationSucceeded =
            m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, TrustedImm32(m_block));
        m_jit.breakpoint();
        verificationSucceeded.link(&m_jit);
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("\n");
#endif

    // Main code-generation loop over the block's nodes.
    for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
        m_currentNode = block[m_indexInBlock];
#if !ASSERT_DISABLED
        m_canExit = m_currentNode->canExit();
#endif
        bool shouldExecuteEffects = m_state.startExecuting(m_currentNode);
        m_jit.setForNode(m_currentNode);
        m_codeOriginForOSR = m_currentNode->codeOrigin;
        if (!m_currentNode->shouldGenerate()) {
            // Dead node: no code, but some ops still need bookkeeping for
            // OSR exit and the minified graph.
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
            switch (m_currentNode->op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
                
            case WeakJSConstant:
                m_jit.addWeakReference(m_currentNode->weakConstant());
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
                
            case SetLocal:
                RELEASE_ASSERT_NOT_REACHED();
                break;
                
            case MovHint:
                compileMovHint(m_currentNode);
                break;
                
            case ZombieHint: {
                // The local's value is dead from here on.
                m_lastSetOperand = m_currentNode->local();
                m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
                break;
            }

            default:
                if (belongsInMinifiedGraph(m_currentNode->op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                break;
            }
        } else {
            
            if (verboseCompilationEnabled()) {
                dataLogF(
                    "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
                    (int)m_currentNode->index(),
                    m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog(" ");
#else
                dataLog("\n");
#endif
            }
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
            m_jit.breakpoint();
#endif
#if DFG_ENABLE(XOR_DEBUG_AID)
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
#endif
            checkConsistency();
            
            m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;
            
            compile(m_currentNode);
            if (!m_compileOkay) {
                // Code generation bailed on this node; reset the flag and
                // give up on the rest of the block.
                m_compileOkay = true;
                clearGenerationInfo();
                return;
            }
            
            if (belongsInMinifiedGraph(m_currentNode->op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
                noticeOSRBirth(m_currentNode);
            }
            
#if DFG_ENABLE(DEBUG_VERBOSE)
            if (m_currentNode->hasResult()) {
                GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
                dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
                if (info.registerFormat() != DataFormatNone) {
                    if (info.registerFormat() == DataFormatDouble)
                        dataLogF(", %s", FPRInfo::debugName(info.fpr()));
#if USE(JSVALUE32_64)
                    else if (info.registerFormat() & DataFormatJS)
                        dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
#endif
                    else
                        dataLogF(", %s", GPRInfo::debugName(info.gpr()));
                }
                dataLogF(" ");
            } else
                dataLogF(" ");
#endif
        }
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\n");
#endif
        
        // Make sure that the abstract state is rematerialized for the next node.
        if (shouldExecuteEffects)
            m_state.executeEffects(m_indexInBlock);
        
        if (m_currentNode->shouldGenerate())
            checkConsistency();
    }
    
    // Perform the most basic verification that children have been used correctly.
#if !ASSERT_DISABLED
    for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
        GenerationInfo& info = m_generationInfo[index];
        ASSERT(!info.alive());
    }
#endif
}
1848
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry. Each failed
// check triggers an OSR exit attributed to bytecode index 0 (function entry).
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_currentNode);
    m_isCheckingArgumentTypes = true;
    m_speculationDirection = BackwardSpeculation;
    // All exits from argument checks happen "at" the start of the function.
    m_codeOriginForOSR = CodeOrigin(0);

    // At function entry every argument and local still lives in the JSStack;
    // record that so any OSR exit recovers values from there.
    for (size_t i = 0; i < m_arguments.size(); ++i)
        m_arguments[i] = ValueSource(ValueInJSStack);
    for (size_t i = 0; i < m_variables.size(); ++i)
        m_variables[i] = ValueSource(ValueInJSStack);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = m_jit.graph().m_arguments[i];
        ASSERT(node->op() == SetArgument);
        if (!node->shouldGenerate()) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }

        VariableAccessData* variableAccessData = node->variableAccessData();
        // Only check arguments that the compiler decided are worth keeping
        // unboxed; others will be used as generic JSValues anyway.
        if (!variableAccessData->isProfitableToUnbox())
            continue;

        VirtualRegister virtualRegister = variableAccessData->local();
        SpeculatedType predictedType = variableAccessData->prediction();

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));

#if USE(JSVALUE64)
        if (isInt32Speculation(predictedType))
            // On 64-bit, int32s compare numerically >= tagTypeNumber; Below means "not an int32".
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
        else if (isBooleanSpeculation(predictedType)) {
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            // XOR with ValueFalse maps true/false onto 1/0; any bit other than
            // the lowest still set means the value was not a boolean.
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
        } else if (isCellSpeculation(predictedType))
            // Cells have no tag bits set.
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
#else
        // On 32-bit the tag word alone identifies int32/boolean/cell.
        if (isInt32Speculation(predictedType))
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
        else if (isBooleanSpeculation(predictedType))
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
        else if (isCellSpeculation(predictedType))
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
#endif
    }
    m_isCheckingArgumentTypes = false;
}
1901
1902 bool SpeculativeJIT::compile()
1903 {
1904 checkArgumentTypes();
1905
1906 if (DFG_ENABLE_EDGE_CODE_VERIFICATION)
1907 m_jit.move(TrustedImm32(0), GPRInfo::regT0);
1908
1909 ASSERT(!m_currentNode);
1910 for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
1911 m_jit.setForBlock(m_block);
1912 BasicBlock* block = m_jit.graph().m_blocks[m_block].get();
1913 if (block)
1914 compile(*block);
1915 }
1916 linkBranches();
1917 return true;
1918 }
1919
// Records an entry point (and, with edge code verification, a trampoline)
// for every basic block that can be targeted by on-stack replacement.
void SpeculativeJIT::createOSREntries()
{
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;

        // Currently we only need to create OSR entry trampolines when using edge code
        // verification. But in the future, we'll need this for other things as well (like
        // when we have global reg alloc).
        // If we don't need OSR entry trampolines, the entry point is simply the
        // block's own head label.
        if (!DFG_ENABLE_EDGE_CODE_VERIFICATION) {
            m_osrEntryHeads.append(m_blockHeads[blockIndex]);
            continue;
        }

        // Trampoline: seed regT0 with the block index (for edge verification)
        // and jump to the real block head.
        m_osrEntryHeads.append(m_jit.label());
        m_jit.move(TrustedImm32(blockIndex), GPRInfo::regT0);
        m_jit.jump().linkTo(m_blockHeads[blockIndex], &m_jit);
    }
}
1943
1944 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1945 {
1946 unsigned osrEntryIndex = 0;
1947 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
1948 BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
1949 if (!block)
1950 continue;
1951 if (!block->isOSRTarget)
1952 continue;
1953 m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1954 }
1955 ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1956 }
1957
1958 ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
1959 {
1960 if (valueSource.isInJSStack())
1961 return valueSource.valueRecovery();
1962
1963 ASSERT(valueSource.kind() == HaveNode);
1964 Node* node = valueSource.id().node(m_jit.graph());
1965 if (isConstant(node))
1966 return ValueRecovery::constant(valueOfJSConstant(node));
1967
1968 return ValueRecovery();
1969 }
1970
1971 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1972 {
1973 Edge child3 = m_jit.graph().varArgChild(node, 2);
1974 Edge child4 = m_jit.graph().varArgChild(node, 3);
1975
1976 ArrayMode arrayMode = node->arrayMode();
1977
1978 GPRReg baseReg = base.gpr();
1979 GPRReg propertyReg = property.gpr();
1980
1981 SpeculateDoubleOperand value(this, child3);
1982
1983 FPRReg valueReg = value.fpr();
1984
1985 DFG_TYPE_CHECK(
1986 JSValueRegs(), child3, SpecRealNumber,
1987 m_jit.branchDouble(
1988 MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1989
1990 if (!m_compileOkay)
1991 return;
1992
1993 StorageOperand storage(this, child4);
1994 GPRReg storageReg = storage.gpr();
1995
1996 if (node->op() == PutByValAlias) {
1997 // Store the value to the array.
1998 GPRReg propertyReg = property.gpr();
1999 FPRReg valueReg = value.fpr();
2000 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2001
2002 noResult(m_currentNode);
2003 return;
2004 }
2005
2006 GPRTemporary temporary;
2007 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2008
2009 MacroAssembler::Jump slowCase;
2010
2011 if (arrayMode.isInBounds()) {
2012 speculationCheck(
2013 StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
2014 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2015 } else {
2016 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2017
2018 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2019
2020 if (!arrayMode.isOutOfBounds())
2021 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2022
2023 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2024 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2025
2026 inBounds.link(&m_jit);
2027 }
2028
2029 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2030
2031 base.use();
2032 property.use();
2033 value.use();
2034 storage.use();
2035
2036 if (arrayMode.isOutOfBounds()) {
2037 addSlowPathGenerator(
2038 slowPathCall(
2039 slowCase, this,
2040 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2041 NoResult, baseReg, propertyReg, valueReg));
2042 }
2043
2044 noResult(m_currentNode, UseChildrenCalledExplicitly);
2045 }
2046
// Emits String.prototype.charCodeAt for a known-string base with an int32
// index: bounds-checks against the string length, then loads the character
// from either the 8-bit or 16-bit backing store.
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // Fetch the StringImpl so we can inspect its flags.
    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg; the 8-bit flag selects element width.
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    integerResult(scratchReg, m_currentNode);
}
2081
// Emits string[index]: bounds-checks, loads the character from the 8- or
// 16-bit backing store, then materializes the result as one of the VM's
// preallocated single-character strings. Characters >= 0x100 (and cells not
// present in the small-strings table) bail via speculation checks.
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // Fetch the StringImpl to test the 8-bit flag.
    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // Only character codes below 0x100 (Latin-1) have entries in the
    // single-character string table; anything larger bails out.
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100)));

    // 8 bit string values don't need the range check.
    cont8Bit.link(&m_jit);

    GPRTemporary smallStrings(this);
    GPRReg smallStringsReg = smallStrings.gpr();
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
    // A null table entry means the small string hasn't been created; bail.
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    cellResult(scratchReg, m_currentNode);
}
2124
// Emits String.fromCharCode(int32): fast path looks the code up in the VM's
// single-character string table; missing entries or large code points call
// operationStringFromCharCode on the slow path.
void SpeculativeJIT::compileFromCharCode(Node* node)
{
    SpeculateStrictInt32Operand property(this, node->child1());
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();

    JITCompiler::JumpList slowCases;
    // NOTE(review): AboveOrEqual 0xff sends code point 0xff itself to the
    // slow path even though the table appears to cover it (compare the
    // 0x100 bound used in compileGetByValOnString) — conservative but correct.
    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);

    // Null entry: the small string hasn't been materialized yet.
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
    cellResult(scratchReg, m_currentNode);
}
2143
// Classifies how an operand is currently materialized so compileValueToInt32
// can pick the cheapest conversion strategy: already-integer, double, or a
// generic JSValue needing full dispatch. Boolean/cell register formats
// terminate speculation (the ToInt32 speculation has failed).
GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("checkGeneratedTypeForToInt32@%d ", node->index());
#endif
    VirtualRegister virtualRegister = node->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatStorage:
        // Storage pointers can never flow into ToInt32; crashes here.
        // (No break needed: RELEASE_ASSERT_NOT_REACHED does not return.)
        RELEASE_ASSERT_NOT_REACHED();

    case DataFormatBoolean:
    case DataFormatCell:
        // Unboxed boolean/cell contradicts the number speculation; give up
        // on this code path entirely.
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return GeneratedOperandTypeUnknown;

    case DataFormatNone:
    case DataFormatJSCell:
    case DataFormatJS:
    case DataFormatJSBoolean:
        // Boxed (or not-yet-generated) values need the generic JSValue path.
        return GeneratedOperandJSValue;

    case DataFormatJSInteger:
    case DataFormatInteger:
        return GeneratedOperandInteger;

    case DataFormatJSDouble:
    case DataFormatDouble:
        return GeneratedOperandDouble;

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return GeneratedOperandTypeUnknown;
    }
}
2180
// Emits the ValueToInt32 node: converts the child to an int32 following the
// ECMAScript ToInt32 rules for the speculated use kind. Strategy depends on
// how the operand is currently materialized (see
// checkGeneratedTypeForToInt32): integers copy through, doubles truncate
// with a slow-path fallback, and boxed JSValues get full tag dispatch.
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        // Already an int32: just copy into a fresh result register.
        SpeculateIntegerOperand op1(this, node->child1());
        GPRTemporary result(this, op1);
        m_jit.move(op1.gpr(), result.gpr());
        integerResult(result.gpr(), node, op1.format());
        return;
    }

    case NumberUse:
    case NotCellUse: {
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            SpeculateIntegerOperand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, op1);
            m_jit.move(op1.gpr(), result.gpr());
            integerResult(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandDouble: {
            GPRTemporary result(this);
            SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
            FPRReg fpr = op1.fpr();
            GPRReg gpr = result.gpr();
            // Try direct truncation; values it cannot represent take the
            // slow path through the toInt32 runtime function.
            JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);

            addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));

            integerResult(gpr, node);
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            // Int32s compare numerically >= tagTypeNumber on 64-bit.
            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                // NumberUse: anything without number tag bits fails the check.
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecNumber,
                    m_jit.branchTest64(
                        MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
            } else {
                // NotCellUse: non-numbers are allowed as long as they are not cells.
                JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);

                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCell,
                    m_jit.branchTest64(
                        JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));

                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
                converted.append(m_jit.jump());

                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            // The toInt32 call clobbers registers; spill and refill around it.
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = m_generationInfo[virtualRegister];

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();

            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInteger)
                // Known int32: the payload already holds the answer.
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                if (node->child1().useKind() == NumberUse) {
                    // Number tags are numerically below LowestTag.
                    DFG_TYPE_CHECK(
                        JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));

                    DFG_TYPE_CHECK(
                        JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
                        m_jit.branch32(
                            JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));

                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());

                    isBoolean.link(&m_jit);
                    // Boolean payload is already 0 or 1.
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());

                    isNumber.link(&m_jit);
                }

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                // The toInt32 call clobbers registers; spill and refill around it.
                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            integerResult(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            // Speculation already terminated; nothing to emit.
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    case BooleanUse: {
        SpeculateBooleanOperand op1(this, node->child1());
        GPRTemporary result(this, op1);

        // Masking with 1 maps the boolean encoding onto 0/1.
        m_jit.move(op1.gpr(), result.gpr());
        m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());

        integerResult(result.gpr(), node);
        return;
    }

    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
2350
// Emits UInt32ToNumber: reinterprets an int32 register as unsigned. If the
// node cannot be speculated as an integer it always produces a double
// (adding 2^32 to fix up negative bit patterns); otherwise it speculates the
// value is non-negative, using a forward exit when that fails.
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (!nodeCanSpeculateInteger(node->arithNodeFlags())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.

        IntegerOperand op1(this, node->child1());
        FPRTemporary result(this);

        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();

        m_jit.convertInt32ToDouble(inputGPR, outputFPR);

        // A negative int32 bit pattern represents a uint32 >= 2^31; adding
        // 2^32 yields the correct unsigned value as a double.
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);

        doubleResult(outputFPR, node);
        return;
    }

    IntegerOperand op1(this, node->child1());
    GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.

    m_jit.move(op1.gpr(), result.gpr());

    // Test the operand is positive. This is a very special speculation check - we actually
    // use roll-forward speculation here, where if this fails, we jump to the baseline
    // instruction that follows us, rather than the one we're executing right now. We have
    // to do this because by this point, the original values necessary to compile whatever
    // operation the UInt32ToNumber originated from might be dead.
    forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));

    integerResult(result.gpr(), node, op1.format());
}
2387
// Emits DoubleAsInt32: converts a double to int32, exiting (forward, with
// the original double recoverable from its FPR) if the value is not exactly
// representable — including, unless the node may ignore it, negative zero.
void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);

    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList failureCases;
    // Only check for -0 if the node's flags say the distinction matters.
    bool negZeroCheck = !nodeCanIgnoreNegativeZero(node->arithNodeFlags());
    m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
    forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));

    integerResult(resultGPR, node);
}
2405
// Emits Int32ToDouble (and its ForwardInt32ToDouble variant): produces a
// double from a child that is either a proven int32 (cheap conversion) or a
// boxed JSValue, in which case a type check filters out non-numbers and the
// double payload is unboxed directly.
void SpeculativeJIT::compileInt32ToDouble(Node* node)
{
    ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.

    if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
        // Abstract state proves the child is an int32: straight conversion.
        SpeculateIntegerOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);
        m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
        doubleResult(result.fpr(), node);
        return;
    }

    JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
    FPRTemporary result(this);

#if USE(JSVALUE64)
    GPRTemporary temp(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg tempGPR = temp.gpr();
    FPRReg resultFPR = result.fpr();

    // Int32s compare numerically >= tagTypeNumber on 64-bit.
    JITCompiler::Jump isInteger = m_jit.branch64(
        MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);

    if (needsTypeCheck(node->child1(), SpecNumber)) {
        // The forward variant exits to the *next* bytecode, recovering the
        // boxed value from op1GPR; the normal variant exits backward.
        if (node->op() == ForwardInt32ToDouble) {
            forwardTypeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
                ValueRecovery::inGPR(op1GPR, DataFormatJS));
        } else {
            backwardTypeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecNumber,
                m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
        }
    }

    // Boxed double: strip the tag and reinterpret.
    m_jit.move(op1GPR, tempGPR);
    unboxDouble(tempGPR, resultFPR);
    JITCompiler::Jump done = m_jit.jump();

    isInteger.link(&m_jit);
    m_jit.convertInt32ToDouble(op1GPR, resultFPR);
    done.link(&m_jit);
#else
    FPRTemporary temp(this);

    GPRReg op1TagGPR = op1.tagGPR();
    GPRReg op1PayloadGPR = op1.payloadGPR();
    FPRReg tempFPR = temp.fpr();
    FPRReg resultFPR = result.fpr();

    JITCompiler::Jump isInteger = m_jit.branch32(
        MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));

    if (needsTypeCheck(node->child1(), SpecNumber)) {
        // Number tags are numerically below LowestTag; anything at or above
        // it is not a double and fails the check.
        if (node->op() == ForwardInt32ToDouble) {
            forwardTypeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
                ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
        } else {
            backwardTypeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }
    }

    unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
    JITCompiler::Jump done = m_jit.jump();

    isInteger.link(&m_jit);
    m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
    done.link(&m_jit);
#endif

    doubleResult(resultFPR, node);
}
2485
// Maps a double onto the byte range used by clamped typed-array stores:
// biases by 0.5 (round-half-up before later truncation) and clamps the
// result into [0, 255]. NaN inputs fail every ordered comparison and
// therefore clamp to 0.
static double clampDoubleToByte(double d)
{
    const double biased = d + 0.5;
    if (biased > 255)
        return 255;
    if (biased > 0)
        return biased;
    return 0; // Covers non-positive values and NaN alike.
}
2495
// Emits code that clamps the int32 in |result| into [0, 255] in place.
// The first (unsigned) comparison lets non-negative in-range values skip
// everything; negative values look huge unsigned, fall through, fail the
// signed GreaterThan, and get zeroed.
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    // Not in bounds and not (signed) too big: must be negative — clamp to 0.
    jit.xorPtr(result, result);
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    clamped.link(&jit);
    inBounds.link(&jit);
}
2507
// Emits code that converts the double in |source| to a byte value in
// |result|, rounding half-up and clamping into [0, 255]. NaN is caught by
// the unordered "too small" comparison and clamps to 0. |scratch| is
// clobbered.
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(&zero, scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(&byteMax, scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);

    jit.loadDouble(&half, scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);
    MacroAssembler::Jump truncatedInt = jit.jump();

    tooSmall.link(&jit);
    jit.xorPtr(result, result); // Clamp to 0 (also the NaN case).
    MacroAssembler::Jump zeroed = jit.jump();

    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);

    truncatedInt.link(&jit);
    zeroed.link(&jit);

}
2537
// Emits a load from an integer typed array: bounds-checks against the array
// length, loads with the correct width/signedness, and boxes the result.
// Only unsigned 32-bit loads need special handling, since their values may
// not fit in an int32.
void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor& descriptor, Node* node, size_t elementSize, TypedArraySignedness signedness)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // Unsigned comparison rejects both negative and too-large indices.
    speculationCheck(
        Uncountable, JSValueRegs(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
    switch (elementSize) {
    case 1:
        if (signedness == SignedTypedArray)
            m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (signedness == SignedTypedArray)
            m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    // Sub-32-bit and signed loads always fit in an int32.
    if (elementSize < 4 || signedness == SignedTypedArray) {
        integerResult(resultReg, node);
        return;
    }

    ASSERT(elementSize == 4 && signedness == UnsignedTypedArray);
    if (node->shouldSpeculateInteger()) {
        // Speculate the uint32 fits in int32; forward-exit with the unsigned
        // value recoverable otherwise.
        forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
        integerResult(resultReg, node);
        return;
    }

    // Produce a double: add 2^32 when the top bit is set so negative int32
    // bit patterns become the correct unsigned value.
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
2595
// Emits a store into an integer typed array. The value child may be a
// constant, an int32, or a double; each is converted to an int32 (with
// optional byte clamping for Uint8ClampedArray-style stores) before the
// width-appropriate store. PutByVal stores silently skip out-of-bounds
// indices; PutByValAlias assumes the index was already checked.
void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node* node, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();

    Edge valueUse = m_jit.graph().varArgChild(node, 2);

    // |value| adopts whichever temporary ends up holding the converted int32.
    GPRTemporary value;
    GPRReg valueGPR;

    if (valueUse->isConstant()) {
        JSValue jsValue = valueOfJSConstant(valueUse.node());
        if (!jsValue.isNumber()) {
            // Non-number constant contradicts the speculation; bail out.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
            noResult(node);
            return;
        }
        // Convert the constant at compile time.
        double d = jsValue.asNumber();
        if (rounding == ClampRounding) {
            ASSERT(elementSize == 1);
            d = clampDoubleToByte(d);
        }
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateIntegerOperand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (rounding == ClampRounding) {
                ASSERT(elementSize == 1);
                compileClampIntegerToByte(m_jit, scratchReg);
            }
            value.adopt(scratch);
            valueGPR = scratchReg;
            break;
        }

        case NumberUse: {
            if (rounding == ClampRounding) {
                ASSERT(elementSize == 1);
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
                valueGPR = gpr;
            } else {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                // NaN stores 0 (value != itself only for NaN).
                MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::Jump fixed = m_jit.jump();
                notNaN.link(&m_jit);

                // Truncate toward zero; hard cases go to the toInt32 slow path.
                MacroAssembler::Jump failed;
                if (signedness == SignedTypedArray)
                    failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
                else
                    failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);

                addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));

                fixed.link(&m_jit);
                value.adopt(result);
                valueGPR = gpr;
            }
            break;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // The converted value must not alias any of the addressing registers.
    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds;
    if (node->op() == PutByVal)
        outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));

    switch (elementSize) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    // Out-of-bounds typed-array stores are no-ops: just skip the store.
    if (node->op() == PutByVal)
        outOfBounds.link(&m_jit);
    noResult(node);
}
2705
// Emits the fast path for GetByVal on a float typed array (elementSize 4 or 8).
// An out-of-bounds index takes an Uncountable OSR exit; a 4-byte load is
// widened to double; any loaded NaN is replaced with the canonical QNaN
// (presumably so impure NaN bit patterns cannot leak into the value
// representation — NOTE(review): confirm against the NaN-boxing scheme).
void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor& descriptor, Node* node, size_t elementSize)
{
    SpeculateCellOperand base(this, node->child1());            // the typed-array cell
    SpeculateStrictInt32Operand property(this, node->child2()); // the index
    StorageOperand storage(this, node->child3());               // pointer to the backing store

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    // Bounds check against the array's length field; exit to the baseline on failure.
    speculationCheck(
        Uncountable, JSValueRegs(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
    switch (elementSize) {
    case 4:
        // Float32Array: load 32 bits and widen to double for the JS number result.
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8: {
        // Float64Array: load the double directly.
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    // Purify NaN: if the loaded value is NaN (it compares unequal to itself),
    // overwrite it with the canonical quiet NaN constant.
    MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
    static const double NaN = QNaN;
    m_jit.loadDouble(&NaN, resultReg);
    notNaN.link(&m_jit);

    doubleResult(resultReg, node);
}
2744
2745 void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node* node, size_t elementSize)
2746 {
2747 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2748 GPRReg storageReg = storage.gpr();
2749
2750 Edge baseUse = m_jit.graph().varArgChild(node, 0);
2751 Edge valueUse = m_jit.graph().varArgChild(node, 2);
2752
2753 SpeculateDoubleOperand valueOp(this, valueUse);
2754 FPRTemporary scratch(this);
2755 FPRReg valueFPR = valueOp.fpr();
2756 FPRReg scratchFPR = scratch.fpr();
2757
2758 ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2759
2760 MacroAssembler::Jump outOfBounds;
2761 if (node->op() == PutByVal)
2762 outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
2763
2764 switch (elementSize) {
2765 case 4: {
2766 m_jit.moveDouble(valueFPR, scratchFPR);
2767 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2768 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2769 break;
2770 }
2771 case 8:
2772 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2773 break;
2774 default:
2775 RELEASE_ASSERT_NOT_REACHED();
2776 }
2777 if (node->op() == PutByVal)
2778 outOfBounds.link(&m_jit);
2779 noResult(node);
2780 }
2781
// Emits the prototype-chain walk for instanceof, assuming valueReg already
// holds a cell. On entry, prototypeReg holds the candidate prototype cell.
// On exit, scratchReg holds the boolean result (an encoded JSValue boolean on
// 64-bit; a raw 0/1 payload on 32-bit). The value in scratchReg is clobbered
// throughout as the chain cursor.
void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
{
    // Check that prototype is an object.
    m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
    speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));

    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);

    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    // Load the current cell's structure, then its prototype slot.
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
#if USE(JSVALUE64)
    m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
    // Loop while the prototype is still a cell (tag bits clear); a non-cell
    // (null) terminates the chain.
    m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
#else
    // 32-bit: compare payloads; a zero payload terminates the chain.
    m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
    m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif

    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();

    // Match - result is true.
    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif

    putResult.link(&m_jit);
}
2821
// Compiles the InstanceOf node. child1 is the tested value, child2 the
// prototype (always speculated cell). If the value is UntypedUse it may not
// be a cell, in which case non-cells short-circuit to false; otherwise both
// operands are speculated cells and we go straight to the chain walk.
void SpeculativeJIT::compileInstanceOf(Node* node)
{
    if (node->child1().useKind() == UntypedUse) {
        // It might not be a cell. Speculate less aggressively.
        // Or: it might only be used once (i.e. by us), so we get zero benefit
        // from speculating any more aggressively than we absolutely need to.

        JSValueOperand value(this, node->child1());
        SpeculateCellOperand prototype(this, node->child2());
        GPRTemporary scratch(this);

        GPRReg prototypeReg = prototype.gpr();
        GPRReg scratchReg = scratch.gpr();

#if USE(JSVALUE64)
        // Cells have zero tag bits; pre-load "false" into the result register
        // so the non-cell path can jump straight to done.
        GPRReg valueReg = value.gpr();
        MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
        m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
        // 32-bit: a cell is identified by its tag word.
        GPRReg valueTagReg = value.tagGPR();
        GPRReg valueReg = value.payloadGPR();
        MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
        m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif

        // Non-cell: result is already false.
        MacroAssembler::Jump done = m_jit.jump();

        isCell.link(&m_jit);

        // Cell: perform the actual prototype-chain walk; result lands in scratchReg.
        compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);

        done.link(&m_jit);

#if USE(JSVALUE64)
        jsValueResult(scratchReg, node, DataFormatJSBoolean);
#else
        booleanResult(scratchReg, node);
#endif
        return;
    }

    // Cell-speculated path: no non-cell check needed.
    SpeculateCellOperand value(this, node->child1());
    SpeculateCellOperand prototype(this, node->child2());

    GPRTemporary scratch(this);

    GPRReg valueReg = value.gpr();
    GPRReg prototypeReg = prototype.gpr();
    GPRReg scratchReg = scratch.gpr();

    compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);

#if USE(JSVALUE64)
    jsValueResult(scratchReg, node, DataFormatJSBoolean);
#else
    booleanResult(scratchReg, node);
#endif
}
2880
2881 void SpeculativeJIT::compileSoftModulo(Node* node)
2882 {
2883 // In the fast path, the dividend value could be the final result
2884 // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
2885 SpeculateStrictInt32Operand op1(this, node->child1());
2886 #if CPU(X86) || CPU(X86_64)
2887 if (isInt32Constant(node->child2().node())) {
2888 int32_t divisor = valueOfInt32Constant(node->child2().node());
2889 if (divisor) {
2890 GPRReg op1Gpr = op1.gpr();
2891
2892 GPRTemporary eax(this, X86Registers::eax);
2893 GPRTemporary edx(this, X86Registers::edx);
2894 GPRTemporary scratch(this);
2895 GPRReg scratchGPR = scratch.gpr();
2896
2897 GPRReg op1SaveGPR;
2898 if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
2899 op1SaveGPR = allocate();
2900 ASSERT(op1Gpr != op1SaveGPR);
2901 m_jit.move(op1Gpr, op1SaveGPR);
2902 } else
2903 op1SaveGPR = op1Gpr;
2904 ASSERT(op1SaveGPR != X86Registers::eax);
2905 ASSERT(op1SaveGPR != X86Registers::edx);
2906
2907 m_jit.move(op1Gpr, eax.gpr());
2908 m_jit.move(TrustedImm32(divisor), scratchGPR);
2909 if (divisor == -1)
2910 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, eax.gpr(), TrustedImm32(-2147483647-1)));
2911 m_jit.assembler().cdq();
2912 m_jit.assembler().idivl_r(scratchGPR);
2913 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
2914 // Check that we're not about to create negative zero.
2915 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
2916 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
2917 numeratorPositive.link(&m_jit);
2918 }
2919 if (op1SaveGPR != op1Gpr)
2920 unlock(op1SaveGPR);
2921
2922 integerResult(edx.gpr(), node);
2923 return;
2924 }
2925 }
2926 #elif CPU(ARM64)
2927 if (isInt32Constant(node->child2().node())) {
2928 int32_t divisor = valueOfInt32Constant(node->child2().node());
2929 if (divisor > 0 && hasOneBitSet(divisor)) { // If power of 2 then just mask
2930 GPRReg dividendGPR = op1.gpr();
2931 GPRTemporary result(this);
2932 GPRReg resultGPR = result.gpr();
2933
2934 m_jit.assembler().cmp<32>(dividendGPR, UInt12(0));
2935 m_jit.assembler().cneg<32>(resultGPR, dividendGPR, ARM64Assembler::ConditionLT);
2936 m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
2937 m_jit.assembler().cneg<32>(resultGPR, resultGPR, ARM64Assembler::ConditionLT);
2938
2939 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
2940 // Check that we're not about to create negative zero.
2941 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
2942 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
2943 numeratorPositive.link(&m_jit);
2944 }
2945 integerResult(resultGPR, node);
2946 return;
2947 }
2948 }
2949 #elif CPU(APPLE_ARMV7S) || CPU(ARM_THUMB2)
2950 if (isInt32Constant(node->child2().node())) {
2951 int32_t divisor = valueOfInt32Constant(node->child2().node());
2952 if (divisor > 0 && hasOneBitSet(divisor)) { // If power of 2 then just mask
2953 GPRReg dividendGPR = op1.gpr();
2954 GPRTemporary result(this);
2955 GPRReg resultGPR = result.gpr();
2956
2957 m_jit.assembler().cmp(dividendGPR, ARMThumbImmediate::makeEncodedImm(0));
2958 m_jit.assembler().it(ARMv7Assembler::ConditionLT, false);
2959 m_jit.assembler().neg(resultGPR, dividendGPR);
2960 m_jit.assembler().mov(resultGPR, dividendGPR);
2961 m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
2962 m_jit.assembler().it(ARMv7Assembler::ConditionLT);
2963 m_jit.assembler().neg(resultGPR, resultGPR);
2964
2965 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
2966 // Check that we're not about to create negative zero.
2967 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
2968 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
2969 numeratorPositive.link(&m_jit);
2970 }
2971 integerResult(resultGPR, node);
2972 return;
2973 }
2974 }
2975 #endif
2976
2977 SpeculateIntegerOperand op2(this, node->child2());
2978 #if CPU(X86) || CPU(X86_64)
2979 GPRTemporary eax(this, X86Registers::eax);
2980 GPRTemporary edx(this, X86Registers::edx);
2981 GPRReg op1GPR = op1.gpr();
2982 GPRReg op2GPR = op2.gpr();
2983
2984 GPRReg op2TempGPR;
2985 GPRReg temp;
2986 GPRReg op1SaveGPR;
2987
2988 if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
2989 op2TempGPR = allocate();
2990 temp = op2TempGPR;
2991 } else {
2992 op2TempGPR = InvalidGPRReg;
2993 if (op1GPR == X86Registers::eax)
2994 temp = X86Registers::edx;
2995 else
2996 temp = X86Registers::eax;
2997 }
2998
2999 if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
3000 op1SaveGPR = allocate();
3001 ASSERT(op1GPR != op1SaveGPR);
3002 m_jit.move(op1GPR, op1SaveGPR);
3003 } else
3004 op1SaveGPR = op1GPR;
3005
3006 ASSERT(temp != op1GPR);
3007 ASSERT(temp != op2GPR);
3008 ASSERT(op1SaveGPR != X86Registers::eax);
3009 ASSERT(op1SaveGPR != X86Registers::edx);
3010
3011 m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
3012
3013 JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
3014
3015 JITCompiler::Jump done;
3016 // FIXME: if the node is not used as number then we can do this more easily.
3017 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
3018 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
3019
3020 safeDenominator.link(&m_jit);
3021
3022 if (op2TempGPR != InvalidGPRReg) {
3023 m_jit.move(op2GPR, op2TempGPR);
3024 op2GPR = op2TempGPR;
3025 }
3026
3027 m_jit.move(op1GPR, eax.gpr());
3028 m_jit.assembler().cdq();
3029 m_jit.assembler().idivl_r(op2GPR);
3030
3031 if (op2TempGPR != InvalidGPRReg)
3032 unlock(op2TempGPR);
3033
3034 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3035 // Check that we're not about to create negative zero.
3036 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
3037 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
3038 numeratorPositive.link(&m_jit);
3039 }
3040
3041 if (op1SaveGPR != op1GPR)
3042 unlock(op1SaveGPR);
3043
3044 integerResult(edx.gpr(), node);
3045 #elif CPU(ARM64)
3046 GPRTemporary temp(this);
3047 GPRTemporary quotientThenRemainder(this);
3048 GPRTemporary multiplyAnswer(this);
3049 GPRReg dividendGPR = op1.gpr();
3050 GPRReg divisorGPR = op2.gpr();
3051 GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3052 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3053
3054 m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3055 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3056 m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3057
3058 // If the user cares about negative zero, then speculate that we're not about
3059 // to produce negative zero.
3060 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3061 // Check that we're not about to create negative zero.
3062 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3063 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3064 numeratorPositive.link(&m_jit);
3065 }
3066
3067 integerResult(quotientThenRemainderGPR, node);
3068 #elif CPU(APPLE_ARMV7S)
3069 GPRTemporary temp(this);
3070 GPRTemporary quotientThenRemainder(this);
3071 GPRTemporary multiplyAnswer(this);
3072 GPRReg dividendGPR = op1.gpr();
3073 GPRReg divisorGPR = op2.gpr();
3074 GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
3075 GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
3076
3077 m_jit.assembler().sdiv(quotientThenRemainderGPR, dividendGPR, divisorGPR);
3078 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
3079 m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
3080
3081 // If the user cares about negative zero, then speculate that we're not about
3082 // to produce negative zero.
3083 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3084 // Check that we're not about to create negative zero.
3085 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
3086 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
3087 numeratorPositive.link(&m_jit);
3088 }
3089
3090 integerResult(quotientThenRemainderGPR, node);
3091 #else // not architecture that can do integer division
3092 // Do this the *safest* way possible: call out to a C function that will do the modulo,
3093 // and then attempt to convert back.
3094 GPRReg op1GPR = op1.gpr();
3095 GPRReg op2GPR = op2.gpr();
3096
3097 FPRResult result(this);
3098
3099 flushRegisters();
3100 callOperation(operationFModOnInts, result.fpr(), op1GPR, op2GPR);
3101
3102 FPRTemporary scratch(this);
3103 GPRTemporary intResult(this);
3104 JITCompiler::JumpList failureCases;
3105 m_jit.branchConvertDoubleToInt32(result.fpr(), intResult.gpr(), failureCases, scratch.fpr(), false);
3106 speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
3107 if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
3108 // Check that we're not about to create negative zero.
3109 JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1GPR, TrustedImm32(0));
3110 speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, intResult.gpr()));
3111 numeratorPositive.link(&m_jit);
3112 }
3113
3114 integerResult(intResult.gpr(), node);
3115 #endif // CPU(X86) || CPU(X86_64)
3116 }
3117
// Compiles ValueAdd/ArithAdd. Int32Use emits a 32-bit add, with an overflow
// OSR exit (plus a SpeculationRecovery so the exit can undo an in-place add)
// unless the node's flags allow truncation. NumberUse emits a double add;
// UntypedUse defers to the generic compileValueAdd.
void SpeculativeJIT::compileAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        // Constant-LHS fast path.
        // NOTE(review): isNumberConstant is paired with valueOfInt32Constant —
        // this relies on fixup having ensured int32-representable constants
        // under Int32Use; confirm.
        if (isNumberConstant(node->child1().node())) {
            int32_t imm1 = valueOfInt32Constant(node->child1().node());
            SpeculateIntegerOperand op2(this, node->child2());
            GPRTemporary result(this);

            if (nodeCanTruncateInteger(node->arithNodeFlags())) {
                m_jit.move(op2.gpr(), result.gpr());
                m_jit.add32(Imm32(imm1), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));

            integerResult(result.gpr(), node);
            return;
        }

        // Constant-RHS fast path (mirror of the above).
        if (isNumberConstant(node->child2().node())) {
            SpeculateIntegerOperand op1(this, node->child1());
            int32_t imm2 = valueOfInt32Constant(node->child2().node());
            GPRTemporary result(this);

            if (nodeCanTruncateInteger(node->arithNodeFlags())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.add32(Imm32(imm2), result.gpr());
            } else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));

            integerResult(result.gpr(), node);
            return;
        }

        // General register + register case.
        SpeculateIntegerOperand op1(this, node->child1());
        SpeculateIntegerOperand op2(this, node->child2());
        GPRTemporary result(this, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (nodeCanTruncateInteger(node->arithNodeFlags())) {
            if (gpr1 == gprResult)
                m_jit.add32(gpr2, gprResult);
            else {
                m_jit.move(gpr2, gprResult);
                m_jit.add32(gpr1, gprResult);
            }
        } else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);

            // If the result aliased an operand, the OSR exit needs a recovery
            // record so it can reconstruct the clobbered operand value.
            if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), 0, check);
        }

        integerResult(gprResult, node);
        return;
    }

    case NumberUse: {
        // Double addition: no checks needed.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    case UntypedUse: {
        // Only ValueAdd can reach here with untyped operands.
        RELEASE_ASSERT(node->op() == ValueAdd);
        compileValueAdd(node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
3206
// Compiles MakeRope: inline-allocates a JSRopeString whose fibers are the two
// or three operand strings (already known to be strings), falling back to
// operationMakeRope2/3 if inline allocation fails. The new rope's flags are
// the AND of the fibers' Is8Bit flags and its length the sum of their lengths.
// NOTE(review): the length additions below are not checked for int32 overflow;
// later WebKit revisions add an overflow check here — confirm whether this
// revision needs one.
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);

    // Gather the two or three fiber registers.
    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();

    // Inline-allocate the rope cell; allocation failure jumps to slowPath.
    JITCompiler::JumpList slowPath;
    MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
    m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
    emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);

    // Null value pointer marks this string as a rope; store the fibers and
    // zero out any unused fiber slots.
    m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
    for (unsigned i = 0; i < numOpGPRs; ++i)
        m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
        m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
    // Combine flags (AND) and lengths (ADD) of all fibers. allocatorGPR is
    // reused here as the length accumulator; scratchGPR holds the flags.
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
    m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
        m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
    }
    m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
    m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
    m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));

    // Slow path: call out to the runtime with the original fiber registers.
    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    cellResult(resultGPR, node);
}
3271
// Compiles ArithSub. Int32Use emits a 32-bit subtract with an overflow OSR
// exit unless the node's flags allow truncation; constant-operand fast paths
// fold the immediate. NumberUse emits a double subtract.
void SpeculativeJIT::compileArithSub(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        // Constant-RHS fast path: result = op1 - imm2.
        if (isNumberConstant(node->child2().node())) {
            SpeculateIntegerOperand op1(this, node->child1());
            int32_t imm2 = valueOfInt32Constant(node->child2().node());
            GPRTemporary result(this);

            if (nodeCanTruncateInteger(node->arithNodeFlags())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.sub32(Imm32(imm2), result.gpr());
            } else {
#if ENABLE(JIT_CONSTANT_BLINDING)
                // Constant blinding needs an extra scratch register to
                // de-blind the immediate.
                GPRTemporary scratch(this);
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
#else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
#endif
            }

            integerResult(result.gpr(), node);
            return;
        }

        // Constant-LHS fast path: result = imm1 - op2 (imm1 loaded first,
        // then op2 subtracted in place).
        if (isNumberConstant(node->child1().node())) {
            int32_t imm1 = valueOfInt32Constant(node->child1().node());
            SpeculateIntegerOperand op2(this, node->child2());
            GPRTemporary result(this);

            m_jit.move(Imm32(imm1), result.gpr());
            if (nodeCanTruncateInteger(node->arithNodeFlags()))
                m_jit.sub32(op2.gpr(), result.gpr());
            else
                speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));

            integerResult(result.gpr(), node);
            return;
        }

        // General register + register case.
        SpeculateIntegerOperand op1(this, node->child1());
        SpeculateIntegerOperand op2(this, node->child2());
        GPRTemporary result(this);

        if (nodeCanTruncateInteger(node->arithNodeFlags())) {
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub32(op2.gpr(), result.gpr());
        } else
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));

        integerResult(result.gpr(), node);
        return;
    }

    case NumberUse: {
        // Double subtraction: no checks needed.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.subDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3344
// Compiles ArithNegate. Int32Use emits a 32-bit negate, exiting on overflow
// (negating INT_MIN) and — when the flags require it — on a zero result,
// since -0 is not representable as an int32. NumberUse flips the double sign.
void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateIntegerOperand op1(this, node->child1());
        GPRTemporary result(this);

        m_jit.move(op1.gpr(), result.gpr());

        if (nodeCanTruncateInteger(node->arithNodeFlags()))
            m_jit.neg32(result.gpr());
        else {
            // -INT_MIN overflows; exit so the result can be produced as a double.
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
            // A zero result means the input was 0 and the true answer is -0,
            // which an int32 cannot represent.
            if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags()))
                speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr()));
        }

        integerResult(result.gpr(), node);
        return;
    }

    case NumberUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);

        m_jit.negateDouble(op1.fpr(), result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3381 void SpeculativeJIT::compileArithIMul(Node* node)
3382 {
3383 SpeculateIntegerOperand op1(this, node->child1());
3384 SpeculateIntegerOperand op2(this, node->child2());
3385 GPRTemporary result(this);
3386
3387 GPRReg reg1 = op1.gpr();
3388 GPRReg reg2 = op2.gpr();
3389
3390 m_jit.move(reg1, result.gpr());
3391 m_jit.mul32(reg2, result.gpr());
3392 integerResult(result.gpr(), node);
3393 return;
3394 }
3395
// Compiles ArithMul. Int32Use emits a 32-bit multiply with an overflow OSR
// exit (unless truncation is allowed) and, when the flags require it, a
// negative-zero exit: a zero product with either operand negative would be
// -0 in JS, which an int32 cannot represent. NumberUse emits a double multiply.
void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateIntegerOperand op1(this, node->child1());
        SpeculateIntegerOperand op2(this, node->child2());
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();

        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (nodeCanTruncateInteger(node->arithNodeFlags())) {
            m_jit.move(reg1, result.gpr());
            m_jit.mul32(reg2, result.gpr());
        } else {
            speculationCheck(
                Overflow, JSValueRegs(), 0,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }

        // Check for negative zero, if the users of this node care about such things.
        if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }

        integerResult(result.gpr(), node);
        return;
    }

    case NumberUse: {
        // Double multiplication: no checks needed.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();

        m_jit.mulDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3450
3451 #if CPU(X86) || CPU(X86_64)
// Compiles ArithDiv on x86/x86_64 using idiv (quotient in eax, remainder in
// edx). The dangerous denominators 0 and -1 are detected with a single
// unsigned compare on (divisor + 1); depending on whether the node is used
// as a number, they either force an OSR exit or are resolved inline.
// A non-zero remainder, and a -0 result when observable, also force an exit
// because the true JS result would then be a double.
void SpeculativeJIT::compileIntegerArithDivForX86(Node* node)
{
    SpeculateIntegerOperand op1(this, node->child1());
    SpeculateIntegerOperand op2(this, node->child2());
    GPRTemporary eax(this, X86Registers::eax);
    GPRTemporary edx(this, X86Registers::edx);
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    // Keep the divisor out of eax/edx, which idiv clobbers.
    GPRReg op2TempGPR;
    GPRReg temp;
    if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
        op2TempGPR = allocate();
        temp = op2TempGPR;
    } else {
        op2TempGPR = InvalidGPRReg;
        if (op1GPR == X86Registers::eax)
            temp = X86Registers::edx;
        else
            temp = X86Registers::eax;
    }

    ASSERT(temp != op1GPR);
    ASSERT(temp != op2GPR);

    // divisor + 1 maps {0, -1} to {1, 0}, i.e. unsigned <= 1, so one branch
    // guards both idiv-faulting denominators.
    m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);

    JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));

    JITCompiler::JumpList done;
    if (nodeUsedAsNumber(node->arithNodeFlags())) {
        // Result must be exact: a zero divisor or INT_MIN / -1 cannot produce
        // an int32, so exit.
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
    } else {
        // This is the case where we convert the result to an int after we're done, and we
        // already know that the denominator is either -1 or 0. So, if the denominator is
        // zero, then the result should be zero. If the denominator is not zero (i.e. it's
        // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
        // are happy to fall through to a normal division, since we're just dividing
        // something by negative 1.

        JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
        m_jit.move(TrustedImm32(0), eax.gpr());
        done.append(m_jit.jump());

        notZero.link(&m_jit);
        JITCompiler::Jump notNeg2ToThe31 =
            m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
        m_jit.move(op1GPR, eax.gpr());
        done.append(m_jit.jump());

        notNeg2ToThe31.link(&m_jit);
    }

    safeDenominator.link(&m_jit);

    // If the user cares about negative zero, then speculate that we're not about
    // to produce negative zero.
    if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
        // 0 / negative is -0 in JS, which an int32 cannot represent.
        MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
        speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
        numeratorNonZero.link(&m_jit);
    }

    if (op2TempGPR != InvalidGPRReg) {
        m_jit.move(op2GPR, op2TempGPR);
        op2GPR = op2TempGPR;
    }

    m_jit.move(op1GPR, eax.gpr());
    m_jit.assembler().cdq();
    m_jit.assembler().idivl_r(op2GPR);

    if (op2TempGPR != InvalidGPRReg)
        unlock(op2TempGPR);

    // Check that there was no remainder. If there had been, then we'd be obligated to
    // produce a double result instead.
    if (nodeUsedAsNumber(node->arithNodeFlags()))
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
    else
        done.link(&m_jit);

    integerResult(eax.gpr(), node);
}
3537 #elif CPU(ARM64)
// Compiles speculative integer division for ARM64 using the hardware sdiv
// instruction. ARM64 sdiv does not trap on divide-by-zero or INT_MIN/-1, so
// no pre-checks are needed; out-of-range results are caught by the
// multiply-back check below.
void SpeculativeJIT::compileIntegerArithDivForARM64(Node* node)
{
    SpeculateIntegerOperand op1(this, node->child1());
    SpeculateIntegerOperand op2(this, node->child2());
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRTemporary quotient(this);
    GPRTemporary multiplyAnswer(this);

    // If the user cares about negative zero, then speculate that we're not about
    // to produce negative zero. Negative zero can only arise when the numerator
    // is zero and the denominator is negative, hence the guarded check.
    if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
        MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
        speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
        numeratorNonZero.link(&m_jit);
    }

    m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);

    // Check that there was no remainder. If there had been, then we'd be obligated to
    // produce a double result instead. The check multiplies the quotient back by the
    // denominator and compares with the numerator; a multiply overflow or a mismatch
    // means the true result was not an exact int32.
    if (nodeUsedAsNumber(node->arithNodeFlags())) {
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
    }

    integerResult(quotient.gpr(), node);
}
3566 #elif CPU(APPLE_ARMV7S)
// Compiles speculative integer division for ARMv7s using the hardware sdiv
// instruction. Mirrors the ARM64 path above: no pre-checks for zero or
// INT_MIN/-1; inexact or overflowing results are caught by multiplying the
// quotient back and comparing with the numerator.
void SpeculativeJIT::compileIntegerArithDivForARMv7s(Node* node)
{
    SpeculateIntegerOperand op1(this, node->child1());
    SpeculateIntegerOperand op2(this, node->child2());
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRTemporary quotient(this);
    GPRTemporary multiplyAnswer(this);

    // If the user cares about negative zero, then speculate that we're not about
    // to produce negative zero. Negative zero only arises for 0 / negative.
    if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
        MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
        speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
        numeratorNonZero.link(&m_jit);
    }

    m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);

    // Check that there was no remainder. If there had been, then we'd be obligated to
    // produce a double result instead.
    if (nodeUsedAsNumber(node->arithNodeFlags())) {
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
    }

    integerResult(quotient.gpr(), node);
}
3595 #endif
3596
// Compiles ArithMod. Int32 operands take the soft-modulo path; double
// operands call out to the runtime's fmod. Any other use kind is a
// compiler bug.
void SpeculativeJIT::compileArithMod(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        compileSoftModulo(node);
        return;
    }

    case NumberUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());

        FPRReg op1FPR = op1.fpr();
        FPRReg op2FPR = op2.fpr();

        // fmod is a runtime call; all live registers must be flushed across it.
        flushRegisters();

        FPRResult result(this);

        callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3627
3628 // Returns true if the compare is fused with a subsequent branch.
bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
{
    // First try to fuse this compare with the branch that consumes it; if
    // that succeeds we are done and compilation resumes at the branch.
    if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
        return true;

    // Otherwise dispatch on the speculated types of the children. Each
    // specialized path emits its own type checks.
    if (node->isBinaryUseKind(Int32Use)) {
        compileIntegerCompare(node, condition);
        return false;
    }

    if (node->isBinaryUseKind(NumberUse)) {
        compileDoubleCompare(node, doubleCondition);
        return false;
    }

    // The following specializations only make sense for equality, not for
    // relational comparisons.
    if (node->op() == CompareEq) {
        if (node->isBinaryUseKind(StringUse)) {
            compileStringEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(BooleanUse)) {
            compileBooleanCompare(node, condition);
            return false;
        }

        if (node->isBinaryUseKind(ObjectUse)) {
            compileObjectEquality(node);
            return false;
        }

        // Mixed object / object-or-other: normalize so the pure-object edge
        // comes first.
        if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) {
            compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
            return false;
        }

        if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) {
            compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
            return false;
        }
    }

    // No speculation applies; emit the generic comparison, which may call the
    // given runtime operation.
    nonSpeculativeNonPeepholeCompare(node, condition, operation);
    return false;
}
3674
// Compiles a strict equality of `value` against a compile-time JSValue
// constant. Returns true if the compare was fused with a following branch
// (in which case compilation resumes at that branch node).
bool SpeculativeJIT::compileStrictEqForConstant(Node* node, Edge value, JSValue constant)
{
    JSValueOperand op1(this, value);

    // FIXME: This code is wrong for the case that the constant is null or undefined,
    // and the value is an object that MasqueradesAsUndefined.
    // https://bugs.webkit.org/show_bug.cgi?id=109487

    // Peephole: if the next node in the block is a branch consuming this
    // compare, emit a fused compare-and-branch instead of materializing a
    // boolean.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
        BlockIndex taken = branchNode->takenBlockIndex();
        BlockIndex notTaken = branchNode->notTakenBlockIndex();
        MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

        // The branch instruction will branch to the taken block.
        // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
        if (taken == nextBlock()) {
            condition = MacroAssembler::NotEqual;
            BlockIndex tmp = taken;
            taken = notTaken;
            notTaken = tmp;
        }

#if USE(JSVALUE64)
        // On 64-bit, a strict-equal against a constant is one full-word compare.
        branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
#else
        // On 32-bit, tag and payload are compared separately.
        GPRReg payloadGPR = op1.payloadGPR();
        GPRReg tagGPR = op1.tagGPR();
        if (condition == MacroAssembler::Equal) {
            // Drop down if not equal, go elsewhere if equal.
            MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
            branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
            notEqual.link(&m_jit);
        } else {
            // Drop down if equal, go elsewhere if not equal.
            branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
            branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
        }
#endif

        jump(notTaken);

        // The fused branch consumed both children; advance compilation to the
        // branch node so it is not compiled again.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }

    // Non-fused case: materialize the boolean result.
    GPRTemporary result(this);

#if USE(JSVALUE64)
    GPRReg op1GPR = op1.gpr();
    GPRReg resultGPR = result.gpr();
    // Start with the encoded false value; OR in the low bit to turn it into
    // true only when the full 64-bit word matches the constant.
    m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
    MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
    m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
    notEqual.link(&m_jit);
    jsValueResult(resultGPR, node, DataFormatJSBoolean);
#else
    GPRReg op1PayloadGPR = op1.payloadGPR();
    GPRReg op1TagGPR = op1.tagGPR();
    GPRReg resultGPR = result.gpr();
    // Result is 1 only if both tag and payload match the constant.
    m_jit.move(TrustedImm32(0), resultGPR);
    MacroAssembler::JumpList notEqual;
    notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
    notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
    m_jit.move(TrustedImm32(1), resultGPR);
    notEqual.link(&m_jit);
    booleanResult(resultGPR, node);
#endif

    return false;
}
3750
// Compiles a StrictEqual node, dispatching on the speculated types of both
// children. Each typed case first tries to fuse with a following branch
// (returning true and resuming compilation at the branch node); otherwise it
// materializes a boolean result and returns false.
bool SpeculativeJIT::compileStrictEq(Node* node)
{
    switch (node->binaryUseKind()) {
    case BooleanUse: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
            // The fused branch consumed both children; skip ahead to the
            // branch node so it is not compiled again.
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBooleanCompare(node, MacroAssembler::Equal);
        return false;
    }

    case Int32Use: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleIntegerBranch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileIntegerCompare(node, MacroAssembler::Equal);
        return false;
    }

    case NumberUse: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileDoubleCompare(node, MacroAssembler::DoubleEqual);
        return false;
    }

    case StringUse: {
        // String equality has no peephole form; always materialize.
        compileStringEquality(node);
        return false;
    }

    case ObjectUse: {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
            compilePeepHoleObjectEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectEquality(node);
        return false;
    }

    case UntypedUse: {
        // No useful speculation; take the generic path.
        return nonSpeculativeStrictEq(node);
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
}
3828
3829 void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
3830 {
3831 SpeculateBooleanOperand op1(this, node->child1());
3832 SpeculateBooleanOperand op2(this, node->child2());
3833 GPRTemporary result(this);
3834
3835 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
3836
3837 // If we add a DataFormatBool, we should use it here.
3838 #if USE(JSVALUE32_64)
3839 booleanResult(result.gpr(), node);
3840 #else
3841 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
3842 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
3843 #endif
3844 }
3845
// Compiles string strict equality inline: checks both operands are strings,
// fast-paths identical pointers and mismatched lengths, then byte-compares
// the characters. Only resolved (non-rope) 8-bit strings are compared
// inline; everything else takes the operationCompareStringEq slow path.
void SpeculativeJIT::compileStringEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, left);
    GPRTemporary rightTemp2(this, right);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    JITCompiler::JumpList trueCase;
    JITCompiler::JumpList falseCase;
    JITCompiler::JumpList slowCase;

    // Speculate that the left operand is a string (structure check).
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(leftGPR), node->child1(), SpecString, m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(leftGPR, JSCell::structureOffset()),
            MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    trueCase.append(m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR));

    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(rightGPR), node->child2(), SpecString, m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(rightGPR, JSCell::structureOffset()),
            MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));

    m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);

    // Different lengths can never be equal.
    falseCase.append(m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
        lengthGPR));

    // Two empty strings are trivially equal.
    trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));

    m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);

    // A null value pointer indicates an unresolved (rope) string — see the
    // operationResolveRope path in compileGetIndexedPropertyStorage. Handle
    // those in the slow path.
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
    slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));

    // The inline loop below only compares 8-bit characters; 16-bit strings
    // go to the slow path.
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));

    m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);

    // Compare characters from the last index down to 0.
    MacroAssembler::Label loop = m_jit.label();

    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // This isn't going to generate the best code on x86. But that's OK, it's still better
    // than not inlining.
    m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
    m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
    falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));

    m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);

    // Materialize the boolean result; leftTempGPR doubles as the result
    // register from here on.
    trueCase.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(TrustedImm64(ValueTrue), leftTempGPR);
#else
    m_jit.move(TrustedImm32(true), leftTempGPR);
#endif

    JITCompiler::Jump done = m_jit.jump();

    falseCase.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(TrustedImm64(ValueFalse), leftTempGPR);
#else
    m_jit.move(TrustedImm32(false), leftTempGPR);
#endif

    done.link(&m_jit);
    addSlowPathGenerator(
        slowPathCall(
            slowCase, this, operationCompareStringEq, leftTempGPR, leftGPR, rightGPR));

#if USE(JSVALUE64)
    jsValueResult(leftTempGPR, node, DataFormatJSBoolean);
#else
    booleanResult(leftTempGPR, node);
#endif
}
3950
// Loads the backing storage pointer for indexed access. For strings this is
// the character data (resolving ropes via a slow path); for typed arrays it
// is the storage slot described by the array mode's descriptor.
void SpeculativeJIT::compileGetIndexedPropertyStorage(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    GPRTemporary storage(this);
    GPRReg storageReg = storage.gpr();

    const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node->arrayMode());

    switch (node->arrayMode().type()) {
    case Array::String:
        m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);

        // A null value pointer means the string is an unresolved rope;
        // resolve it through the runtime before taking the data pointer.
        addSlowPathGenerator(
            slowPathCall(
                m_jit.branchTest32(MacroAssembler::Zero, storageReg),
                this, operationResolveRope, storageReg, baseReg));

        m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
        break;

    default:
        // All remaining modes are typed arrays with a descriptor-specified
        // storage offset.
        ASSERT(descriptor);
        m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor->m_storageOffset), storageReg);
        break;
    }

    storageResult(storageReg, node);
}
3981
// Compiles an indexed read from an Arguments object. Bails (via speculation
// checks) on out-of-bounds indices and on arguments objects that have a
// slow-arguments mapping; otherwise loads the value directly out of the
// register file that m_registers points into.
void SpeculativeJIT::compileGetByValOnArguments(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateStrictInt32Operand property(this, node->child2());
    GPRTemporary result(this);
#if USE(JSVALUE32_64)
    GPRTemporary resultTag(this);
#endif
    GPRTemporary scratch(this);

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg resultReg = result.gpr();
#if USE(JSVALUE32_64)
    GPRReg resultTagReg = resultTag.gpr();
#endif
    GPRReg scratchReg = scratch.gpr();

    if (!m_compileOkay)
        return;

    ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    // Two really lame checks.
    // 1) Index must be within the argument count.
    speculationCheck(
        Uncountable, JSValueSource(), 0,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg,
            MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_numArguments))));
    // 2) The arguments object must not have a slow-arguments mapping.
    speculationCheck(
        Uncountable, JSValueSource(), 0,
        m_jit.branchTestPtr(
            MacroAssembler::NonZero,
            MacroAssembler::Address(
                baseReg, OBJECT_OFFSETOF(Arguments, m_slowArguments))));

    // Negate the index: arguments are addressed at decreasing offsets from
    // the this-argument slot (note the -sizeof(Register) bias below).
    m_jit.move(propertyReg, resultReg);
    m_jit.neg32(resultReg);
    m_jit.signExtend32ToPtr(resultReg, resultReg);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_registers)),
        scratchReg);

#if USE(JSVALUE32_64)
    // Load tag and payload separately on 32-bit.
    m_jit.load32(
        MacroAssembler::BaseIndex(
            scratchReg, resultReg, MacroAssembler::TimesEight,
            CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register) +
            OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
        resultTagReg);
    m_jit.load32(
        MacroAssembler::BaseIndex(
            scratchReg, resultReg, MacroAssembler::TimesEight,
            CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register) +
            OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
        resultReg);
    jsValueResult(resultTagReg, resultReg, node);
#else
    m_jit.load64(
        MacroAssembler::BaseIndex(
            scratchReg, resultReg, MacroAssembler::TimesEight,
            CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register)),
        resultReg);
    jsValueResult(resultReg, node);
#endif
}
4048
// Loads the length of an Arguments object. Bails via a speculation check if
// the length property has been overridden, since then m_numArguments no
// longer reflects the observable length.
void SpeculativeJIT::compileGetArgumentsLength(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRTemporary result(this, base);

    GPRReg baseReg = base.gpr();
    GPRReg resultReg = result.gpr();

    if (!m_compileOkay)
        return;

    ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));

    speculationCheck(
        Uncountable, JSValueSource(), 0,
        m_jit.branchTest8(
            MacroAssembler::NonZero,
            MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_overrodeLength))));

    m_jit.load32(
        MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_numArguments)),
        resultReg);
    integerResult(resultReg, node);
}
4073
// Compiles GetArrayLength for every supported array mode: butterfly-backed
// arrays read the public length from storage, strings read JSString's
// length, arguments delegate to compileGetArgumentsLength, and the default
// case covers typed arrays via their descriptor.
void SpeculativeJIT::compileGetArrayLength(Node* node)
{
    const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node->arrayMode());

    switch (node->arrayMode().type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        integerResult(resultReg, node);
        break;
    }
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        // The stored length is reinterpreted as a signed int32 result; bail
        // if it reads back negative rather than produce a bogus length.
        speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));

        integerResult(resultReg, node);
        break;
    }
    case Array::String: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
        integerResult(resultGPR, node);
        break;
    }
    case Array::Arguments: {
        compileGetArgumentsLength(node);
        break;
    }
    default:
        // Typed arrays: length lives at a descriptor-specified offset.
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        ASSERT(descriptor);
        m_jit.load32(MacroAssembler::Address(baseGPR, descriptor->m_lengthOffset), resultGPR);
        integerResult(resultGPR, node);
        break;
    }
}
4128
4129 void SpeculativeJIT::compileNewFunctionNoCheck(Node* node)
4130 {
4131 GPRResult result(this);
4132 GPRReg resultGPR = result.gpr();
4133 flushRegisters();
4134 callOperation(
4135 operationNewFunctionNoCheck, resultGPR, m_jit.codeBlock()->functionDecl(node->functionDeclIndex()));
4136 cellResult(resultGPR, node);
4137 }
4138
4139 void SpeculativeJIT::compileNewFunctionExpression(Node* node)
4140 {
4141 GPRResult result(this);
4142 GPRReg resultGPR = result.gpr();
4143 flushRegisters();
4144 callOperation(
4145 operationNewFunctionExpression,
4146 resultGPR,
4147 m_jit.codeBlock()->functionExpr(node->functionExprIndex()));
4148 cellResult(resultGPR, node);
4149 }
4150
// Attempts to compile RegExpExec fused with a following branch. When the
// exec result is only consumed by a branch (refcount 1), we can call the
// cheaper operationRegExpTest — only the match/no-match boolean matters —
// and branch on it directly. Returns false (emitting nothing) when there is
// no branch to fuse with.
bool SpeculativeJIT::compileRegExpExec(Node* node)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock == UINT_MAX)
        return false;
    Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
    // This optimization is only valid if the branch is the sole user.
    ASSERT(node->adjustedRefCount() == 1);

    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // If the taken block is the fall-through block, swap the targets and
    // invert the test so we can fall through to it.
    bool invert = false;
    if (taken == nextBlock()) {
        invert = true;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand base(this, node->child1());
    SpeculateCellOperand argument(this, node->child2());
    GPRReg baseGPR = base.gpr();
    GPRReg argumentGPR = argument.gpr();

    flushRegisters();
    GPRResult result(this);
    callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);

    branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
    jump(notTaken);

    // The fused branch consumed both children; resume compilation at the
    // branch node.
    use(node->child1());
    use(node->child2());
    m_indexInBlock = branchIndexInBlock;
    m_currentNode = branchNode;

    return true;
}
4189
// Allocates the initial out-of-line property storage (butterfly) for an
// object transitioning from zero out-of-line capacity. Objects with an
// indexing header must go through the runtime so the header is preserved;
// otherwise we allocate inline with a runtime slow path.
void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
    if (hasIndexingHeader(node->structureTransitionData().previousStructure->indexingType())) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRResult result(this);
        callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR);

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    GPRTemporary scratch(this);

    GPRReg baseGPR = base.gpr();
    GPRReg scratchGPR = scratch.gpr();

    // This fast path only handles the 0 -> initialOutOfLineCapacity transition.
    ASSERT(!node->structureTransitionData().previousStructure->outOfLineCapacity());
    ASSERT(initialOutOfLineCapacity == node->structureTransitionData().newStructure->outOfLineCapacity());

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(
            TrustedImm32(initialOutOfLineCapacity * sizeof(JSValue)), scratchGPR);

    // Bias the allocation pointer by one JSValue to form the butterfly
    // pointer. NOTE(review): assumes emitAllocateBasicStorage leaves the
    // raw allocation address in scratchGPR — confirm against its definition.
    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorageWithInitialCapacity, scratchGPR));

    m_jit.storePtr(scratchGPR, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR, node);
}
4228
// Grows existing out-of-line property storage by outOfLineGrowthFactor.
// Objects with an indexing header must go through the runtime; otherwise we
// allocate new storage inline and copy the old out-of-line slots over.
void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
{
    size_t oldSize = node->structureTransitionData().previousStructure->outOfLineCapacity() * sizeof(JSValue);
    size_t newSize = oldSize * outOfLineGrowthFactor;
    ASSERT(newSize == node->structureTransitionData().newStructure->outOfLineCapacity() * sizeof(JSValue));

    if (hasIndexingHeader(node->structureTransitionData().previousStructure->indexingType())) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRResult result(this);
        callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue));

        storageResult(result.gpr(), node);
        return;
    }

    SpeculateCellOperand base(this, node->child1());
    StorageOperand oldStorage(this, node->child2());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg baseGPR = base.gpr();
    GPRReg oldStorageGPR = oldStorage.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::Jump slowPath =
        emitAllocateBasicStorage(TrustedImm32(newSize), scratchGPR2);

    // Bias the allocation pointer by one JSValue to form the butterfly
    // pointer, mirroring compileAllocatePropertyStorage above.
    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR2);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocatePropertyStorage, scratchGPR2, newSize / sizeof(JSValue)));
    // We have scratchGPR2 = new storage, scratchGPR1 = scratch
    // Copy the old out-of-line slots, which live at negative offsets from the
    // butterfly pointer.
    for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(oldSize); offset += sizeof(void*)) {
        m_jit.loadPtr(JITCompiler::Address(oldStorageGPR, -(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR1);
        m_jit.storePtr(scratchGPR1, JITCompiler::Address(scratchGPR2, -(offset + sizeof(JSValue) + sizeof(void*))));
    }
    m_jit.storePtr(scratchGPR2, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));

    storageResult(scratchGPR2, node);
}
4275
4276 GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode)
4277 {
4278 if (!putByValWillNeedExtraRegister(arrayMode))
4279 return InvalidGPRReg;
4280
4281 GPRTemporary realTemporary(this);
4282 temporary.adopt(realTemporary);
4283 return temporary.gpr();
4284 }
4285
// Compiles ToString on a cell. StringObject and String-or-StringObject uses
// are handled with inline loads (unwrapping the wrapper's internal value);
// generic cells call out to the runtime, with an inline fast path when a
// string input is predicted.
void SpeculativeJIT::compileToStringOnCell(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    GPRReg op1GPR = op1.gpr();

    switch (node->child1().useKind()) {
    case StringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        // Speculate StringObject, then unwrap its internal string value.
        speculateStringObject(node->child1(), op1GPR);
        m_state.forNode(node->child1()).filter(SpecStringObject);
        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
        cellResult(resultGPR, node);
        break;
    }

    case StringOrStringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        // A string passes through unchanged; anything else must be a
        // StringObject (checked via its structure) and is unwrapped.
        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSCell::structureOffset()), resultGPR);
        JITCompiler::Jump isString = m_jit.branchPtr(
            JITCompiler::Equal, resultGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()));

        speculateStringObjectForStructure(node->child1(), resultGPR);

        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);

        JITCompiler::Jump done = m_jit.jump();
        isString.link(&m_jit);
        m_jit.move(op1GPR, resultGPR);
        done.link(&m_jit);

        m_state.forNode(node->child1()).filter(SpecString | SpecStringObject);

        cellResult(resultGPR, node);
        break;
    }

    case CellUse: {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();

        // We flush registers instead of silent spill/fill because in this mode we
        // believe that most likely the input is not a string, and we need to take
        // slow path.
        flushRegisters();
        JITCompiler::Jump done;
        if (node->child1()->prediction() & SpecString) {
            // Fast path: a string input is returned as-is without calling out.
            JITCompiler::Jump needCall = m_jit.branchPtr(
                JITCompiler::NotEqual,
                JITCompiler::Address(op1GPR, JSCell::structureOffset()),
                TrustedImmPtr(m_jit.vm()->stringStructure.get()));
            m_jit.move(op1GPR, resultGPR);
            done = m_jit.jump();
            needCall.link(&m_jit);
        }
        callOperation(operationToStringOnCell, resultGPR, op1GPR);
        if (done.isSet())
            done.link(&m_jit);
        cellResult(resultGPR, node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
4355
// Compiles NewStringObject: allocates a StringObject inline, initializes its
// class-info pointer and internal string value, and falls back to
// operationNewStringObject when inline allocation fails.
void SpeculativeJIT::compileNewStringObject(Node* node)
{
    SpeculateCellOperand operand(this, node->child1());

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg operandGPR = operand.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;

    emitAllocateJSObject<StringObject>(
        resultGPR, TrustedImmPtr(node->structure()), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
        slowPath);

    m_jit.storePtr(
        TrustedImmPtr(&StringObject::s_info),
        JITCompiler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
#if USE(JSVALUE64)
    m_jit.store64(
        operandGPR, JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset()));
#else
    // 32-bit stores the cell tag and the payload separately.
    m_jit.store32(
        TrustedImm32(JSValue::CellTag),
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
    m_jit.store32(
        operandGPR,
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif

    addSlowPathGenerator(slowPathCall(
        slowPath, this, operationNewStringObject, resultGPR, operandGPR, node->structure()));

    cellResult(resultGPR, node);
}
4395
4396 void SpeculativeJIT::speculateInt32(Edge edge)
4397 {
4398 if (!needsTypeCheck(edge, SpecInt32))
4399 return;
4400
4401 (SpeculateIntegerOperand(this, edge)).gpr();
4402 }
4403
4404 void SpeculativeJIT::speculateNumber(Edge edge)
4405 {
4406 if (!needsTypeCheck(edge, SpecNumber))
4407 return;
4408
4409 (SpeculateDoubleOperand(this, edge)).fpr();
4410 }
4411
4412 void SpeculativeJIT::speculateRealNumber(Edge edge)
4413 {
4414 if (!needsTypeCheck(edge, SpecRealNumber))
4415 return;
4416
4417 SpeculateDoubleOperand operand(this, edge);
4418 FPRReg fpr = operand.fpr();
4419 DFG_TYPE_CHECK(
4420 JSValueRegs(), edge, SpecRealNumber,
4421 m_jit.branchDouble(
4422 MacroAssembler::DoubleNotEqualOrUnordered, fpr, fpr));
4423 }
4424
4425 void SpeculativeJIT::speculateBoolean(Edge edge)
4426 {
4427 if (!needsTypeCheck(edge, SpecBoolean))
4428 return;
4429
4430 (SpeculateBooleanOperand(this, edge)).gpr();
4431 }
4432
4433 void SpeculativeJIT::speculateCell(Edge edge)
4434 {
4435 if (!needsTypeCheck(edge, SpecCell))
4436 return;
4437
4438 (SpeculateCellOperand(this, edge)).gpr();
4439 }
4440
4441 void SpeculativeJIT::speculateObject(Edge edge)
4442 {
4443 if (!needsTypeCheck(edge, SpecObject))
4444 return;
4445
4446 SpeculateCellOperand operand(this, edge);
4447 GPRReg gpr = operand.gpr();
4448 DFG_TYPE_CHECK(
4449 JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchPtr(
4450 MacroAssembler::Equal,
4451 MacroAssembler::Address(gpr, JSCell::structureOffset()),
4452 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
4453 }
4454
void SpeculativeJIT::speculateObjectOrOther(Edge edge)
{
    // Speculate that the value is either an object or "other" (null or
    // undefined). On the cell path, only string cells fail; on the non-cell
    // path, only null/undefined pass.
    if (!needsTypeCheck(edge, SpecObject | SpecOther))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
#if USE(JSVALUE64)
    GPRReg gpr = operand.gpr();
    // Any set tag-mask bits mean the value is not a cell.
    MacroAssembler::Jump notCell = m_jit.branchTest64(
        MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister);
    // Cell path: bail if the cell's structure is the VM's string structure.
    DFG_TYPE_CHECK(
        JSValueRegs(gpr), edge, (~SpecCell) | SpecObject, m_jit.branchPtr(
            MacroAssembler::Equal,
            MacroAssembler::Address(gpr, JSCell::structureOffset()),
            MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    MacroAssembler::Jump done = m_jit.jump();
    notCell.link(&m_jit);
    // Non-cell path: clearing the undefined tag bit maps both undefined and
    // null to ValueNull, so a single compare accepts exactly those two.
    if (needsTypeCheck(edge, SpecCell | SpecOther)) {
        m_jit.move(gpr, tempGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR);

        typeCheck(
            JSValueRegs(gpr), edge, SpecCell | SpecOther,
            m_jit.branch64(
                MacroAssembler::NotEqual, tempGPR,
                MacroAssembler::TrustedImm64(ValueNull)));
    }
    done.link(&m_jit);
#else
    GPRReg tagGPR = operand.tagGPR();
    GPRReg payloadGPR = operand.payloadGPR();
    // 32-bit: cells are identified by the CellTag in the tag word.
    MacroAssembler::Jump notCell =
        m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag));
    // Cell path: bail if the cell's structure is the VM's string structure.
    DFG_TYPE_CHECK(
        JSValueRegs(tagGPR, payloadGPR), edge, (~SpecCell) | SpecObject, m_jit.branchPtr(
            MacroAssembler::Equal,
            MacroAssembler::Address(payloadGPR, JSCell::structureOffset()),
            MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
    MacroAssembler::Jump done = m_jit.jump();
    notCell.link(&m_jit);
    // Non-cell path: or-ing in the low bit folds the undefined tag onto the
    // null tag (relies on the two tags differing only in that bit), so one
    // compare accepts both null and undefined.
    if (needsTypeCheck(edge, SpecCell | SpecOther)) {
        m_jit.move(tagGPR, tempGPR);
        m_jit.or32(TrustedImm32(1), tempGPR);

        typeCheck(
            JSValueRegs(tagGPR, payloadGPR), edge, SpecCell | SpecOther,
            m_jit.branch32(
                MacroAssembler::NotEqual, tempGPR,
                MacroAssembler::TrustedImm32(JSValue::NullTag)));
    }
    done.link(&m_jit);
#endif
}
4510
4511 void SpeculativeJIT::speculateString(Edge edge)
4512 {
4513 if (!needsTypeCheck(edge, SpecString))
4514 return;
4515
4516 SpeculateCellOperand operand(this, edge);
4517 GPRReg gpr = operand.gpr();
4518 DFG_TYPE_CHECK(
4519 JSValueSource::unboxedCell(gpr), edge, SpecString, m_jit.branchPtr(
4520 MacroAssembler::NotEqual,
4521 MacroAssembler::Address(gpr, JSCell::structureOffset()),
4522 MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
4523 }
4524
4525 void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg gpr)
4526 {
4527 speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureOffset()));
4528 }
4529
void SpeculativeJIT::speculateStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecStringObject))
        return;

    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    // Filling the cell operand above may have refined the proven type, so
    // re-test before emitting the structure check.
    if (!needsTypeCheck(edge, SpecStringObject))
        return;

    speculateStringObject(edge, gpr);
    // Record in the abstract state that this value is now proven to be a
    // StringObject.
    m_state.forNode(edge).filter(SpecStringObject);
}
4543
void SpeculativeJIT::speculateStringOrStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;

    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    // Filling the cell operand above may have refined the proven type, so
    // re-test before emitting any structure checks.
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;

    GPRTemporary structure(this);
    GPRReg structureGPR = structure.gpr();

    m_jit.loadPtr(JITCompiler::Address(gpr, JSCell::structureOffset()), structureGPR);

    // Accept plain strings outright...
    JITCompiler::Jump isString = m_jit.branchPtr(
        JITCompiler::Equal, structureGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()));

    // ...otherwise require the StringObject structure (bails if neither).
    speculateStringObjectForStructure(edge, structureGPR);

    isString.link(&m_jit);

    // Reflect the proven refinement in the abstract state.
    m_state.forNode(edge).filter(SpecString | SpecStringObject);
}
4568
void SpeculativeJIT::speculateNotCell(Edge edge)
{
    // Speculate that the value is anything but a cell; bails if it is one.
    if (!needsTypeCheck(edge, ~SpecCell))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
#if USE(JSVALUE64)
    // 64-bit: a value is a cell exactly when none of the tag-mask bits are
    // set, so a zero test against the tag mask register detects cells.
    typeCheck(
        JSValueRegs(operand.gpr()), edge, ~SpecCell,
        m_jit.branchTest64(
            JITCompiler::Zero, operand.gpr(), GPRInfo::tagMaskRegister));
#else
    // 32-bit: cells carry the dedicated CellTag in the tag word.
    typeCheck(
        JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, ~SpecCell,
        m_jit.branch32(
            JITCompiler::Equal, operand.tagGPR(), TrustedImm32(JSValue::CellTag)));
#endif
}
4587
void SpeculativeJIT::speculateOther(Edge edge)
{
    // Speculate that the value is "other", i.e. null or undefined.
    if (!needsTypeCheck(edge, SpecOther))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
#if USE(JSVALUE64)
    // Clearing the undefined tag bit maps both undefined and null to
    // ValueNull; any other value fails the compare and bails.
    m_jit.move(operand.gpr(), tempGPR);
    m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR);
    typeCheck(
        JSValueRegs(operand.gpr()), edge, SpecOther,
        m_jit.branch64(
            MacroAssembler::NotEqual, tempGPR,
            MacroAssembler::TrustedImm64(ValueNull)));
#else
    // Or-ing in the low bit folds the undefined tag onto the null tag
    // (relies on the two tags differing only in that bit), so a single
    // compare accepts exactly null and undefined.
    m_jit.move(operand.tagGPR(), tempGPR);
    m_jit.or32(TrustedImm32(1), tempGPR);
    typeCheck(
        JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, SpecOther,
        m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(JSValue::NullTag)));
#endif
}
4612
4613 void SpeculativeJIT::speculate(Node*, Edge edge)
4614 {
4615 switch (edge.useKind()) {
4616 case UntypedUse:
4617 break;
4618 case KnownInt32Use:
4619 ASSERT(!needsTypeCheck(edge, SpecInt32));
4620 break;
4621 case KnownNumberUse:
4622 ASSERT(!needsTypeCheck(edge, SpecNumber));
4623 break;
4624 case KnownCellUse:
4625 ASSERT(!needsTypeCheck(edge, SpecCell));
4626 break;
4627 case KnownStringUse:
4628 ASSERT(!needsTypeCheck(edge, SpecString));
4629 break;
4630 case Int32Use:
4631 speculateInt32(edge);
4632 break;
4633 case RealNumberUse:
4634 speculateRealNumber(edge);
4635 break;
4636 case NumberUse:
4637 speculateNumber(edge);
4638 break;
4639 case BooleanUse:
4640 speculateBoolean(edge);
4641 break;
4642 case CellUse:
4643 speculateCell(edge);
4644 break;
4645 case ObjectUse:
4646 speculateObject(edge);
4647 break;
4648 case ObjectOrOtherUse:
4649 speculateObjectOrOther(edge);
4650 break;
4651 case StringUse:
4652 speculateString(edge);
4653 break;
4654 case StringObjectUse:
4655 speculateStringObject(edge);
4656 break;
4657 case StringOrStringObjectUse:
4658 speculateStringOrStringObject(edge);
4659 break;
4660 case NotCellUse:
4661 speculateNotCell(edge);
4662 break;
4663 case OtherUse:
4664 speculateOther(edge);
4665 break;
4666 default:
4667 RELEASE_ASSERT_NOT_REACHED();
4668 break;
4669 }
4670 }
4671
4672 } } // namespace JSC::DFG
4673
4674 #endif