[apple/javascriptcore.git] / dfg / DFGSpeculativeJIT64.cpp (JavaScriptCore-7601.1.46.3)
1 /*
2 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayPrototype.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
34 #include "DFGOperations.h"
35 #include "DFGSlowPathGenerator.h"
36 #include "Debugger.h"
37 #include "DirectArguments.h"
38 #include "GetterSetter.h"
39 #include "JSCInlines.h"
40 #include "JSEnvironmentRecord.h"
41 #include "JSLexicalEnvironment.h"
42 #include "JSPropertyNameEnumerator.h"
43 #include "ObjectPrototype.h"
44 #include "SetupVarargsFrame.h"
45 #include "SpillRegistersMode.h"
46 #include "TypeProfilerLog.h"
47
48 namespace JSC { namespace DFG {
49
50 #if USE(JSVALUE64)
51
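// A quick reminder of the JSVALUE64 encoding that the code below depends on
// (JSCJSValue.h has the authoritative definitions). Roughly: boxed int32s have
// all of the top 16 bits set (the pattern kept in tagTypeNumberRegister),
// boxed doubles are the raw IEEE bits offset by 2^48, and cell pointers have
// the top 16 bits and the low type-tag bits clear (the pattern kept in
// tagMaskRegister). Booleans, null and undefined are small immediates such as
// ValueFalse, ValueTrue and ValueNull.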
52 void SpeculativeJIT::boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat format)
53 {
54 GPRReg tempGPR;
55 if (sourceGPR == targetGPR)
56 tempGPR = allocate();
57 else
58 tempGPR = targetGPR;
59
60 FPRReg fpr = fprAllocate();
61
62 if (format == DataFormatInt52)
63 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
64 else
65 ASSERT(format == DataFormatStrictInt52);
66
67 m_jit.boxInt52(sourceGPR, targetGPR, tempGPR, fpr);
68
69 if (format == DataFormatInt52 && sourceGPR != targetGPR)
70 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
71
72 if (tempGPR != targetGPR)
73 unlock(tempGPR);
74
75 unlock(fpr);
76 }
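// Note on the two Int52 register formats handled above: DataFormatInt52 keeps
// the value pre-shifted left by JSValue::int52ShiftAmount, while
// DataFormatStrictInt52 keeps the plain two's-complement value. boxInt52()
// therefore shifts a DataFormatInt52 source down to the strict form before
// boxing (an int32 immediate when the value fits, otherwise a boxed double),
// and shifts the source back up afterwards when it has to be preserved.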
77
78 GPRReg SpeculativeJIT::fillJSValue(Edge edge)
79 {
80 VirtualRegister virtualRegister = edge->virtualRegister();
81 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
82
83 switch (info.registerFormat()) {
84 case DataFormatNone: {
85 GPRReg gpr = allocate();
86
87 if (edge->hasConstant()) {
88 JSValue jsValue = edge->asJSValue();
89 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
90 info.fillJSValue(*m_stream, gpr, DataFormatJS);
91 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
92 } else {
93 DataFormat spillFormat = info.spillFormat();
94 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
95 switch (spillFormat) {
96 case DataFormatInt32: {
97 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
98 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
99 spillFormat = DataFormatJSInt32;
100 break;
101 }
102
103 default:
104 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
105 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat & DataFormatJS);
106 break;
107 }
108 info.fillJSValue(*m_stream, gpr, spillFormat);
109 }
110 return gpr;
111 }
112
113 case DataFormatInt32: {
114 GPRReg gpr = info.gpr();
115 // If the register has already been locked we need to take a copy.
116 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
117 if (m_gprs.isLocked(gpr)) {
118 GPRReg result = allocate();
119 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr, result);
120 return result;
121 }
122 m_gprs.lock(gpr);
123 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
124 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
125 return gpr;
126 }
127
128 case DataFormatCell:
129 // No retag required on JSVALUE64!
130 case DataFormatJS:
131 case DataFormatJSInt32:
132 case DataFormatJSDouble:
133 case DataFormatJSCell:
134 case DataFormatJSBoolean: {
135 GPRReg gpr = info.gpr();
136 m_gprs.lock(gpr);
137 return gpr;
138 }
139
140 case DataFormatBoolean:
141 case DataFormatStorage:
142 case DataFormatDouble:
143 case DataFormatInt52:
144 // this type currently never occurs
145 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
146
147 default:
148 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
149 return InvalidGPRReg;
150 }
151 }
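// fillJSValue() is representative of the fill helpers in this file: whatever
// representation the value currently has (a constant, a stack spill, or a
// register in some narrower format), it ends up fully boxed in a locked GPR,
// and the GenerationInfo is updated so later uses can reuse that register.
// The or64 with tagTypeNumberRegister is how a raw int32 becomes a boxed
// JSValue in this encoding.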
152
153 void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
154 {
155 JITGetByIdGenerator gen(
156 m_jit.codeBlock(), codeOrigin, usedRegisters(), JSValueRegs(baseGPR),
157 JSValueRegs(resultGPR), spillMode);
158 gen.generateFastPath(m_jit);
159
160 JITCompiler::JumpList slowCases;
161 if (slowPathTarget.isSet())
162 slowCases.append(slowPathTarget);
163 slowCases.append(gen.slowPathJump());
164
165 auto slowPath = slowPathCall(
166 slowCases, this, operationGetByIdOptimize, resultGPR, gen.stubInfo(), baseGPR,
167 identifierUID(identifierNumber), spillMode);
168
169 m_jit.addGetById(gen, slowPath.get());
170 addSlowPathGenerator(WTF::move(slowPath));
171 }
172
173 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
174 {
175 JITPutByIdGenerator gen(
176 m_jit.codeBlock(), codeOrigin, usedRegisters(), JSValueRegs(baseGPR),
177 JSValueRegs(valueGPR), scratchGPR, spillMode, m_jit.ecmaModeFor(codeOrigin), putKind);
178
179 gen.generateFastPath(m_jit);
180
181 JITCompiler::JumpList slowCases;
182 if (slowPathTarget.isSet())
183 slowCases.append(slowPathTarget);
184 slowCases.append(gen.slowPathJump());
185
186 auto slowPath = slowPathCall(
187 slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueGPR, baseGPR,
188 identifierUID(identifierNumber));
189
190 m_jit.addPutById(gen, slowPath.get());
191 addSlowPathGenerator(WTF::move(slowPath));
192 }
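// Both cachedGetById() and cachedPutById() follow the usual inline-cache
// shape: the generator emits a patchable fast path, any caller-supplied
// slow-path jump plus the generator's own slow-path jump are funneled into a
// single out-of-line operation call, and the pair is registered with the
// JITCompiler so the stub can be repatched as the cache warms up.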
193
194 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
195 {
196 JSValueOperand arg(this, operand);
197 GPRReg argGPR = arg.gpr();
198
199 GPRTemporary result(this, Reuse, arg);
200 GPRReg resultGPR = result.gpr();
201
202 JITCompiler::Jump notCell;
203
204 JITCompiler::Jump notMasqueradesAsUndefined;
205 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
206 if (!isKnownCell(operand.node()))
207 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
208
209 m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
210 notMasqueradesAsUndefined = m_jit.jump();
211 } else {
212 GPRTemporary localGlobalObject(this);
213 GPRTemporary remoteGlobalObject(this);
214 GPRTemporary scratch(this);
215
216 if (!isKnownCell(operand.node()))
217 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
218
219 JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
220 JITCompiler::NonZero,
221 JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
222 JITCompiler::TrustedImm32(MasqueradesAsUndefined));
223
224 m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultGPR);
225 notMasqueradesAsUndefined = m_jit.jump();
226
227 isMasqueradesAsUndefined.link(&m_jit);
228 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
229 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
230 m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
231 m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
232 m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
233 m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
234 }
235
236 if (!isKnownCell(operand.node())) {
237 JITCompiler::Jump done = m_jit.jump();
238
239 notCell.link(&m_jit);
240
241 m_jit.move(argGPR, resultGPR);
242 m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
243 m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
244
245 done.link(&m_jit);
246 }
247
248 notMasqueradesAsUndefined.link(&m_jit);
249
250 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
251 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
252 }
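// The non-cell path above relies on how null and undefined are encoded:
// undefined is ValueNull with the TagBitUndefined bit also set, so masking
// that bit off and comparing against ValueNull matches both values at once.
// The cell path only needs real work when the MasqueradesAsUndefined
// watchpoint has fired; such cells compare equal to undefined only when
// observed from their own global object, which is why the local and remote
// global objects are loaded and compared.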
253
254 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
255 {
256 BasicBlock* taken = branchNode->branchData()->taken.block;
257 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
258
259 if (taken == nextBlock()) {
260 invert = !invert;
261 BasicBlock* tmp = taken;
262 taken = notTaken;
263 notTaken = tmp;
264 }
265
266 JSValueOperand arg(this, operand);
267 GPRReg argGPR = arg.gpr();
268
269 GPRTemporary result(this, Reuse, arg);
270 GPRReg resultGPR = result.gpr();
271
272 JITCompiler::Jump notCell;
273
274 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
275 if (!isKnownCell(operand.node()))
276 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
277
278 jump(invert ? taken : notTaken, ForceJump);
279 } else {
280 GPRTemporary localGlobalObject(this);
281 GPRTemporary remoteGlobalObject(this);
282 GPRTemporary scratch(this);
283
284 if (!isKnownCell(operand.node()))
285 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
286
287 branchTest8(JITCompiler::Zero,
288 JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
289 JITCompiler::TrustedImm32(MasqueradesAsUndefined),
290 invert ? taken : notTaken);
291
292 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
293 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
294 m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
295 m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
296 m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
297 branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
298 }
299
300 if (!isKnownCell(operand.node())) {
301 jump(notTaken, ForceJump);
302
303 notCell.link(&m_jit);
304
305 m_jit.move(argGPR, resultGPR);
306 m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
307 branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken);
308 }
309
310 jump(notTaken);
311 }
312
313 bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert)
314 {
315 unsigned branchIndexInBlock = detectPeepHoleBranch();
316 if (branchIndexInBlock != UINT_MAX) {
317 Node* branchNode = m_block->at(branchIndexInBlock);
318
319 DFG_ASSERT(m_jit.graph(), node, node->adjustedRefCount() == 1);
320
321 nonSpeculativePeepholeBranchNull(operand, branchNode, invert);
322
323 use(node->child1());
324 use(node->child2());
325 m_indexInBlock = branchIndexInBlock;
326 m_currentNode = branchNode;
327
328 return true;
329 }
330
331 nonSpeculativeNonPeepholeCompareNull(operand, invert);
332
333 return false;
334 }
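// Like the other compare helpers, nonSpeculativeCompareNull() first checks
// whether this compare feeds a Branch at the end of the block
// (detectPeepHoleBranch()). If it does, the compare and the branch are
// compiled as one fused operation, m_indexInBlock/m_currentNode are advanced
// past the branch, and the function returns true so the caller knows no
// separate boolean result is produced.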
335
336 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
337 {
338 BasicBlock* taken = branchNode->branchData()->taken.block;
339 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
340
341 JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
342
343 // The branch instruction will branch to the taken block.
344 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
345 if (taken == nextBlock()) {
346 cond = JITCompiler::invert(cond);
347 callResultCondition = JITCompiler::Zero;
348 BasicBlock* tmp = taken;
349 taken = notTaken;
350 notTaken = tmp;
351 }
352
353 JSValueOperand arg1(this, node->child1());
354 JSValueOperand arg2(this, node->child2());
355 GPRReg arg1GPR = arg1.gpr();
356 GPRReg arg2GPR = arg2.gpr();
357
358 JITCompiler::JumpList slowPath;
359
360 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
361 GPRFlushedCallResult result(this);
362 GPRReg resultGPR = result.gpr();
363
364 arg1.use();
365 arg2.use();
366
367 flushRegisters();
368 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
369
370 branchTest32(callResultCondition, resultGPR, taken);
371 } else {
372 GPRTemporary result(this, Reuse, arg2);
373 GPRReg resultGPR = result.gpr();
374
375 arg1.use();
376 arg2.use();
377
378 if (!isKnownInteger(node->child1().node()))
379 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
380 if (!isKnownInteger(node->child2().node()))
381 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
382
383 branch32(cond, arg1GPR, arg2GPR, taken);
384
385 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
386 jump(notTaken, ForceJump);
387
388 slowPath.link(&m_jit);
389
390 silentSpillAllRegisters(resultGPR);
391 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
392 silentFillAllRegisters(resultGPR);
393
394 branchTest32(callResultCondition, resultGPR, taken);
395 }
396 }
397
398 jump(notTaken);
399
400 m_indexInBlock = m_block->size() - 1;
401 m_currentNode = branchNode;
402 }
403
404 template<typename JumpType>
405 class CompareAndBoxBooleanSlowPathGenerator
406 : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
407 public:
408 CompareAndBoxBooleanSlowPathGenerator(
409 JumpType from, SpeculativeJIT* jit,
410 S_JITOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2)
411 : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
412 from, jit, function, NeedToSpill, result)
413 , m_arg1(arg1)
414 , m_arg2(arg2)
415 {
416 }
417
418 protected:
419 virtual void generateInternal(SpeculativeJIT* jit) override
420 {
421 this->setUp(jit);
422 this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
423 jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
424 jit->m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), this->m_result);
425 this->tearDown(jit);
426 }
427
428 private:
429 GPRReg m_arg1;
430 GPRReg m_arg2;
431 };
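// The slow path generator above shows the standard way a boolean returned
// from a C++ operation gets re-boxed: the call leaves 0 or 1 in the result
// register, and32(1) keeps only that low bit, and or32(ValueFalse) turns 0/1
// into the encoded false/true immediates (ValueFalse and ValueTrue differ
// only in their low bit). The same or32(ValueFalse) idiom follows most
// compare32/compare64 sequences in this file.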
432
433 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
434 {
435 ASSERT(node->isBinaryUseKind(UntypedUse));
436 JSValueOperand arg1(this, node->child1());
437 JSValueOperand arg2(this, node->child2());
438 GPRReg arg1GPR = arg1.gpr();
439 GPRReg arg2GPR = arg2.gpr();
440
441 JITCompiler::JumpList slowPath;
442
443 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
444 GPRFlushedCallResult result(this);
445 GPRReg resultGPR = result.gpr();
446
447 arg1.use();
448 arg2.use();
449
450 flushRegisters();
451 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
452
453 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
454 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
455 } else {
456 GPRTemporary result(this, Reuse, arg2);
457 GPRReg resultGPR = result.gpr();
458
459 arg1.use();
460 arg2.use();
461
462 if (!isKnownInteger(node->child1().node()))
463 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
464 if (!isKnownInteger(node->child2().node()))
465 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
466
467 m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
468 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
469
470 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
471 addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
472 slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR));
473 }
474
475 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
476 }
477 }
478
479 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
480 {
481 BasicBlock* taken = branchNode->branchData()->taken.block;
482 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
483
484 // The branch instruction will branch to the taken block.
485 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
486 if (taken == nextBlock()) {
487 invert = !invert;
488 BasicBlock* tmp = taken;
489 taken = notTaken;
490 notTaken = tmp;
491 }
492
493 JSValueOperand arg1(this, node->child1());
494 JSValueOperand arg2(this, node->child2());
495 GPRReg arg1GPR = arg1.gpr();
496 GPRReg arg2GPR = arg2.gpr();
497
498 GPRTemporary result(this);
499 GPRReg resultGPR = result.gpr();
500
501 arg1.use();
502 arg2.use();
503
504 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
505 // see if we get lucky: if the arguments are cells and they reference the same
506 // cell, then they must be strictly equal.
507 branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
508
509 silentSpillAllRegisters(resultGPR);
510 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
511 silentFillAllRegisters(resultGPR);
512
513 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
514 } else {
515 m_jit.or64(arg1GPR, arg2GPR, resultGPR);
516
517 JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
518
519 JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
520 JITCompiler::Jump leftDouble = m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
521 leftOK.link(&m_jit);
522 JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
523 JITCompiler::Jump rightDouble = m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
524 rightOK.link(&m_jit);
525
526 branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
527 jump(notTaken, ForceJump);
528
529 twoCellsCase.link(&m_jit);
530 branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
531
532 leftDouble.link(&m_jit);
533 rightDouble.link(&m_jit);
534
535 silentSpillAllRegisters(resultGPR);
536 callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
537 silentFillAllRegisters(resultGPR);
538
539 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
540 }
541
542 jump(notTaken);
543 }
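// The mixed-type fast path above leans on the tag registers: or64-ing the two
// operands and testing against tagMaskRegister is zero only when both are
// cells; branch64(Below, arg, tagTypeNumberRegister) means the operand is not
// a boxed int32; and the following branchTest64(NonZero, ...,
// tagTypeNumberRegister) then identifies a boxed double, which is routed to
// the C++ operation because an int32 and a double can be strictly equal
// without being bit-identical.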
544
545 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
546 {
547 JSValueOperand arg1(this, node->child1());
548 JSValueOperand arg2(this, node->child2());
549 GPRReg arg1GPR = arg1.gpr();
550 GPRReg arg2GPR = arg2.gpr();
551
552 GPRTemporary result(this);
553 GPRReg resultGPR = result.gpr();
554
555 arg1.use();
556 arg2.use();
557
558 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
559 // see if we get lucky: if the arguments are cells and they reference the same
560 // cell, then they must be strictly equal.
561 // FIXME: this should flush registers instead of silent spill/fill.
562 JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);
563
564 m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
565
566 JITCompiler::Jump done = m_jit.jump();
567
568 notEqualCase.link(&m_jit);
569
570 silentSpillAllRegisters(resultGPR);
571 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
572 silentFillAllRegisters(resultGPR);
573
574 m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
575 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
576
577 done.link(&m_jit);
578 } else {
579 m_jit.or64(arg1GPR, arg2GPR, resultGPR);
580
581 JITCompiler::JumpList slowPathCases;
582
583 JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
584
585 JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
586 slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
587 leftOK.link(&m_jit);
588 JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
589 slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
590 rightOK.link(&m_jit);
591
592 m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
593 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
594
595 JITCompiler::Jump done = m_jit.jump();
596
597 twoCellsCase.link(&m_jit);
598 slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));
599
600 m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
601
602 addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>>(
603 slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
604 arg2GPR));
605
606 done.link(&m_jit);
607 }
608
609 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
610 }
611
612 void SpeculativeJIT::compileMiscStrictEq(Node* node)
613 {
614 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
615 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
616 GPRTemporary result(this);
617
618 if (node->child1().useKind() == MiscUse)
619 speculateMisc(node->child1(), op1.jsValueRegs());
620 if (node->child2().useKind() == MiscUse)
621 speculateMisc(node->child2(), op2.jsValueRegs());
622
623 m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());
624 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
625 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
626 }
627
628 void SpeculativeJIT::emitCall(Node* node)
629 {
630 CallLinkInfo::CallType callType;
631 bool isVarargs = false;
632 bool isForwardVarargs = false;
633 switch (node->op()) {
634 case Call:
635 callType = CallLinkInfo::Call;
636 break;
637 case Construct:
638 callType = CallLinkInfo::Construct;
639 break;
640 case CallVarargs:
641 callType = CallLinkInfo::CallVarargs;
642 isVarargs = true;
643 break;
644 case ConstructVarargs:
645 callType = CallLinkInfo::ConstructVarargs;
646 isVarargs = true;
647 break;
648 case CallForwardVarargs:
649 callType = CallLinkInfo::CallVarargs;
650 isForwardVarargs = true;
651 break;
652 case ConstructForwardVarargs:
653 callType = CallLinkInfo::ConstructVarargs;
654 isForwardVarargs = true;
655 break;
656 default:
657 DFG_CRASH(m_jit.graph(), node, "bad node type");
658 break;
659 }
660
661 Edge calleeEdge = m_jit.graph().child(node, 0);
662
663 // Gotta load the arguments somehow. Varargs is trickier.
664 if (isVarargs || isForwardVarargs) {
665 CallVarargsData* data = node->callVarargsData();
666
667 GPRReg resultGPR;
668 unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
669
670 if (isForwardVarargs) {
671 flushRegisters();
672 use(node->child2());
673
674 GPRReg scratchGPR1;
675 GPRReg scratchGPR2;
676 GPRReg scratchGPR3;
677
678 scratchGPR1 = JITCompiler::selectScratchGPR();
679 scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
680 scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);
681
682 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
683 JITCompiler::JumpList slowCase;
684 emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
685 JITCompiler::Jump done = m_jit.jump();
686 slowCase.link(&m_jit);
687 callOperation(operationThrowStackOverflowForVarargs);
688 m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
689 done.link(&m_jit);
690 resultGPR = scratchGPR2;
691 } else {
692 GPRReg argumentsGPR;
693 GPRReg scratchGPR1;
694 GPRReg scratchGPR2;
695 GPRReg scratchGPR3;
696
697 auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
698 if (reservedGPR != InvalidGPRReg)
699 lock(reservedGPR);
700 JSValueOperand arguments(this, node->child2());
701 argumentsGPR = arguments.gpr();
702 if (reservedGPR != InvalidGPRReg)
703 unlock(reservedGPR);
704 flushRegisters();
705
706 scratchGPR1 = JITCompiler::selectScratchGPR(argumentsGPR, reservedGPR);
707 scratchGPR2 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, reservedGPR);
708 scratchGPR3 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, scratchGPR2, reservedGPR);
709 };
710
711 loadArgumentsGPR(InvalidGPRReg);
712
713 DFG_ASSERT(m_jit.graph(), node, isFlushed());
714
715 // Right now, arguments is in argumentsGPR and the register file is flushed.
716 callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsGPR, numUsedStackSlots, data->firstVarArgOffset);
717
718 // Now we have the argument count of the callee frame, but we've lost the arguments operand.
719 // Reconstruct the arguments operand while preserving the callee frame.
720 loadArgumentsGPR(GPRInfo::returnValueGPR);
721 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
722 emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
723 m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);
724
725 callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
726 resultGPR = GPRInfo::returnValueGPR;
727 }
728
729 m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister);
730
731 DFG_ASSERT(m_jit.graph(), node, isFlushed());
732
733 // We don't need the arguments array anymore.
734 if (isVarargs)
735 use(node->child2());
736
737 // Now set up the "this" argument.
738 JSValueOperand thisArgument(this, node->child3());
739 GPRReg thisArgumentGPR = thisArgument.gpr();
740 thisArgument.use();
741
742 m_jit.store64(thisArgumentGPR, JITCompiler::calleeArgumentSlot(0));
743 } else {
744 // The call instruction's first child is the function; the subsequent children are the
745 // arguments.
746 int numPassedArgs = node->numChildren() - 1;
747
748 m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(JSStack::ArgumentCount));
749
750 for (int i = 0; i < numPassedArgs; i++) {
751 Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
752 JSValueOperand arg(this, argEdge);
753 GPRReg argGPR = arg.gpr();
754 use(argEdge);
755
756 m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i));
757 }
758 }
759
760 JSValueOperand callee(this, calleeEdge);
761 GPRReg calleeGPR = callee.gpr();
762 callee.use();
763 m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(JSStack::Callee));
764
765 flushRegisters();
766
767 GPRFlushedCallResult result(this);
768 GPRReg resultGPR = result.gpr();
769
770 JITCompiler::DataLabelPtr targetToCheck;
771 JITCompiler::Jump slowPath;
772
773 m_jit.emitStoreCodeOrigin(node->origin.semantic);
774
775 CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo();
776
777 slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0));
778
779 JITCompiler::Call fastCall = m_jit.nearCall();
780
781 JITCompiler::Jump done = m_jit.jump();
782
783 slowPath.link(&m_jit);
784
785 m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
786 m_jit.move(MacroAssembler::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2
787 JITCompiler::Call slowCall = m_jit.nearCall();
788
789 done.link(&m_jit);
790
791 m_jit.move(GPRInfo::returnValueGPR, resultGPR);
792
793 jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);
794
795 callLinkInfo->setUpCall(callType, m_currentNode->origin.semantic, calleeGPR);
796 m_jit.addJSCall(fastCall, slowCall, targetToCheck, callLinkInfo);
797
798 // If we were varargs, then after the calls are done, we need to reestablish our stack pointer.
799 if (isVarargs || isForwardVarargs)
800 m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
801 }
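// Call sequence notes: for forwarded varargs the caller's own arguments are
// copied straight into the new frame (emitSetupVarargsFrameFastCase), while
// ordinary varargs first ask operationSizeFrameForVarargs how large the
// callee frame must be, adjust the stack pointer, and then have
// operationSetupVarargsFrame copy the arguments out of the arguments object.
// The call itself is a patchable nearCall guarded by branchPtrWithPatch; the
// out-of-line path moves the callee into regT0 and the CallLinkInfo into
// regT2 before its own nearCall, and addJSCall() records both calls and the
// patchable pointer so that linking can later bind the fast path to the
// resolved callee.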
802
803 // Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it
804 // http://llvm.org/bugs/show_bug.cgi?id=18619
805 #if COMPILER(CLANG) && defined(__has_warning)
806 #pragma clang diagnostic push
807 #if __has_warning("-Wimplicit-fallthrough")
808 #pragma clang diagnostic ignored "-Wimplicit-fallthrough"
809 #endif
810 #endif
811 template<bool strict>
812 GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
813 {
814 AbstractValue& value = m_state.forNode(edge);
815 SpeculatedType type = value.m_type;
816 ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
817
818 m_interpreter.filter(value, SpecInt32);
819 if (value.isClear()) {
820 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
821 returnFormat = DataFormatInt32;
822 return allocate();
823 }
824
825 VirtualRegister virtualRegister = edge->virtualRegister();
826 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
827
828 switch (info.registerFormat()) {
829 case DataFormatNone: {
830 GPRReg gpr = allocate();
831
832 if (edge->hasConstant()) {
833 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
834 ASSERT(edge->isInt32Constant());
835 m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
836 info.fillInt32(*m_stream, gpr);
837 returnFormat = DataFormatInt32;
838 return gpr;
839 }
840
841 DataFormat spillFormat = info.spillFormat();
842
843 DFG_ASSERT(m_jit.graph(), m_currentNode, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
844
845 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
846
847 if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
848 // If we know this was spilled as an integer we can fill without checking.
849 if (strict) {
850 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
851 info.fillInt32(*m_stream, gpr);
852 returnFormat = DataFormatInt32;
853 return gpr;
854 }
855 if (spillFormat == DataFormatInt32) {
856 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
857 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
858 } else
859 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
860 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
861 returnFormat = DataFormatJSInt32;
862 return gpr;
863 }
864 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
865
866 // Fill as JSValue, and fall through.
867 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
868 m_gprs.unlock(gpr);
869 FALLTHROUGH;
870 }
871
872 case DataFormatJS: {
873 DFG_ASSERT(m_jit.graph(), m_currentNode, !(type & SpecInt52));
874 // Check the value is an integer.
875 GPRReg gpr = info.gpr();
876 m_gprs.lock(gpr);
877 if (type & ~SpecInt32)
878 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branch64(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
879 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
880 // If !strict we're done, return.
881 if (!strict) {
882 returnFormat = DataFormatJSInt32;
883 return gpr;
884 }
885 // else fall through & handle as DataFormatJSInt32.
886 m_gprs.unlock(gpr);
887 FALLTHROUGH;
888 }
889
890 case DataFormatJSInt32: {
891 // In a strict fill we need to strip off the value tag.
892 if (strict) {
893 GPRReg gpr = info.gpr();
894 GPRReg result;
895 // If the register has already been locked we need to take a copy.
896 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
897 if (m_gprs.isLocked(gpr))
898 result = allocate();
899 else {
900 m_gprs.lock(gpr);
901 info.fillInt32(*m_stream, gpr);
902 result = gpr;
903 }
904 m_jit.zeroExtend32ToPtr(gpr, result);
905 returnFormat = DataFormatInt32;
906 return result;
907 }
908
909 GPRReg gpr = info.gpr();
910 m_gprs.lock(gpr);
911 returnFormat = DataFormatJSInt32;
912 return gpr;
913 }
914
915 case DataFormatInt32: {
916 GPRReg gpr = info.gpr();
917 m_gprs.lock(gpr);
918 returnFormat = DataFormatInt32;
919 return gpr;
920 }
921
922 case DataFormatJSDouble:
923 case DataFormatCell:
924 case DataFormatBoolean:
925 case DataFormatJSCell:
926 case DataFormatJSBoolean:
927 case DataFormatDouble:
928 case DataFormatStorage:
929 case DataFormatInt52:
930 case DataFormatStrictInt52:
931 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
932
933 default:
934 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
935 return InvalidGPRReg;
936 }
937 }
938 #if COMPILER(CLANG) && defined(__has_warning)
939 #pragma clang diagnostic pop
940 #endif
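// fillSpeculateInt32Internal() backs both fillSpeculateInt32() (non-strict: a
// DataFormatJSInt32 register is acceptable) and fillSpeculateInt32Strict()
// (the tag must be stripped so the low 32 bits can be used directly). Its
// shape is shared by the other fillSpeculate* helpers below: filter the
// abstract value, terminate speculative execution if the type is provably
// impossible, and otherwise emit a speculationCheck() that OSR-exits when the
// runtime value does not have the expected type.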
941
942 GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
943 {
944 return fillSpeculateInt32Internal<false>(edge, returnFormat);
945 }
946
947 GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
948 {
949 DataFormat mustBeDataFormatInt32;
950 GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
951 DFG_ASSERT(m_jit.graph(), m_currentNode, mustBeDataFormatInt32 == DataFormatInt32);
952 return result;
953 }
954
955 GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
956 {
957 ASSERT(desiredFormat == DataFormatInt52 || desiredFormat == DataFormatStrictInt52);
958 AbstractValue& value = m_state.forNode(edge);
959
960 m_interpreter.filter(value, SpecMachineInt);
961 if (value.isClear()) {
962 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
963 return allocate();
964 }
965
966 VirtualRegister virtualRegister = edge->virtualRegister();
967 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
968
969 switch (info.registerFormat()) {
970 case DataFormatNone: {
971 GPRReg gpr = allocate();
972
973 if (edge->hasConstant()) {
974 JSValue jsValue = edge->asJSValue();
975 ASSERT(jsValue.isMachineInt());
976 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
977 int64_t value = jsValue.asMachineInt();
978 if (desiredFormat == DataFormatInt52)
979 value = value << JSValue::int52ShiftAmount;
980 m_jit.move(MacroAssembler::Imm64(value), gpr);
981 info.fillGPR(*m_stream, gpr, desiredFormat);
982 return gpr;
983 }
984
985 DataFormat spillFormat = info.spillFormat();
986
987 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
988
989 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
990
991 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
992 if (desiredFormat == DataFormatStrictInt52) {
993 if (spillFormat == DataFormatInt52)
994 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
995 info.fillStrictInt52(*m_stream, gpr);
996 return gpr;
997 }
998 if (spillFormat == DataFormatStrictInt52)
999 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1000 info.fillInt52(*m_stream, gpr);
1001 return gpr;
1002 }
1003
1004 case DataFormatStrictInt52: {
1005 GPRReg gpr = info.gpr();
1006 bool wasLocked = m_gprs.isLocked(gpr);
1007 lock(gpr);
1008 if (desiredFormat == DataFormatStrictInt52)
1009 return gpr;
1010 if (wasLocked) {
1011 GPRReg result = allocate();
1012 m_jit.move(gpr, result);
1013 unlock(gpr);
1014 gpr = result;
1015 } else
1016 info.fillInt52(*m_stream, gpr);
1017 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1018 return gpr;
1019 }
1020
1021 case DataFormatInt52: {
1022 GPRReg gpr = info.gpr();
1023 bool wasLocked = m_gprs.isLocked(gpr);
1024 lock(gpr);
1025 if (desiredFormat == DataFormatInt52)
1026 return gpr;
1027 if (wasLocked) {
1028 GPRReg result = allocate();
1029 m_jit.move(gpr, result);
1030 unlock(gpr);
1031 gpr = result;
1032 } else
1033 info.fillStrictInt52(*m_stream, gpr);
1034 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1035 return gpr;
1036 }
1037
1038 default:
1039 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1040 return InvalidGPRReg;
1041 }
1042 }
1043
1044 FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
1045 {
1046 ASSERT(edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepMachineIntUse);
1047 ASSERT(edge->hasDoubleResult());
1048 VirtualRegister virtualRegister = edge->virtualRegister();
1049 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1050
1051 if (info.registerFormat() == DataFormatNone) {
1052 if (edge->hasConstant()) {
1053 GPRReg gpr = allocate();
1054
1055 if (edge->isNumberConstant()) {
1056 FPRReg fpr = fprAllocate();
1057 m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(edge->asNumber())), gpr);
1058 m_jit.move64ToDouble(gpr, fpr);
1059 unlock(gpr);
1060
1061 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
1062 info.fillDouble(*m_stream, fpr);
1063 return fpr;
1064 }
1065 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1066 return fprAllocate();
1067 }
1068
1069 DataFormat spillFormat = info.spillFormat();
1070 if (spillFormat != DataFormatDouble) {
1071 DFG_CRASH(
1072 m_jit.graph(), m_currentNode, toCString(
1073 "Expected ", edge, " to have double format but instead it is spilled as ",
1074 dataFormatToString(spillFormat)).data());
1075 }
1076 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatDouble);
1077 FPRReg fpr = fprAllocate();
1078 m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
1079 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
1080 info.fillDouble(*m_stream, fpr);
1081 return fpr;
1082 }
1083
1084 DFG_ASSERT(m_jit.graph(), m_currentNode, info.registerFormat() == DataFormatDouble);
1085 FPRReg fpr = info.fpr();
1086 m_fprs.lock(fpr);
1087 return fpr;
1088 }
1089
1090 GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
1091 {
1092 AbstractValue& value = m_state.forNode(edge);
1093 SpeculatedType type = value.m_type;
1094 ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
1095
1096 m_interpreter.filter(value, SpecCell);
1097 if (value.isClear()) {
1098 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1099 return allocate();
1100 }
1101
1102 VirtualRegister virtualRegister = edge->virtualRegister();
1103 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1104
1105 switch (info.registerFormat()) {
1106 case DataFormatNone: {
1107 GPRReg gpr = allocate();
1108
1109 if (edge->hasConstant()) {
1110 JSValue jsValue = edge->asJSValue();
1111 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1112 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
1113 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1114 return gpr;
1115 }
1116
1117 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1118 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1119
1120 info.fillJSValue(*m_stream, gpr, DataFormatJS);
1121 if (type & ~SpecCell)
1122 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
1123 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1124 return gpr;
1125 }
1126
1127 case DataFormatCell:
1128 case DataFormatJSCell: {
1129 GPRReg gpr = info.gpr();
1130 m_gprs.lock(gpr);
1131 if (!ASSERT_DISABLED) {
1132 MacroAssembler::Jump checkCell = m_jit.branchIfCell(JSValueRegs(gpr));
1133 m_jit.abortWithReason(DFGIsNotCell);
1134 checkCell.link(&m_jit);
1135 }
1136 return gpr;
1137 }
1138
1139 case DataFormatJS: {
1140 GPRReg gpr = info.gpr();
1141 m_gprs.lock(gpr);
1142 if (type & ~SpecCell)
1143 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
1144 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1145 return gpr;
1146 }
1147
1148 case DataFormatJSInt32:
1149 case DataFormatInt32:
1150 case DataFormatJSDouble:
1151 case DataFormatJSBoolean:
1152 case DataFormatBoolean:
1153 case DataFormatDouble:
1154 case DataFormatStorage:
1155 case DataFormatInt52:
1156 case DataFormatStrictInt52:
1157 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1158
1159 default:
1160 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1161 return InvalidGPRReg;
1162 }
1163 }
1164
1165 GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
1166 {
1167 AbstractValue& value = m_state.forNode(edge);
1168 SpeculatedType type = value.m_type;
1169
1170 m_interpreter.filter(value, SpecBoolean);
1171 if (value.isClear()) {
1172 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1173 return allocate();
1174 }
1175
1176 VirtualRegister virtualRegister = edge->virtualRegister();
1177 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1178
1179 switch (info.registerFormat()) {
1180 case DataFormatNone: {
1181 GPRReg gpr = allocate();
1182
1183 if (edge->hasConstant()) {
1184 JSValue jsValue = edge->asJSValue();
1185 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1186 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
1187 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1188 return gpr;
1189 }
1190 DFG_ASSERT(m_jit.graph(), m_currentNode, info.spillFormat() & DataFormatJS);
1191 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1192 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1193
1194 info.fillJSValue(*m_stream, gpr, DataFormatJS);
1195 if (type & ~SpecBoolean) {
1196 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1197 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
1198 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1199 }
1200 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1201 return gpr;
1202 }
1203
1204 case DataFormatBoolean:
1205 case DataFormatJSBoolean: {
1206 GPRReg gpr = info.gpr();
1207 m_gprs.lock(gpr);
1208 return gpr;
1209 }
1210
1211 case DataFormatJS: {
1212 GPRReg gpr = info.gpr();
1213 m_gprs.lock(gpr);
1214 if (type & ~SpecBoolean) {
1215 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1216 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
1217 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1218 }
1219 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1220 return gpr;
1221 }
1222
1223 case DataFormatJSInt32:
1224 case DataFormatInt32:
1225 case DataFormatJSDouble:
1226 case DataFormatJSCell:
1227 case DataFormatCell:
1228 case DataFormatDouble:
1229 case DataFormatStorage:
1230 case DataFormatInt52:
1231 case DataFormatStrictInt52:
1232 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1233
1234 default:
1235 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1236 return InvalidGPRReg;
1237 }
1238 }
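// The boolean check above is a compact trick: xor-ing with ValueFalse maps
// the two encoded booleans to 0 and 1, so branchTest64 against ~1 catches
// every non-boolean, and the second xor restores the original encoding. The
// SpeculationRecovery entry tells the OSR-exit machinery to undo the first
// xor if the check fails, so the exit sees the unmodified JSValue.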
1239
1240 void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge)
1241 {
1242 #if ENABLE(GGC)
1243 ASSERT(!isKnownNotCell(valueEdge.node()));
1244
1245 SpeculateCellOperand base(this, baseEdge);
1246 JSValueOperand value(this, valueEdge);
1247 GPRTemporary scratch1(this);
1248 GPRTemporary scratch2(this);
1249
1250 writeBarrier(base.gpr(), value.gpr(), valueEdge, scratch1.gpr(), scratch2.gpr());
1251 #else
1252 UNUSED_PARAM(baseEdge);
1253 UNUSED_PARAM(valueEdge);
1254 #endif
1255 }
1256
1257 void SpeculativeJIT::compileObjectEquality(Node* node)
1258 {
1259 SpeculateCellOperand op1(this, node->child1());
1260 SpeculateCellOperand op2(this, node->child2());
1261 GPRTemporary result(this, Reuse, op1);
1262
1263 GPRReg op1GPR = op1.gpr();
1264 GPRReg op2GPR = op2.gpr();
1265 GPRReg resultGPR = result.gpr();
1266
1267 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1268 DFG_TYPE_CHECK(
1269 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1270 DFG_TYPE_CHECK(
1271 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
1272 } else {
1273 DFG_TYPE_CHECK(
1274 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1275 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1276 m_jit.branchTest8(
1277 MacroAssembler::NonZero,
1278 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1279 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1280
1281 DFG_TYPE_CHECK(
1282 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
1283 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1284 m_jit.branchTest8(
1285 MacroAssembler::NonZero,
1286 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1287 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1288 }
1289
1290 MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
1291 m_jit.move(TrustedImm32(ValueTrue), resultGPR);
1292 MacroAssembler::Jump done = m_jit.jump();
1293 falseCase.link(&m_jit);
1294 m_jit.move(TrustedImm32(ValueFalse), resultGPR);
1295 done.link(&m_jit);
1296
1297 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1298 }
1299
1300 void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
1301 {
1302 SpeculateCellOperand op1(this, objectChild);
1303 JSValueOperand op2(this, otherChild);
1304 GPRTemporary result(this);
1305
1306 GPRReg op1GPR = op1.gpr();
1307 GPRReg op2GPR = op2.gpr();
1308 GPRReg resultGPR = result.gpr();
1309
1310 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1311
1312 // At this point we know that we can perform a straight-forward equality comparison on pointer
1313 // values because we are doing strict equality.
1314 m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
1315 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1316 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1317 }
1318
1319 void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
1320 {
1321 BasicBlock* taken = branchNode->branchData()->taken.block;
1322 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1323
1324 SpeculateCellOperand op1(this, objectChild);
1325 JSValueOperand op2(this, otherChild);
1326
1327 GPRReg op1GPR = op1.gpr();
1328 GPRReg op2GPR = op2.gpr();
1329
1330 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1331
1332 if (taken == nextBlock()) {
1333 branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
1334 jump(taken);
1335 } else {
1336 branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken);
1337 jump(notTaken);
1338 }
1339 }
1340
1341 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
1342 {
1343 SpeculateCellOperand op1(this, leftChild);
1344 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1345 GPRTemporary result(this);
1346
1347 GPRReg op1GPR = op1.gpr();
1348 GPRReg op2GPR = op2.gpr();
1349 GPRReg resultGPR = result.gpr();
1350
1351 bool masqueradesAsUndefinedWatchpointValid =
1352 masqueradesAsUndefinedWatchpointIsStillValid();
1353
1354 if (masqueradesAsUndefinedWatchpointValid) {
1355 DFG_TYPE_CHECK(
1356 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1357 } else {
1358 DFG_TYPE_CHECK(
1359 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1360 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1361 m_jit.branchTest8(
1362 MacroAssembler::NonZero,
1363 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1364 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1365 }
1366
1367 // It seems that most of the time when programs do a == b where b may be either null/undefined
1368 // or an object, b is usually an object. Balance the branches to make that case fast.
1369 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
1370
1371 // We know that within this branch, rightChild must be a cell.
1372 if (masqueradesAsUndefinedWatchpointValid) {
1373 DFG_TYPE_CHECK(
1374 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1375 } else {
1376 DFG_TYPE_CHECK(
1377 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1378 speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
1379 m_jit.branchTest8(
1380 MacroAssembler::NonZero,
1381 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1382 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1383 }
1384
1385 // At this point we know that we can perform a straight-forward equality comparison on pointer
1386 // values because both left and right are pointers to objects that have no special equality
1387 // protocols.
1388 MacroAssembler::Jump falseCase = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, op2GPR);
1389 MacroAssembler::Jump trueCase = m_jit.jump();
1390
1391 rightNotCell.link(&m_jit);
1392
1393 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1394 // prove that it is either null or undefined.
1395 if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
1396 m_jit.move(op2GPR, resultGPR);
1397 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1398
1399 typeCheck(
1400 JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther,
1401 m_jit.branch64(
1402 MacroAssembler::NotEqual, resultGPR,
1403 MacroAssembler::TrustedImm64(ValueNull)));
1404 }
1405
1406 falseCase.link(&m_jit);
1407 m_jit.move(TrustedImm32(ValueFalse), resultGPR);
1408 MacroAssembler::Jump done = m_jit.jump();
1409 trueCase.link(&m_jit);
1410 m_jit.move(TrustedImm32(ValueTrue), resultGPR);
1411 done.link(&m_jit);
1412
1413 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1414 }
1415
1416 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
1417 {
1418 BasicBlock* taken = branchNode->branchData()->taken.block;
1419 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1420
1421 SpeculateCellOperand op1(this, leftChild);
1422 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1423 GPRTemporary result(this);
1424
1425 GPRReg op1GPR = op1.gpr();
1426 GPRReg op2GPR = op2.gpr();
1427 GPRReg resultGPR = result.gpr();
1428
1429 bool masqueradesAsUndefinedWatchpointValid =
1430 masqueradesAsUndefinedWatchpointIsStillValid();
1431
1432 if (masqueradesAsUndefinedWatchpointValid) {
1433 DFG_TYPE_CHECK(
1434 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1435 } else {
1436 DFG_TYPE_CHECK(
1437 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1438 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1439 m_jit.branchTest8(
1440 MacroAssembler::NonZero,
1441 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1442 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1443 }
1444
1445 // It seems that most of the time when programs do a == b where b may be either null/undefined
1446 // or an object, b is usually an object. Balance the branches to make that case fast.
1447 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
1448
1449 // We know that within this branch, rightChild must be a cell.
1450 if (masqueradesAsUndefinedWatchpointValid) {
1451 DFG_TYPE_CHECK(
1452 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1453 } else {
1454 DFG_TYPE_CHECK(
1455 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1456 speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
1457 m_jit.branchTest8(
1458 MacroAssembler::NonZero,
1459 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1460 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1461 }
1462
1463 // At this point we know that we can perform a straight-forward equality comparison on pointer
1464 // values because both left and right are pointers to objects that have no special equality
1465 // protocols.
1466 branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);
1467
1468 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1469 // prove that it is either null or undefined.
1470 if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
1471 rightNotCell.link(&m_jit);
1472 else {
1473 jump(notTaken, ForceJump);
1474
1475 rightNotCell.link(&m_jit);
1476 m_jit.move(op2GPR, resultGPR);
1477 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1478
1479 typeCheck(
1480 JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther, m_jit.branch64(
1481 MacroAssembler::NotEqual, resultGPR,
1482 MacroAssembler::TrustedImm64(ValueNull)));
1483 }
1484
1485 jump(notTaken);
1486 }
1487
1488 void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
1489 {
1490 SpeculateInt32Operand op1(this, node->child1());
1491 SpeculateInt32Operand op2(this, node->child2());
1492 GPRTemporary result(this, Reuse, op1, op2);
1493
1494 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
1495
1496 // If we add a DataFormatBool, we should use it here.
1497 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1498 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1499 }
1500
1501 void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalCondition condition)
1502 {
1503 SpeculateWhicheverInt52Operand op1(this, node->child1());
1504 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1505 GPRTemporary result(this, Reuse, op1, op2);
1506
1507 m_jit.compare64(condition, op1.gpr(), op2.gpr(), result.gpr());
1508
1509 // If we add a DataFormatBool, we should use it here.
1510 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1511 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1512 }
1513
1514 void SpeculativeJIT::compilePeepHoleInt52Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1515 {
1516 BasicBlock* taken = branchNode->branchData()->taken.block;
1517 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1518
1519 // The branch instruction will branch to the taken block.
1520 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1521 if (taken == nextBlock()) {
1522 condition = JITCompiler::invert(condition);
1523 BasicBlock* tmp = taken;
1524 taken = notTaken;
1525 notTaken = tmp;
1526 }
1527
1528 SpeculateWhicheverInt52Operand op1(this, node->child1());
1529 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1530
1531 branch64(condition, op1.gpr(), op2.gpr(), taken);
1532 jump(notTaken);
1533 }
1534
1535 void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
1536 {
1537 SpeculateDoubleOperand op1(this, node->child1());
1538 SpeculateDoubleOperand op2(this, node->child2());
1539 GPRTemporary result(this);
1540
1541 m_jit.move(TrustedImm32(ValueTrue), result.gpr());
1542 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
1543 m_jit.xor64(TrustedImm32(true), result.gpr());
1544 trueCase.link(&m_jit);
1545
1546 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1547 }
1548
1549 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
1550 {
1551 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1552 GPRTemporary result(this);
1553 GPRReg valueGPR = value.gpr();
1554 GPRReg resultGPR = result.gpr();
1555 GPRTemporary structure;
1556 GPRReg structureGPR = InvalidGPRReg;
1557 GPRTemporary scratch;
1558 GPRReg scratchGPR = InvalidGPRReg;
1559
1560 bool masqueradesAsUndefinedWatchpointValid =
1561 masqueradesAsUndefinedWatchpointIsStillValid();
1562
1563 if (!masqueradesAsUndefinedWatchpointValid) {
1564 // The masquerades as undefined case will use the structure register, so allocate it here.
1565 // Do this at the top of the function to avoid branching around a register allocation.
1566 GPRTemporary realStructure(this);
1567 GPRTemporary realScratch(this);
1568 structure.adopt(realStructure);
1569 scratch.adopt(realScratch);
1570 structureGPR = structure.gpr();
1571 scratchGPR = scratch.gpr();
1572 }
1573
1574 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
1575 if (masqueradesAsUndefinedWatchpointValid) {
1576 DFG_TYPE_CHECK(
1577 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1578 } else {
1579 DFG_TYPE_CHECK(
1580 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1581
1582 MacroAssembler::Jump isNotMasqueradesAsUndefined =
1583 m_jit.branchTest8(
1584 MacroAssembler::Zero,
1585 MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
1586 MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
1587
1588 m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
1589 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
1590 m_jit.branchPtr(
1591 MacroAssembler::Equal,
1592 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
1593 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
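1594 // (The global-object compare above reflects that, in this engine, a MasqueradesAsUndefined
1594 // object is treated as undefined only by code from its own global object; if the structure's
1594 // global object matches this code origin's, the "object is truthy here" assumption would be
1594 // wrong, so we OSR exit.)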
1594
1595 isNotMasqueradesAsUndefined.link(&m_jit);
1596 }
1597 m_jit.move(TrustedImm32(ValueFalse), resultGPR);
1598 MacroAssembler::Jump done = m_jit.jump();
1599
1600 notCell.link(&m_jit);
1601
1602 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
1603 m_jit.move(valueGPR, resultGPR);
1604 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1605 typeCheck(
1606 JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
1607 MacroAssembler::NotEqual,
1608 resultGPR,
1609 MacroAssembler::TrustedImm64(ValueNull)));
1610 }
1611 m_jit.move(TrustedImm32(ValueTrue), resultGPR);
1612
1613 done.link(&m_jit);
1614
1615 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1616 }
1617
1618 void SpeculativeJIT::compileLogicalNot(Node* node)
1619 {
1620 switch (node->child1().useKind()) {
1621 case ObjectOrOtherUse: {
1622 compileObjectOrOtherLogicalNot(node->child1());
1623 return;
1624 }
1625
1626 case Int32Use: {
1627 SpeculateInt32Operand value(this, node->child1());
1628 GPRTemporary result(this, Reuse, value);
1629 m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
1630 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1631 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1632 return;
1633 }
1634
1635 case DoubleRepUse: {
1636 SpeculateDoubleOperand value(this, node->child1());
1637 FPRTemporary scratch(this);
1638 GPRTemporary result(this);
1639 m_jit.move(TrustedImm32(ValueFalse), result.gpr());
1640 MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
1641 m_jit.xor32(TrustedImm32(true), result.gpr());
1642 nonZero.link(&m_jit);
1643 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1644 return;
1645 }
1646
1647 case BooleanUse: {
1648 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
1649 SpeculateBooleanOperand value(this, node->child1());
1650 GPRTemporary result(this, Reuse, value);
1651
1652 m_jit.move(value.gpr(), result.gpr());
1653 m_jit.xor64(TrustedImm32(true), result.gpr());
1654
1655 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1656 return;
1657 }
1658
1659 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
1660 GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
1661
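1662 // (Sketch of the trick below, assuming the usual boolean encoding: XOR-ing with ValueFalse
1662 // maps false to 0 and true to 1, while any non-boolean leaves bits outside the low bit set,
1662 // which the type check catches; XOR-ing the surviving 0/1 with ValueTrue then yields the
1662 // negated, re-boxed boolean.)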
1662 m_jit.move(value.gpr(), result.gpr());
1663 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
1664 typeCheck(
1665 JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
1666 JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1667 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
1668
1669 // If we add a DataFormatBool, we should use it here.
1670 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1671 return;
1672 }
1673
1674 case UntypedUse: {
1675 JSValueOperand arg1(this, node->child1());
1676 GPRTemporary result(this);
1677
1678 GPRReg arg1GPR = arg1.gpr();
1679 GPRReg resultGPR = result.gpr();
1680
1681 arg1.use();
1682
1683 m_jit.move(arg1GPR, resultGPR);
1684 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR);
1685 JITCompiler::Jump slowCase = m_jit.branchTest64(JITCompiler::NonZero, resultGPR, TrustedImm32(static_cast<int32_t>(~1)));
1686
1687 addSlowPathGenerator(
1688 slowPathCall(slowCase, this, operationConvertJSValueToBoolean, resultGPR, arg1GPR));
1689
1690 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR);
1691 jsValueResult(resultGPR, node, DataFormatJSBoolean, UseChildrenCalledExplicitly);
1692 return;
1693 }
1694 case StringUse:
1695 return compileStringZeroLength(node);
1696
1697 default:
1698 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1699 break;
1700 }
1701 }
1702
1703 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
1704 {
1705 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1706 GPRTemporary scratch(this);
1707 GPRTemporary structure;
1708 GPRReg valueGPR = value.gpr();
1709 GPRReg scratchGPR = scratch.gpr();
1710 GPRReg structureGPR = InvalidGPRReg;
1711
1712 if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
1713 GPRTemporary realStructure(this);
1714 structure.adopt(realStructure);
1715 structureGPR = structure.gpr();
1716 }
1717
1718 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
1719 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1720 DFG_TYPE_CHECK(
1721 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1722 } else {
1723 DFG_TYPE_CHECK(
1724 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1725
1726 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
1727 JITCompiler::Zero,
1728 MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
1729 TrustedImm32(MasqueradesAsUndefined));
1730
1731 m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
1732 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
1733 m_jit.branchPtr(
1734 MacroAssembler::Equal,
1735 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
1736 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
1737
1738 isNotMasqueradesAsUndefined.link(&m_jit);
1739 }
1740 jump(taken, ForceJump);
1741
1742 notCell.link(&m_jit);
1743
1744 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
1745 m_jit.move(valueGPR, scratchGPR);
1746 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
1747 typeCheck(
1748 JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
1749 MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
1750 }
1751 jump(notTaken);
1752
1753 noResult(m_currentNode);
1754 }
1755
1756 void SpeculativeJIT::emitBranch(Node* node)
1757 {
1758 BasicBlock* taken = node->branchData()->taken.block;
1759 BasicBlock* notTaken = node->branchData()->notTaken.block;
1760
1761 switch (node->child1().useKind()) {
1762 case ObjectOrOtherUse: {
1763 emitObjectOrOtherBranch(node->child1(), taken, notTaken);
1764 return;
1765 }
1766
1767 case Int32Use:
1768 case DoubleRepUse: {
1769 if (node->child1().useKind() == Int32Use) {
1770 bool invert = false;
1771
1772 if (taken == nextBlock()) {
1773 invert = true;
1774 BasicBlock* tmp = taken;
1775 taken = notTaken;
1776 notTaken = tmp;
1777 }
1778
1779 SpeculateInt32Operand value(this, node->child1());
1780 branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
1781 } else {
1782 SpeculateDoubleOperand value(this, node->child1());
1783 FPRTemporary scratch(this);
1784 branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
1785 }
1786
1787 jump(notTaken);
1788
1789 noResult(node);
1790 return;
1791 }
1792
1793 case StringUse: {
1794 emitStringBranch(node->child1(), taken, notTaken);
1795 return;
1796 }
1797
1798 case UntypedUse:
1799 case BooleanUse: {
1800 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
1801 GPRReg valueGPR = value.gpr();
1802
1803 if (node->child1().useKind() == BooleanUse) {
1804 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
1805 MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
1806
1807 if (taken == nextBlock()) {
1808 condition = MacroAssembler::Zero;
1809 BasicBlock* tmp = taken;
1810 taken = notTaken;
1811 notTaken = tmp;
1812 }
1813
1814 branchTest32(condition, valueGPR, TrustedImm32(true), taken);
1815 jump(notTaken);
1816 } else {
1817 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
1818 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
1819
1820 typeCheck(JSValueRegs(valueGPR), node->child1(), SpecBoolean, m_jit.jump());
1821 }
1822 value.use();
1823 } else {
1824 GPRTemporary result(this);
1825 GPRReg resultGPR = result.gpr();
1826
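1827 // (Fast-path sketch, assuming the usual JSVALUE64 encoding where int32s are stored as
1827 // TagTypeNumber | value: the first branch below sends an encoded zero to notTaken, and the
1827 // unsigned compare against tagTypeNumberRegister then sends any other int32, which is
1827 // necessarily truthy, straight to taken.)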
1827 if (node->child1()->prediction() & SpecInt32) {
1828 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
1829 branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
1830 }
1831
1832 if (node->child1()->prediction() & SpecBoolean) {
1833 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
1834 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
1835 }
1836
1837 value.use();
1838
1839 silentSpillAllRegisters(resultGPR);
1840 callOperation(operationConvertJSValueToBoolean, resultGPR, valueGPR);
1841 silentFillAllRegisters(resultGPR);
1842
1843 branchTest32(MacroAssembler::NonZero, resultGPR, taken);
1844 jump(notTaken);
1845 }
1846
1847 noResult(node, UseChildrenCalledExplicitly);
1848 return;
1849 }
1850
1851 default:
1852 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad use kind");
1853 }
1854 }
1855
1856 void SpeculativeJIT::compile(Node* node)
1857 {
1858 NodeType op = node->op();
1859
1860 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1861 m_jit.clearRegisterAllocationOffsets();
1862 #endif
1863
1864 switch (op) {
1865 case JSConstant:
1866 case DoubleConstant:
1867 case Int52Constant:
1868 case PhantomDirectArguments:
1869 case PhantomClonedArguments:
1870 initConstantInfo(node);
1871 break;
1872
1873 case Identity: {
1874 speculate(node, node->child1());
1875 switch (node->child1().useKind()) {
1876 case DoubleRepUse:
1877 case DoubleRepRealUse:
1878 case DoubleRepMachineIntUse: {
1879 SpeculateDoubleOperand op(this, node->child1());
1880 FPRTemporary scratch(this, op);
1881 m_jit.moveDouble(op.fpr(), scratch.fpr());
1882 doubleResult(scratch.fpr(), node);
1883 break;
1884 }
1885 case Int52RepUse: {
1886 SpeculateInt52Operand op(this, node->child1());
1887 GPRTemporary result(this, Reuse, op);
1888 m_jit.move(op.gpr(), result.gpr());
1889 int52Result(result.gpr(), node);
1890 break;
1891 }
1892 default: {
1893 JSValueOperand op(this, node->child1());
1894 GPRTemporary result(this, Reuse, op);
1895 m_jit.move(op.gpr(), result.gpr());
1896 jsValueResult(result.gpr(), node);
1897 break;
1898 }
1899 } // switch
1900 break;
1901 }
1902
1903 case GetLocal: {
1904 AbstractValue& value = m_state.variables().operand(node->local());
1905
1906 // If the CFA is tracking this variable and it found that the variable
1907 // cannot have been assigned, then don't attempt to proceed.
1908 if (value.isClear()) {
1909 m_compileOkay = false;
1910 break;
1911 }
1912
1913 switch (node->variableAccessData()->flushFormat()) {
1914 case FlushedDouble: {
1915 FPRTemporary result(this);
1916 m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr());
1917 VirtualRegister virtualRegister = node->virtualRegister();
1918 m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
1919 generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
1920 break;
1921 }
1922
1923 case FlushedInt32: {
1924 GPRTemporary result(this);
1925 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
1926
1927 // Like int32Result, but don't useChildren - our children are phi nodes,
1928 // and don't represent values within this dataflow with virtual registers.
1929 VirtualRegister virtualRegister = node->virtualRegister();
1930 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
1931 generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
1932 break;
1933 }
1934
1935 case FlushedInt52: {
1936 GPRTemporary result(this);
1937 m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr());
1938
1939 VirtualRegister virtualRegister = node->virtualRegister();
1940 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
1941 generationInfoFromVirtualRegister(virtualRegister).initInt52(node, node->refCount(), result.gpr());
1942 break;
1943 }
1944
1945 default:
1946 GPRTemporary result(this);
1947 m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr());
1948
1949 // Like jsValueResult, but don't useChildren - our children are phi nodes,
1950 // and don't represent values within this dataflow with virtual registers.
1951 VirtualRegister virtualRegister = node->virtualRegister();
1952 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
1953
1954 DataFormat format;
1955 if (isCellSpeculation(value.m_type))
1956 format = DataFormatJSCell;
1957 else if (isBooleanSpeculation(value.m_type))
1958 format = DataFormatJSBoolean;
1959 else
1960 format = DataFormatJS;
1961
1962 generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), result.gpr(), format);
1963 break;
1964 }
1965 break;
1966 }
1967
1968 case GetLocalUnlinked: {
1969 GPRTemporary result(this);
1970
1971 m_jit.load64(JITCompiler::addressFor(node->unlinkedMachineLocal()), result.gpr());
1972
1973 jsValueResult(result.gpr(), node);
1974 break;
1975 }
1976
1977 case MovHint: {
1978 compileMovHint(m_currentNode);
1979 noResult(node);
1980 break;
1981 }
1982
1983 case ZombieHint: {
1984 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
1985 noResult(node);
1986 break;
1987 }
1988
1989 case SetLocal: {
1990 switch (node->variableAccessData()->flushFormat()) {
1991 case FlushedDouble: {
1992 SpeculateDoubleOperand value(this, node->child1());
1993 m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal()));
1994 noResult(node);
1995 // Indicate that it's no longer necessary to retrieve the value of
1996 // this bytecode variable from registers or other locations on the stack,
1997 // but that it is stored as a double.
1998 recordSetLocal(DataFormatDouble);
1999 break;
2000 }
2001
2002 case FlushedInt32: {
2003 SpeculateInt32Operand value(this, node->child1());
2004 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
2005 noResult(node);
2006 recordSetLocal(DataFormatInt32);
2007 break;
2008 }
2009
2010 case FlushedInt52: {
2011 SpeculateInt52Operand value(this, node->child1());
2012 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal()));
2013 noResult(node);
2014 recordSetLocal(DataFormatInt52);
2015 break;
2016 }
2017
2018 case FlushedCell: {
2019 SpeculateCellOperand cell(this, node->child1());
2020 GPRReg cellGPR = cell.gpr();
2021 m_jit.store64(cellGPR, JITCompiler::addressFor(node->machineLocal()));
2022 noResult(node);
2023 recordSetLocal(DataFormatCell);
2024 break;
2025 }
2026
2027 case FlushedBoolean: {
2028 SpeculateBooleanOperand boolean(this, node->child1());
2029 m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node->machineLocal()));
2030 noResult(node);
2031 recordSetLocal(DataFormatBoolean);
2032 break;
2033 }
2034
2035 case FlushedJSValue: {
2036 JSValueOperand value(this, node->child1());
2037 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal()));
2038 noResult(node);
2039 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2040 break;
2041 }
2042
2043 default:
2044 DFG_CRASH(m_jit.graph(), node, "Bad flush format");
2045 break;
2046 }
2047
2048 break;
2049 }
2050
2051 case SetArgument:
2052 // This is a no-op; it just marks the fact that the argument is being used.
2053 // But it may be profitable to use this as a hook to run speculation checks
2054 // on arguments, thereby allowing us to trivially eliminate such checks if
2055 // the argument is not used.
2056 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2057 break;
2058
2059 case BitAnd:
2060 case BitOr:
2061 case BitXor:
2062 if (node->child1()->isInt32Constant()) {
2063 SpeculateInt32Operand op2(this, node->child2());
2064 GPRTemporary result(this, Reuse, op2);
2065
2066 bitOp(op, node->child1()->asInt32(), op2.gpr(), result.gpr());
2067
2068 int32Result(result.gpr(), node);
2069 } else if (node->child2()->isInt32Constant()) {
2070 SpeculateInt32Operand op1(this, node->child1());
2071 GPRTemporary result(this, Reuse, op1);
2072
2073 bitOp(op, node->child2()->asInt32(), op1.gpr(), result.gpr());
2074
2075 int32Result(result.gpr(), node);
2076 } else {
2077 SpeculateInt32Operand op1(this, node->child1());
2078 SpeculateInt32Operand op2(this, node->child2());
2079 GPRTemporary result(this, Reuse, op1, op2);
2080
2081 GPRReg reg1 = op1.gpr();
2082 GPRReg reg2 = op2.gpr();
2083 bitOp(op, reg1, reg2, result.gpr());
2084
2085 int32Result(result.gpr(), node);
2086 }
2087 break;
2088
2089 case BitRShift:
2090 case BitLShift:
2091 case BitURShift:
2092 if (node->child2()->isInt32Constant()) {
2093 SpeculateInt32Operand op1(this, node->child1());
2094 GPRTemporary result(this, Reuse, op1);
2095
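2096 // The & 0x1f below matches JavaScript shift semantics: shift counts are taken modulo 32.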
2096 shiftOp(op, op1.gpr(), node->child2()->asInt32() & 0x1f, result.gpr());
2097
2098 int32Result(result.gpr(), node);
2099 } else {
2100 // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
2101 SpeculateInt32Operand op1(this, node->child1());
2102 SpeculateInt32Operand op2(this, node->child2());
2103 GPRTemporary result(this, Reuse, op1);
2104
2105 GPRReg reg1 = op1.gpr();
2106 GPRReg reg2 = op2.gpr();
2107 shiftOp(op, reg1, reg2, result.gpr());
2108
2109 int32Result(result.gpr(), node);
2110 }
2111 break;
2112
2113 case UInt32ToNumber: {
2114 compileUInt32ToNumber(node);
2115 break;
2116 }
2117
2118 case DoubleAsInt32: {
2119 compileDoubleAsInt32(node);
2120 break;
2121 }
2122
2123 case ValueToInt32: {
2124 compileValueToInt32(node);
2125 break;
2126 }
2127
2128 case DoubleRep: {
2129 compileDoubleRep(node);
2130 break;
2131 }
2132
2133 case ValueRep: {
2134 compileValueRep(node);
2135 break;
2136 }
2137
2138 case Int52Rep: {
2139 switch (node->child1().useKind()) {
2140 case Int32Use: {
2141 SpeculateInt32Operand operand(this, node->child1());
2142 GPRTemporary result(this, Reuse, operand);
2143
2144 m_jit.signExtend32ToPtr(operand.gpr(), result.gpr());
2145
2146 strictInt52Result(result.gpr(), node);
2147 break;
2148 }
2149
2150 case MachineIntUse: {
2151 GPRTemporary result(this);
2152 GPRReg resultGPR = result.gpr();
2153
2154 convertMachineInt(node->child1(), resultGPR);
2155
2156 strictInt52Result(resultGPR, node);
2157 break;
2158 }
2159
2160 case DoubleRepMachineIntUse: {
2161 SpeculateDoubleOperand value(this, node->child1());
2162 FPRReg valueFPR = value.fpr();
2163
2164 GPRFlushedCallResult result(this);
2165 GPRReg resultGPR = result.gpr();
2166
2167 flushRegisters();
2168
2169 callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);
2170
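2171 // (operationConvertDoubleToInt52 is expected to return the JSValue::notInt52 sentinel when
2171 // the double has no exact int52 representation; the check below OSR exits in that case.)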
2171 DFG_TYPE_CHECK(
2172 JSValueRegs(), node->child1(), SpecInt52AsDouble,
2173 m_jit.branch64(
2174 JITCompiler::Equal, resultGPR,
2175 JITCompiler::TrustedImm64(JSValue::notInt52)));
2176
2177 strictInt52Result(resultGPR, node);
2178 break;
2179 }
2180
2181 default:
2182 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
2183 }
2184 break;
2185 }
2186
2187 case ValueAdd: {
2188 JSValueOperand op1(this, node->child1());
2189 JSValueOperand op2(this, node->child2());
2190
2191 GPRReg op1GPR = op1.gpr();
2192 GPRReg op2GPR = op2.gpr();
2193
2194 flushRegisters();
2195
2196 GPRFlushedCallResult result(this);
2197 if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node()))
2198 callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR);
2199 else
2200 callOperation(operationValueAdd, result.gpr(), op1GPR, op2GPR);
2201
2202 jsValueResult(result.gpr(), node);
2203 break;
2204 }
2205
2206 case ArithAdd:
2207 compileAdd(node);
2208 break;
2209
2210 case ArithClz32:
2211 compileArithClz32(node);
2212 break;
2213
2214 case MakeRope:
2215 compileMakeRope(node);
2216 break;
2217
2218 case ArithSub:
2219 compileArithSub(node);
2220 break;
2221
2222 case ArithNegate:
2223 compileArithNegate(node);
2224 break;
2225
2226 case ArithMul:
2227 compileArithMul(node);
2228 break;
2229
2230 case ArithDiv: {
2231 compileArithDiv(node);
2232 break;
2233 }
2234
2235 case ArithMod: {
2236 compileArithMod(node);
2237 break;
2238 }
2239
2240 case ArithAbs: {
2241 switch (node->child1().useKind()) {
2242 case Int32Use: {
2243 SpeculateStrictInt32Operand op1(this, node->child1());
2244 GPRTemporary result(this);
2245 GPRTemporary scratch(this);
2246
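2247 // (Branchless abs sketch: an arithmetic shift right by 31 yields 0 for non-negative values
2247 // and -1 for negative ones; adding that mask and then XOR-ing with it negates negatives,
2247 // e.g. x = -5: mask = -1, -5 + -1 = -6, -6 ^ -1 = 5. The overflow check below catches
2247 // abs(INT_MIN), which is not representable as an int32.)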
2247 m_jit.move(op1.gpr(), result.gpr());
2248 m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
2249 m_jit.add32(scratch.gpr(), result.gpr());
2250 m_jit.xor32(scratch.gpr(), result.gpr());
2251 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
2252 int32Result(result.gpr(), node);
2253 break;
2254 }
2255
2256 case DoubleRepUse: {
2257 SpeculateDoubleOperand op1(this, node->child1());
2258 FPRTemporary result(this);
2259
2260 m_jit.absDouble(op1.fpr(), result.fpr());
2261 doubleResult(result.fpr(), node);
2262 break;
2263 }
2264
2265 default:
2266 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
2267 break;
2268 }
2269 break;
2270 }
2271
2272 case ArithMin:
2273 case ArithMax: {
2274 switch (node->binaryUseKind()) {
2275 case Int32Use: {
2276 SpeculateStrictInt32Operand op1(this, node->child1());
2277 SpeculateStrictInt32Operand op2(this, node->child2());
2278 GPRTemporary result(this, Reuse, op1);
2279
2280 MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
2281 m_jit.move(op2.gpr(), result.gpr());
2282 if (op1.gpr() != result.gpr()) {
2283 MacroAssembler::Jump done = m_jit.jump();
2284 op1Less.link(&m_jit);
2285 m_jit.move(op1.gpr(), result.gpr());
2286 done.link(&m_jit);
2287 } else
2288 op1Less.link(&m_jit);
2289
2290 int32Result(result.gpr(), node);
2291 break;
2292 }
2293
2294 case DoubleRepUse: {
2295 SpeculateDoubleOperand op1(this, node->child1());
2296 SpeculateDoubleOperand op2(this, node->child2());
2297 FPRTemporary result(this, op1);
2298
2299 FPRReg op1FPR = op1.fpr();
2300 FPRReg op2FPR = op2.fpr();
2301 FPRReg resultFPR = result.fpr();
2302
2303 MacroAssembler::JumpList done;
2304
2305 MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
2306
2307 // op2 is either the result (the lesser one for ArithMin, the greater for ArithMax), or one of them is NaN
2308 MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
2309
2310 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2311 // op1 + op2 and putting it into result.
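2312 // (Adding the two operands propagates the NaN into the result, matching Math.min/Math.max,
2312 // which return NaN whenever either argument is NaN.)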
2312 m_jit.addDouble(op1FPR, op2FPR, resultFPR);
2313 done.append(m_jit.jump());
2314
2315 op2Less.link(&m_jit);
2316 m_jit.moveDouble(op2FPR, resultFPR);
2317
2318 if (op1FPR != resultFPR) {
2319 done.append(m_jit.jump());
2320
2321 op1Less.link(&m_jit);
2322 m_jit.moveDouble(op1FPR, resultFPR);
2323 } else
2324 op1Less.link(&m_jit);
2325
2326 done.link(&m_jit);
2327
2328 doubleResult(resultFPR, node);
2329 break;
2330 }
2331
2332 default:
2333 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
2334 break;
2335 }
2336 break;
2337 }
2338
2339 case ArithPow:
2340 compileArithPow(node);
2341 break;
2342
2343 case ArithSqrt:
2344 compileArithSqrt(node);
2345 break;
2346
2347 case ArithFRound: {
2348 SpeculateDoubleOperand op1(this, node->child1());
2349 FPRTemporary result(this, op1);
2350
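2351 // Rounding to float precision and widening back to double implements Math.fround semantics.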
2351 m_jit.convertDoubleToFloat(op1.fpr(), result.fpr());
2352 m_jit.convertFloatToDouble(result.fpr(), result.fpr());
2353
2354 doubleResult(result.fpr(), node);
2355 break;
2356 }
2357
2358 case ArithRound:
2359 compileArithRound(node);
2360 break;
2361
2362 case ArithSin: {
2363 SpeculateDoubleOperand op1(this, node->child1());
2364 FPRReg op1FPR = op1.fpr();
2365
2366 flushRegisters();
2367
2368 FPRResult result(this);
2369 callOperation(sin, result.fpr(), op1FPR);
2370 doubleResult(result.fpr(), node);
2371 break;
2372 }
2373
2374 case ArithCos: {
2375 SpeculateDoubleOperand op1(this, node->child1());
2376 FPRReg op1FPR = op1.fpr();
2377
2378 flushRegisters();
2379
2380 FPRResult result(this);
2381 callOperation(cos, result.fpr(), op1FPR);
2382 doubleResult(result.fpr(), node);
2383 break;
2384 }
2385
2386 case ArithLog:
2387 compileArithLog(node);
2388 break;
2389
2390 case LogicalNot:
2391 compileLogicalNot(node);
2392 break;
2393
2394 case CompareLess:
2395 if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
2396 return;
2397 break;
2398
2399 case CompareLessEq:
2400 if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
2401 return;
2402 break;
2403
2404 case CompareGreater:
2405 if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
2406 return;
2407 break;
2408
2409 case CompareGreaterEq:
2410 if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
2411 return;
2412 break;
2413
2414 case CompareEqConstant:
2415 ASSERT(node->child2()->asJSValue().isNull());
2416 if (nonSpeculativeCompareNull(node, node->child1()))
2417 return;
2418 break;
2419
2420 case CompareEq:
2421 if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
2422 return;
2423 break;
2424
2425 case CompareStrictEq:
2426 if (compileStrictEq(node))
2427 return;
2428 break;
2429
2430 case StringCharCodeAt: {
2431 compileGetCharCodeAt(node);
2432 break;
2433 }
2434
2435 case StringCharAt: {
2436 // Relies on the StringCharAt node having the same basic layout as GetByVal.
2437 compileGetByValOnString(node);
2438 break;
2439 }
2440
2441 case StringFromCharCode: {
2442 compileFromCharCode(node);
2443 break;
2444 }
2445
2446 case CheckArray: {
2447 checkArray(node);
2448 break;
2449 }
2450
2451 case Arrayify:
2452 case ArrayifyToStructure: {
2453 arrayify(node);
2454 break;
2455 }
2456
2457 case GetByVal: {
2458 switch (node->arrayMode().type()) {
2459 case Array::SelectUsingPredictions:
2460 case Array::ForceExit:
2461 DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
2462 break;
2463 case Array::Generic: {
2464 JSValueOperand base(this, node->child1());
2465 JSValueOperand property(this, node->child2());
2466 GPRReg baseGPR = base.gpr();
2467 GPRReg propertyGPR = property.gpr();
2468
2469 flushRegisters();
2470 GPRFlushedCallResult result(this);
2471 callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR);
2472
2473 jsValueResult(result.gpr(), node);
2474 break;
2475 }
2476 case Array::Int32:
2477 case Array::Contiguous: {
2478 if (node->arrayMode().isInBounds()) {
2479 SpeculateStrictInt32Operand property(this, node->child2());
2480 StorageOperand storage(this, node->child3());
2481
2482 GPRReg propertyReg = property.gpr();
2483 GPRReg storageReg = storage.gpr();
2484
2485 if (!m_compileOkay)
2486 return;
2487
2488 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2489
2490 GPRTemporary result(this);
2491 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
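2492 // (In contiguous storage a hole is the all-zero bit pattern, which is never a valid JSValue
2492 // encoding, so the zero test below is a hole check. With a sane prototype chain a hole can
2492 // simply be returned as undefined; otherwise we OSR exit.)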
2492 if (node->arrayMode().isSaneChain()) {
2493 ASSERT(node->arrayMode().type() == Array::Contiguous);
2494 JITCompiler::Jump notHole = m_jit.branchTest64(
2495 MacroAssembler::NonZero, result.gpr());
2496 m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), result.gpr());
2497 notHole.link(&m_jit);
2498 } else {
2499 speculationCheck(
2500 LoadFromHole, JSValueRegs(), 0,
2501 m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2502 }
2503 jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? DataFormatJSInt32 : DataFormatJS);
2504 break;
2505 }
2506
2507 SpeculateCellOperand base(this, node->child1());
2508 SpeculateStrictInt32Operand property(this, node->child2());
2509 StorageOperand storage(this, node->child3());
2510
2511 GPRReg baseReg = base.gpr();
2512 GPRReg propertyReg = property.gpr();
2513 GPRReg storageReg = storage.gpr();
2514
2515 if (!m_compileOkay)
2516 return;
2517
2518 GPRTemporary result(this);
2519 GPRReg resultReg = result.gpr();
2520
2521 MacroAssembler::JumpList slowCases;
2522
2523 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2524
2525 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2526 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2527
2528 addSlowPathGenerator(
2529 slowPathCall(
2530 slowCases, this, operationGetByValArrayInt,
2531 result.gpr(), baseReg, propertyReg));
2532
2533 jsValueResult(resultReg, node);
2534 break;
2535 }
2536
2537 case Array::Double: {
2538 if (node->arrayMode().isInBounds()) {
2539 SpeculateStrictInt32Operand property(this, node->child2());
2540 StorageOperand storage(this, node->child3());
2541
2542 GPRReg propertyReg = property.gpr();
2543 GPRReg storageReg = storage.gpr();
2544
2545 if (!m_compileOkay)
2546 return;
2547
2548 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2549
2550 FPRTemporary result(this);
2551 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
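2552 // (Holes in double storage hold a NaN pattern, so the self-comparison below with
2552 // DoubleNotEqualOrUnordered acts as a hole check; with a sane prototype chain the check
2552 // is skipped.)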
2552 if (!node->arrayMode().isSaneChain())
2553 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
2554 doubleResult(result.fpr(), node);
2555 break;
2556 }
2557
2558 SpeculateCellOperand base(this, node->child1());
2559 SpeculateStrictInt32Operand property(this, node->child2());
2560 StorageOperand storage(this, node->child3());
2561
2562 GPRReg baseReg = base.gpr();
2563 GPRReg propertyReg = property.gpr();
2564 GPRReg storageReg = storage.gpr();
2565
2566 if (!m_compileOkay)
2567 return;
2568
2569 GPRTemporary result(this);
2570 FPRTemporary temp(this);
2571 GPRReg resultReg = result.gpr();
2572 FPRReg tempReg = temp.fpr();
2573
2574 MacroAssembler::JumpList slowCases;
2575
2576 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2577
2578 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
2579 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
2580 boxDouble(tempReg, resultReg);
2581
2582 addSlowPathGenerator(
2583 slowPathCall(
2584 slowCases, this, operationGetByValArrayInt,
2585 result.gpr(), baseReg, propertyReg));
2586
2587 jsValueResult(resultReg, node);
2588 break;
2589 }
2590
2591 case Array::ArrayStorage:
2592 case Array::SlowPutArrayStorage: {
2593 if (node->arrayMode().isInBounds()) {
2594 SpeculateStrictInt32Operand property(this, node->child2());
2595 StorageOperand storage(this, node->child3());
2596
2597 GPRReg propertyReg = property.gpr();
2598 GPRReg storageReg = storage.gpr();
2599
2600 if (!m_compileOkay)
2601 return;
2602
2603 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2604
2605 GPRTemporary result(this);
2606 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
2607 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2608
2609 jsValueResult(result.gpr(), node);
2610 break;
2611 }
2612
2613 SpeculateCellOperand base(this, node->child1());
2614 SpeculateStrictInt32Operand property(this, node->child2());
2615 StorageOperand storage(this, node->child3());
2616
2617 GPRReg baseReg = base.gpr();
2618 GPRReg propertyReg = property.gpr();
2619 GPRReg storageReg = storage.gpr();
2620
2621 if (!m_compileOkay)
2622 return;
2623
2624 GPRTemporary result(this);
2625 GPRReg resultReg = result.gpr();
2626
2627 MacroAssembler::JumpList slowCases;
2628
2629 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2630
2631 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
2632 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2633
2634 addSlowPathGenerator(
2635 slowPathCall(
2636 slowCases, this, operationGetByValArrayInt,
2637 result.gpr(), baseReg, propertyReg));
2638
2639 jsValueResult(resultReg, node);
2640 break;
2641 }
2642 case Array::String:
2643 compileGetByValOnString(node);
2644 break;
2645 case Array::DirectArguments:
2646 compileGetByValOnDirectArguments(node);
2647 break;
2648 case Array::ScopedArguments:
2649 compileGetByValOnScopedArguments(node);
2650 break;
2651 default: {
2652 TypedArrayType type = node->arrayMode().typedArrayType();
2653 if (isInt(type))
2654 compileGetByValOnIntTypedArray(node, type);
2655 else
2656 compileGetByValOnFloatTypedArray(node, type);
2657 } }
2658 break;
2659 }
2660
2661 case PutByValDirect:
2662 case PutByVal:
2663 case PutByValAlias: {
2664 Edge child1 = m_jit.graph().varArgChild(node, 0);
2665 Edge child2 = m_jit.graph().varArgChild(node, 1);
2666 Edge child3 = m_jit.graph().varArgChild(node, 2);
2667 Edge child4 = m_jit.graph().varArgChild(node, 3);
2668
2669 ArrayMode arrayMode = node->arrayMode().modeForPut();
2670 bool alreadyHandled = false;
2671
2672 switch (arrayMode.type()) {
2673 case Array::SelectUsingPredictions:
2674 case Array::ForceExit:
2675 DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
2676 break;
2677 case Array::Generic: {
2678 DFG_ASSERT(m_jit.graph(), node, node->op() == PutByVal || node->op() == PutByValDirect);
2679
2680 JSValueOperand arg1(this, child1);
2681 JSValueOperand arg2(this, child2);
2682 JSValueOperand arg3(this, child3);
2683 GPRReg arg1GPR = arg1.gpr();
2684 GPRReg arg2GPR = arg2.gpr();
2685 GPRReg arg3GPR = arg3.gpr();
2686 flushRegisters();
2687 if (node->op() == PutByValDirect)
2688 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR);
2689 else
2690 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
2691
2692 noResult(node);
2693 alreadyHandled = true;
2694 break;
2695 }
2696 default:
2697 break;
2698 }
2699
2700 if (alreadyHandled)
2701 break;
2702
2703 // FIXME: the base may not be necessary for some array access modes. But we have to
2704 // keep it alive to this point, so it's likely to be in a register anyway. Likely
2705 // no harm in locking it here.
2706 SpeculateCellOperand base(this, child1);
2707 SpeculateStrictInt32Operand property(this, child2);
2708
2709 GPRReg baseReg = base.gpr();
2710 GPRReg propertyReg = property.gpr();
2711
2712 switch (arrayMode.type()) {
2713 case Array::Int32:
2714 case Array::Contiguous: {
2715 JSValueOperand value(this, child3, ManualOperandSpeculation);
2716
2717 GPRReg valueReg = value.gpr();
2718
2719 if (!m_compileOkay)
2720 return;
2721
2722 if (arrayMode.type() == Array::Int32) {
2723 DFG_TYPE_CHECK(
2724 JSValueRegs(valueReg), child3, SpecInt32,
2725 m_jit.branch64(
2726 MacroAssembler::Below, valueReg, GPRInfo::tagTypeNumberRegister));
2727 }
2728
2729 StorageOperand storage(this, child4);
2730 GPRReg storageReg = storage.gpr();
2731
2732 if (node->op() == PutByValAlias) {
2733 // Store the value to the array.
2734 GPRReg propertyReg = property.gpr();
2735 GPRReg valueReg = value.gpr();
2736 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2737
2738 noResult(node);
2739 break;
2740 }
2741
2742 GPRTemporary temporary;
2743 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2744
2745 MacroAssembler::Jump slowCase;
2746
2747 if (arrayMode.isInBounds()) {
2748 speculationCheck(
2749 OutOfBounds, JSValueRegs(), 0,
2750 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2751 } else {
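2752 // (Three cases here: an index below publicLength stores in place; an index between
2752 // publicLength and vectorLength appends by bumping publicLength; an index at or beyond
2752 // vectorLength takes the slow path, or OSR exits if this access was not speculated to go
2752 // out of bounds.)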
2752 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2753
2754 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2755
2756 if (!arrayMode.isOutOfBounds())
2757 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2758
2759 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2760 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2761
2762 inBounds.link(&m_jit);
2763 }
2764
2765 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2766
2767 base.use();
2768 property.use();
2769 value.use();
2770 storage.use();
2771
2772 if (arrayMode.isOutOfBounds()) {
2773 if (node->op() == PutByValDirect) {
2774 addSlowPathGenerator(slowPathCall(
2775 slowCase, this,
2776 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
2777 NoResult, baseReg, propertyReg, valueReg));
2778 } else {
2779 addSlowPathGenerator(slowPathCall(
2780 slowCase, this,
2781 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
2782 NoResult, baseReg, propertyReg, valueReg));
2783 }
2784 }
2785
2786 noResult(node, UseChildrenCalledExplicitly);
2787 break;
2788 }
2789
2790 case Array::Double: {
2791 compileDoublePutByVal(node, base, property);
2792 break;
2793 }
2794
2795 case Array::ArrayStorage:
2796 case Array::SlowPutArrayStorage: {
2797 JSValueOperand value(this, child3);
2798
2799 GPRReg valueReg = value.gpr();
2800
2801 if (!m_compileOkay)
2802 return;
2803
2804 StorageOperand storage(this, child4);
2805 GPRReg storageReg = storage.gpr();
2806
2807 if (node->op() == PutByValAlias) {
2808 // Store the value to the array.
2809 GPRReg propertyReg = property.gpr();
2810 GPRReg valueReg = value.gpr();
2811 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
2812
2813 noResult(node);
2814 break;
2815 }
2816
2817 GPRTemporary temporary;
2818 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2819
2820 MacroAssembler::JumpList slowCases;
2821
2822 MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
2823 if (!arrayMode.isOutOfBounds())
2824 speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
2825 else
2826 slowCases.append(beyondArrayBounds);
2827
2828 // Check if we're writing to a hole; if so increment m_numValuesInVector.
2829 if (arrayMode.isInBounds()) {
2830 speculationCheck(
2831 StoreToHole, JSValueRegs(), 0,
2832 m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
2833 } else {
2834 MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
2835 if (arrayMode.isSlowPut()) {
2836 // This is sort of strange. If we wanted to optimize this code path, we would invert
2837 // the above branch. But it's simply not worth it since this only happens if we're
2838 // already having a bad time.
2839 slowCases.append(m_jit.jump());
2840 } else {
2841 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
2842
2843 // If we're writing to a hole we might be growing the array, so update the length if needed.
2844 MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2845 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2846 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
2847
2848 lengthDoesNotNeedUpdate.link(&m_jit);
2849 }
2850 notHoleValue.link(&m_jit);
2851 }
2852
2853 // Store the value to the array.
2854 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
2855
2856 base.use();
2857 property.use();
2858 value.use();
2859 storage.use();
2860
2861 if (!slowCases.empty()) {
2862 if (node->op() == PutByValDirect) {
2863 addSlowPathGenerator(slowPathCall(
2864 slowCases, this,
2865 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
2866 NoResult, baseReg, propertyReg, valueReg));
2867 } else {
2868 addSlowPathGenerator(slowPathCall(
2869 slowCases, this,
2870 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
2871 NoResult, baseReg, propertyReg, valueReg));
2872 }
2873 }
2874
2875 noResult(node, UseChildrenCalledExplicitly);
2876 break;
2877 }
2878
2879 default: {
2880 TypedArrayType type = arrayMode.typedArrayType();
2881 if (isInt(type))
2882 compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
2883 else
2884 compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
2885 } }
2886
2887 break;
2888 }
2889
2890 case RegExpExec: {
2891 if (compileRegExpExec(node))
2892 return;
2893 if (!node->adjustedRefCount()) {
2894 SpeculateCellOperand base(this, node->child1());
2895 SpeculateCellOperand argument(this, node->child2());
2896 GPRReg baseGPR = base.gpr();
2897 GPRReg argumentGPR = argument.gpr();
2898
2899 flushRegisters();
2900 GPRFlushedCallResult result(this);
2901 callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
2902
2903 // Must use jsValueResult because otherwise we screw up register
2904 // allocation, which thinks that this node has a result.
2905 jsValueResult(result.gpr(), node);
2906 break;
2907 }
2908
2909 SpeculateCellOperand base(this, node->child1());
2910 SpeculateCellOperand argument(this, node->child2());
2911 GPRReg baseGPR = base.gpr();
2912 GPRReg argumentGPR = argument.gpr();
2913
2914 flushRegisters();
2915 GPRFlushedCallResult result(this);
2916 callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR);
2917
2918 jsValueResult(result.gpr(), node);
2919 break;
2920 }
2921
2922 case RegExpTest: {
2923 SpeculateCellOperand base(this, node->child1());
2924 SpeculateCellOperand argument(this, node->child2());
2925 GPRReg baseGPR = base.gpr();
2926 GPRReg argumentGPR = argument.gpr();
2927
2928 flushRegisters();
2929 GPRFlushedCallResult result(this);
2930 callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
2931
2932 // If we add a DataFormatBool, we should use it here.
2933 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
2934 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
2935 break;
2936 }
2937
2938 case ArrayPush: {
2939 ASSERT(node->arrayMode().isJSArray());
2940
2941 SpeculateCellOperand base(this, node->child1());
2942 GPRTemporary storageLength(this);
2943
2944 GPRReg baseGPR = base.gpr();
2945 GPRReg storageLengthGPR = storageLength.gpr();
2946
2947 StorageOperand storage(this, node->child3());
2948 GPRReg storageGPR = storage.gpr();
2949
2950 switch (node->arrayMode().type()) {
2951 case Array::Int32:
2952 case Array::Contiguous: {
2953 JSValueOperand value(this, node->child2(), ManualOperandSpeculation);
2954 GPRReg valueGPR = value.gpr();
2955
2956 if (node->arrayMode().type() == Array::Int32) {
2957 DFG_TYPE_CHECK(
2958 JSValueRegs(valueGPR), node->child2(), SpecInt32,
2959 m_jit.branch64(
2960 MacroAssembler::Below, valueGPR, GPRInfo::tagTypeNumberRegister));
2961 }
2962
2963 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2964 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
2965 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
2966 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2967 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2968 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
2969
2970 addSlowPathGenerator(
2971 slowPathCall(
2972 slowPath, this, operationArrayPush, storageLengthGPR,
2973 valueGPR, baseGPR));
2974
2975 jsValueResult(storageLengthGPR, node);
2976 break;
2977 }
2978
2979 case Array::Double: {
2980 SpeculateDoubleOperand value(this, node->child2());
2981 FPRReg valueFPR = value.fpr();
2982
2983 DFG_TYPE_CHECK(
2984 JSValueRegs(), node->child2(), SpecDoubleReal,
2985 m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
2986
2987 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
2988 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
2989 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
2990 m_jit.add32(TrustedImm32(1), storageLengthGPR);
2991 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
2992 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
2993
2994 addSlowPathGenerator(
2995 slowPathCall(
2996 slowPath, this, operationArrayPushDouble, storageLengthGPR,
2997 valueFPR, baseGPR));
2998
2999 jsValueResult(storageLengthGPR, node);
3000 break;
3001 }
3002
3003 case Array::ArrayStorage: {
3004 JSValueOperand value(this, node->child2());
3005 GPRReg valueGPR = value.gpr();
3006
3007 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
3008
3009 // Refuse to handle bizarre lengths.
3010 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
3011
3012 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
3013
3014 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3015
3016 m_jit.add32(TrustedImm32(1), storageLengthGPR);
3017 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3018 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3019 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
3020
3021 addSlowPathGenerator(
3022 slowPathCall(
3023 slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
3024 valueGPR, baseGPR));
3025
3026 jsValueResult(storageLengthGPR, node);
3027 break;
3028 }
3029
3030 default:
3031 CRASH();
3032 break;
3033 }
3034 break;
3035 }
3036
3037 case ArrayPop: {
3038 ASSERT(node->arrayMode().isJSArray());
3039
3040 SpeculateCellOperand base(this, node->child1());
3041 StorageOperand storage(this, node->child2());
3042 GPRTemporary value(this);
3043 GPRTemporary storageLength(this);
3044 FPRTemporary temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().
3045
3046 GPRReg baseGPR = base.gpr();
3047 GPRReg storageGPR = storage.gpr();
3048 GPRReg valueGPR = value.gpr();
3049 GPRReg storageLengthGPR = storageLength.gpr();
3050 FPRReg tempFPR = temp.fpr();
3051
3052 switch (node->arrayMode().type()) {
3053 case Array::Int32:
3054 case Array::Double:
3055 case Array::Contiguous: {
3056 m_jit.load32(
3057 MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
3058 MacroAssembler::Jump undefinedCase =
3059 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
3060 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
3061 m_jit.store32(
3062 storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3063 MacroAssembler::Jump slowCase;
3064 if (node->arrayMode().type() == Array::Double) {
3065 m_jit.loadDouble(
3066 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
3067 tempFPR);
3068 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3069 // length and the new length.
3070 m_jit.store64(
3071 MacroAssembler::TrustedImm64(bitwise_cast<int64_t>(PNaN)), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3072 slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
3073 boxDouble(tempFPR, valueGPR);
3074 } else {
3075 m_jit.load64(
3076 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
3077 valueGPR);
3078 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3079 // length and the new length.
3080 m_jit.store64(
3081 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3082 slowCase = m_jit.branchTest64(MacroAssembler::Zero, valueGPR);
3083 }
3084
3085 addSlowPathGenerator(
3086 slowPathMove(
3087 undefinedCase, this,
3088 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3089 addSlowPathGenerator(
3090 slowPathCall(
3091 slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR));
3092
3093 // We can't know for sure that the result is an int because of the slow paths. :-/
3094 jsValueResult(valueGPR, node);
3095 break;
3096 }
3097
3098 case Array::ArrayStorage: {
3099 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
3100
3101 JITCompiler::Jump undefinedCase =
3102 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
3103
3104 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
3105
3106 JITCompiler::JumpList slowCases;
3107 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
3108
3109 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
3110 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, valueGPR));
3111
3112 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3113
3114 m_jit.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3115 m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3116
3117 addSlowPathGenerator(
3118 slowPathMove(
3119 undefinedCase, this,
3120 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3121
3122 addSlowPathGenerator(
3123 slowPathCall(
3124 slowCases, this, operationArrayPop, valueGPR, baseGPR));
3125
3126 jsValueResult(valueGPR, node);
3127 break;
3128 }
3129
3130 default:
3131 CRASH();
3132 break;
3133 }
3134 break;
3135 }
3136
3137 case DFG::Jump: {
3138 jump(node->targetBlock());
3139 noResult(node);
3140 break;
3141 }
3142
3143 case Branch:
3144 emitBranch(node);
3145 break;
3146
3147 case Switch:
3148 emitSwitch(node);
3149 break;
3150
3151 case Return: {
3152 ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
3153 ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
3154 ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
3155
3156 // Return the result in returnValueGPR.
3157 JSValueOperand op1(this, node->child1());
3158 m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
3159
3160 m_jit.emitFunctionEpilogue();
3161 m_jit.ret();
3162
3163 noResult(node);
3164 break;
3165 }
3166
3167 case Throw:
3168 case ThrowReferenceError: {
3169 // We expect that throw statements are rare and are intended to exit the code block
3170 // anyway, so we just OSR back to the old JIT for now.
3171 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
3172 break;
3173 }
3174
3175 case BooleanToNumber: {
3176 switch (node->child1().useKind()) {
3177 case BooleanUse: {
3178 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
3179 GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
3180
3181 m_jit.move(value.gpr(), result.gpr());
3182 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
3183 DFG_TYPE_CHECK(
3184 JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
3185 JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
3186
3187 int32Result(result.gpr(), node);
3188 break;
3189 }
3190
3191 case UntypedUse: {
3192 JSValueOperand value(this, node->child1());
3193 GPRTemporary result(this);
3194
3195 if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) {
3196 m_jit.move(value.gpr(), result.gpr());
3197 m_jit.and32(TrustedImm32(1), result.gpr());
3198 int32Result(result.gpr(), node);
3199 break;
3200 }
3201
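3202 // (Sketch of the untyped path, assuming the usual encoding: XOR-ing with ValueFalse maps
3202 // false/true to 0/1; if any higher bits remain, the value was not a boolean and is passed
3202 // through unchanged; otherwise OR-ing in the number tag boxes the 0/1 as an int32 JSValue.)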
3202 m_jit.move(value.gpr(), result.gpr());
3203 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
3204 JITCompiler::Jump isBoolean = m_jit.branchTest64(
3205 JITCompiler::Zero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1)));
3206 m_jit.move(value.gpr(), result.gpr());
3207 JITCompiler::Jump done = m_jit.jump();
3208 isBoolean.link(&m_jit);
3209 m_jit.or64(GPRInfo::tagTypeNumberRegister, result.gpr());
3210 done.link(&m_jit);
3211
3212 jsValueResult(result.gpr(), node);
3213 break;
3214 }
3215
3216 default:
3217 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
3218 break;
3219 }
3220 break;
3221 }
3222
3223 case ToPrimitive: {
3224 DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse);
3225 JSValueOperand op1(this, node->child1());
3226 GPRTemporary result(this, Reuse, op1);
3227
3228 GPRReg op1GPR = op1.gpr();
3229 GPRReg resultGPR = result.gpr();
3230
3231 op1.use();
3232
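// Non-cells and non-object cells (strings, symbols) are already primitive and pass
// through unchanged; only objects fall to the slow path through operationToPrimitive.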
3233 MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(JSValueRegs(op1GPR));
3234 MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1GPR);
3235
3236 alreadyPrimitive.link(&m_jit);
3237 m_jit.move(op1GPR, resultGPR);
3238
3239 addSlowPathGenerator(
3240 slowPathCall(notPrimitive, this, operationToPrimitive, resultGPR, op1GPR));
3241
3242 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
3243 break;
3244 }
3245
3246 case ToString:
3247 case CallStringConstructor: {
3248 if (node->child1().useKind() == UntypedUse) {
3249 JSValueOperand op1(this, node->child1());
3250 GPRReg op1GPR = op1.gpr();
3251
3252 GPRFlushedCallResult result(this);
3253 GPRReg resultGPR = result.gpr();
3254
3255 flushRegisters();
3256
3257 JITCompiler::Jump done;
3258 if (node->child1()->prediction() & SpecString) {
3259 JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(JSValueRegs(op1GPR));
3260 JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1GPR);
3261 m_jit.move(op1GPR, resultGPR);
3262 done = m_jit.jump();
3263 slowPath1.link(&m_jit);
3264 slowPath2.link(&m_jit);
3265 }
3266 if (op == ToString)
3267 callOperation(operationToString, resultGPR, op1GPR);
3268 else {
3269 ASSERT(op == CallStringConstructor);
3270 callOperation(operationCallStringConstructor, resultGPR, op1GPR);
3271 }
3272 if (done.isSet())
3273 done.link(&m_jit);
3274 cellResult(resultGPR, node);
3275 break;
3276 }
3277
3278 compileToStringOrCallStringConstructorOnCell(node);
3279 break;
3280 }
3281
3282 case NewStringObject: {
3283 compileNewStringObject(node);
3284 break;
3285 }
3286
3287 case NewArray: {
3288 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
3289 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
3290 Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
3291 DFG_ASSERT(m_jit.graph(), node, structure->indexingType() == node->indexingType());
3292 ASSERT(
3293 hasUndecided(structure->indexingType())
3294 || hasInt32(structure->indexingType())
3295 || hasDouble(structure->indexingType())
3296 || hasContiguous(structure->indexingType()));
3297
3298 unsigned numElements = node->numChildren();
3299
3300 GPRTemporary result(this);
3301 GPRTemporary storage(this);
3302
3303 GPRReg resultGPR = result.gpr();
3304 GPRReg storageGPR = storage.gpr();
3305
3306 emitAllocateJSArray(resultGPR, structure, storageGPR, numElements);
3307
3308 // At this point, one way or another, resultGPR and storageGPR have pointers to
3309 // the JSArray and the Butterfly, respectively.
3310
3311 ASSERT(!hasUndecided(structure->indexingType()) || !node->numChildren());
3312
3313 for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
3314 Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
3315 switch (node->indexingType()) {
3316 case ALL_BLANK_INDEXING_TYPES:
3317 case ALL_UNDECIDED_INDEXING_TYPES:
3318 CRASH();
3319 break;
3320 case ALL_DOUBLE_INDEXING_TYPES: {
3321 SpeculateDoubleOperand operand(this, use);
3322 FPRReg opFPR = operand.fpr();
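// NaN compares unequal to itself, so this branch fires for any NaN. Double arrays use a
// dedicated NaN bit pattern to mark holes, so NaNs cannot be stored here directly; the
// check only admits SpecDoubleReal values.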
3323 DFG_TYPE_CHECK(
3324 JSValueRegs(), use, SpecDoubleReal,
3325 m_jit.branchDouble(
3326 MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
3327 m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx));
3328 break;
3329 }
3330 case ALL_INT32_INDEXING_TYPES:
3331 case ALL_CONTIGUOUS_INDEXING_TYPES: {
3332 JSValueOperand operand(this, use, ManualOperandSpeculation);
3333 GPRReg opGPR = operand.gpr();
3334 if (hasInt32(node->indexingType())) {
3335 DFG_TYPE_CHECK(
3336 JSValueRegs(opGPR), use, SpecInt32,
3337 m_jit.branch64(
3338 MacroAssembler::Below, opGPR, GPRInfo::tagTypeNumberRegister));
3339 }
3340 m_jit.store64(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx));
3341 break;
3342 }
3343 default:
3344 CRASH();
3345 break;
3346 }
3347 }
3348
3349 // Yuck, we should *really* have a way of also returning the storageGPR. But
3350 // that's the least of what's wrong with this code. We really shouldn't be
3351 // allocating the array after having computed - and probably spilled to the
3352 // stack - all of the things that will go into the array. The solution to that
3353 // bigger problem will also likely fix the redundancy in reloading the storage
3354 // pointer that we currently have.
3355
3356 cellResult(resultGPR, node);
3357 break;
3358 }
3359
3360 if (!node->numChildren()) {
3361 flushRegisters();
3362 GPRFlushedCallResult result(this);
3363 callOperation(operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
3364 cellResult(result.gpr(), node);
3365 break;
3366 }
3367
3368 size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren();
3369 ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(scratchSize);
3370 EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
3371
3372 for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
3373 // Need to perform the speculations that this node promises to perform. If we're
3374 // emitting code here and the indexing type is not array storage then there is
3375 // probably something hilarious going on and we're already failing at all the
3376 // things, but at least we're going to be sound.
3377 Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
3378 switch (node->indexingType()) {
3379 case ALL_BLANK_INDEXING_TYPES:
3380 case ALL_UNDECIDED_INDEXING_TYPES:
3381 CRASH();
3382 break;
3383 case ALL_DOUBLE_INDEXING_TYPES: {
3384 SpeculateDoubleOperand operand(this, use);
3385 GPRTemporary scratch(this);
3386 FPRReg opFPR = operand.fpr();
3387 GPRReg scratchGPR = scratch.gpr();
3388 DFG_TYPE_CHECK(
3389 JSValueRegs(), use, SpecDoubleReal,
3390 m_jit.branchDouble(
3391 MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
3392 m_jit.boxDouble(opFPR, scratchGPR);
3393 m_jit.store64(scratchGPR, buffer + operandIdx);
3394 break;
3395 }
3396 case ALL_INT32_INDEXING_TYPES: {
3397 JSValueOperand operand(this, use, ManualOperandSpeculation);
3398 GPRReg opGPR = operand.gpr();
3399 if (hasInt32(node->indexingType())) {
3400 DFG_TYPE_CHECK(
3401 JSValueRegs(opGPR), use, SpecInt32,
3402 m_jit.branch64(
3403 MacroAssembler::Below, opGPR, GPRInfo::tagTypeNumberRegister));
3404 }
3405 m_jit.store64(opGPR, buffer + operandIdx);
3406 break;
3407 }
3408 case ALL_CONTIGUOUS_INDEXING_TYPES:
3409 case ALL_ARRAY_STORAGE_INDEXING_TYPES: {
3410 JSValueOperand operand(this, use);
3411 GPRReg opGPR = operand.gpr();
3412 m_jit.store64(opGPR, buffer + operandIdx);
3413 operand.use();
3414 break;
3415 }
3416 default:
3417 CRASH();
3418 break;
3419 }
3420 }
3421
3422 switch (node->indexingType()) {
3423 case ALL_DOUBLE_INDEXING_TYPES:
3424 case ALL_INT32_INDEXING_TYPES:
3425 useChildren(node);
3426 break;
3427 default:
3428 break;
3429 }
3430
3431 flushRegisters();
3432
3433 if (scratchSize) {
3434 GPRTemporary scratch(this);
3435
3436 // Tell GC mark phase how much of the scratch buffer is active during call.
3437 m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
3438 m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
3439 }
3440
3441 GPRFlushedCallResult result(this);
3442
3443 callOperation(
3444 operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
3445 static_cast<void*>(buffer), node->numChildren());
3446
3447 if (scratchSize) {
3448 GPRTemporary scratch(this);
3449
3450 m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr());
3451 m_jit.storePtr(TrustedImmPtr(0), scratch.gpr());
3452 }
3453
3454 cellResult(result.gpr(), node, UseChildrenCalledExplicitly);
3455 break;
3456 }
3457
3458 case NewArrayWithSize: {
3459 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
3460 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
3461 SpeculateStrictInt32Operand size(this, node->child1());
3462 GPRTemporary result(this);
3463 GPRTemporary storage(this);
3464 GPRTemporary scratch(this);
3465 GPRTemporary scratch2(this);
3466
3467 GPRReg sizeGPR = size.gpr();
3468 GPRReg resultGPR = result.gpr();
3469 GPRReg storageGPR = storage.gpr();
3470 GPRReg scratchGPR = scratch.gpr();
3471 GPRReg scratch2GPR = scratch2.gpr();
3472
3473 MacroAssembler::JumpList slowCases;
3474 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
3475
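// The butterfly payload is size * sizeof(JSValue) (hence the shift by 3) plus the
// IndexingHeader. emitAllocateBasicStorage is expected to leave a pointer just past the
// new block in storageGPR, so subtracting the payload size lands on the butterfly base,
// right after the header.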
3476 ASSERT((1 << 3) == sizeof(JSValue));
3477 m_jit.move(sizeGPR, scratchGPR);
3478 m_jit.lshift32(TrustedImm32(3), scratchGPR);
3479 m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR);
3480 slowCases.append(
3481 emitAllocateBasicStorage(resultGPR, storageGPR));
3482 m_jit.subPtr(scratchGPR, storageGPR);
3483 Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
3484 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
3485
3486 m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3487 m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
3488
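// Double arrays use PNaN as the hole value, so pre-fill every slot of the new butterfly with it.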
3489 if (hasDouble(node->indexingType())) {
3490 m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
3491 m_jit.move(sizeGPR, scratch2GPR);
3492 MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratch2GPR);
3493 MacroAssembler::Label loop = m_jit.label();
3494 m_jit.sub32(TrustedImm32(1), scratch2GPR);
3495 m_jit.store64(scratchGPR, MacroAssembler::BaseIndex(storageGPR, scratch2GPR, MacroAssembler::TimesEight));
3496 m_jit.branchTest32(MacroAssembler::NonZero, scratch2GPR).linkTo(loop, &m_jit);
3497 done.link(&m_jit);
3498 }
3499
3500 addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
3501 slowCases, this, operationNewArrayWithSize, resultGPR,
3502 globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
3503 globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage),
3504 sizeGPR));
3505
3506 cellResult(resultGPR, node);
3507 break;
3508 }
3509
3510 SpeculateStrictInt32Operand size(this, node->child1());
3511 GPRReg sizeGPR = size.gpr();
3512 flushRegisters();
3513 GPRFlushedCallResult result(this);
3514 GPRReg resultGPR = result.gpr();
3515 GPRReg structureGPR = selectScratchGPR(sizeGPR);
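// Lengths at or above MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH get the ArrayWithArrayStorage
// structure instead of the requested indexing type.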
3516 MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
3517 m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR);
3518 MacroAssembler::Jump done = m_jit.jump();
3519 bigLength.link(&m_jit);
3520 m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)), structureGPR);
3521 done.link(&m_jit);
3522 callOperation(operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR);
3523 cellResult(resultGPR, node);
3524 break;
3525 }
3526
3527 case NewArrayBuffer: {
3528 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
3529 IndexingType indexingType = node->indexingType();
3530 if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) {
3531 unsigned numElements = node->numConstants();
3532
3533 GPRTemporary result(this);
3534 GPRTemporary storage(this);
3535
3536 GPRReg resultGPR = result.gpr();
3537 GPRReg storageGPR = storage.gpr();
3538
3539 emitAllocateJSArray(resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), storageGPR, numElements);
3540
3541 DFG_ASSERT(m_jit.graph(), node, indexingType & IsArray);
3542 JSValue* data = m_jit.codeBlock()->constantBuffer(node->startConstant());
3543 if (indexingType == ArrayWithDouble) {
3544 for (unsigned index = 0; index < node->numConstants(); ++index) {
3545 double value = data[index].asNumber();
3546 m_jit.store64(
3547 Imm64(bitwise_cast<int64_t>(value)),
3548 MacroAssembler::Address(storageGPR, sizeof(double) * index));
3549 }
3550 } else {
3551 for (unsigned index = 0; index < node->numConstants(); ++index) {
3552 m_jit.store64(
3553 Imm64(JSValue::encode(data[index])),
3554 MacroAssembler::Address(storageGPR, sizeof(JSValue) * index));
3555 }
3556 }
3557
3558 cellResult(resultGPR, node);
3559 break;
3560 }
3561
3562 flushRegisters();
3563 GPRFlushedCallResult result(this);
3564
3565 callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants());
3566
3567 cellResult(result.gpr(), node);
3568 break;
3569 }
3570
3571 case NewTypedArray: {
3572 switch (node->child1().useKind()) {
3573 case Int32Use:
3574 compileNewTypedArray(node);
3575 break;
3576 case UntypedUse: {
3577 JSValueOperand argument(this, node->child1());
3578 GPRReg argumentGPR = argument.gpr();
3579
3580 flushRegisters();
3581
3582 GPRFlushedCallResult result(this);
3583 GPRReg resultGPR = result.gpr();
3584
3585 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
3586 callOperation(
3587 operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()),
3588 resultGPR, globalObject->typedArrayStructure(node->typedArrayType()),
3589 argumentGPR);
3590
3591 cellResult(resultGPR, node);
3592 break;
3593 }
3594 default:
3595 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
3596 break;
3597 }
3598 break;
3599 }
3600
3601 case NewRegexp: {
3602 flushRegisters();
3603 GPRFlushedCallResult result(this);
3604
3605 callOperation(operationNewRegexp, result.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex()));
3606
3607 cellResult(result.gpr(), node);
3608 break;
3609 }
3610
3611 case ToThis: {
3612 ASSERT(node->child1().useKind() == UntypedUse);
3613 JSValueOperand thisValue(this, node->child1());
3614 GPRTemporary temp(this);
3615 GPRReg thisValueGPR = thisValue.gpr();
3616 GPRReg tempGPR = temp.gpr();
3617
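// Fast path: a cell of FinalObjectType is already a valid |this| and passes through
// unchanged; everything else defers to operationToThis / operationToThisStrict.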
3618 MacroAssembler::JumpList slowCases;
3619 slowCases.append(m_jit.branchIfNotCell(JSValueRegs(thisValueGPR)));
3620 slowCases.append(m_jit.branch8(
3621 MacroAssembler::NotEqual,
3622 MacroAssembler::Address(thisValueGPR, JSCell::typeInfoTypeOffset()),
3623 TrustedImm32(FinalObjectType)));
3624 m_jit.move(thisValueGPR, tempGPR);
3625 J_JITOperation_EJ function;
3626 if (m_jit.graph().executableFor(node->origin.semantic)->isStrictMode())
3627 function = operationToThisStrict;
3628 else
3629 function = operationToThis;
3630 addSlowPathGenerator(
3631 slowPathCall(slowCases, this, function, tempGPR, thisValueGPR));
3632
3633 jsValueResult(tempGPR, node);
3634 break;
3635 }
3636
3637 case CreateThis: {
3638 // Note that there is not much profit in speculating here. The only things we
3639 // speculate on are (1) that it's a cell, since that eliminates cell checks
3640 // later if the proto is reused, and (2) a FinalObject prediction, since we
3641 // want to get recompiled if that prediction turns out to be wrong (otherwise
3642 // we'd start taking the slow path a lot).
3643
3644 SpeculateCellOperand callee(this, node->child1());
3645 GPRTemporary result(this);
3646 GPRTemporary allocator(this);
3647 GPRTemporary structure(this);
3648 GPRTemporary scratch(this);
3649
3650 GPRReg calleeGPR = callee.gpr();
3651 GPRReg resultGPR = result.gpr();
3652 GPRReg allocatorGPR = allocator.gpr();
3653 GPRReg structureGPR = structure.gpr();
3654 GPRReg scratchGPR = scratch.gpr();
3655 // Rare data is only used to access the allocator and structure, so we can
3656 // reuse structureGPR for it and avoid an additional GPR.
3657 GPRReg rareDataGPR = structureGPR;
3658
3659 MacroAssembler::JumpList slowPath;
3660
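// If the callee has no rare data yet, or its allocation profile has no allocator,
// bail out to operationCreateThis.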
3661 m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR);
3662 slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR));
3663 m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
3664 m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR);
3665 slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR));
3666 emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath);
3667
3668 addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR, node->inlineCapacity()));
3669
3670 cellResult(resultGPR, node);
3671 break;
3672 }
3673
3674 case NewObject: {
3675 GPRTemporary result(this);
3676 GPRTemporary allocator(this);
3677 GPRTemporary scratch(this);
3678
3679 GPRReg resultGPR = result.gpr();
3680 GPRReg allocatorGPR = allocator.gpr();
3681 GPRReg scratchGPR = scratch.gpr();
3682
3683 MacroAssembler::JumpList slowPath;
3684
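// JSFinalObject has no destructor, so the object can come from the without-destructor
// allocator sized for the structure's inline capacity.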
3685 Structure* structure = node->structure();
3686 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
3687 MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
3688
3689 m_jit.move(TrustedImmPtr(allocatorPtr), allocatorGPR);
3690 emitAllocateJSObject(resultGPR, allocatorGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, slowPath);
3691
3692 addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, structure));
3693
3694 cellResult(resultGPR, node);
3695 break;
3696 }
3697
3698 case GetCallee: {
3699 GPRTemporary result(this);
3700 m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), result.gpr());
3701 cellResult(result.gpr(), node);
3702 break;
3703 }
3704
3705 case GetArgumentCount: {
3706 GPRTemporary result(this);
3707 m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), result.gpr());
3708 int32Result(result.gpr(), node);
3709 break;
3710 }
3711
3712 case GetScope:
3713 compileGetScope(node);
3714 break;
3715
3716 case SkipScope:
3717 compileSkipScope(node);
3718 break;
3719
3720 case GetClosureVar: {
3721 SpeculateCellOperand base(this, node->child1());
3722 GPRTemporary result(this);
3723 GPRReg baseGPR = base.gpr();
3724 GPRReg resultGPR = result.gpr();
3725
3726 m_jit.load64(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset())), resultGPR);
3727 jsValueResult(resultGPR, node);
3728 break;
3729 }
3730 case PutClosureVar: {
3731 SpeculateCellOperand base(this, node->child1());
3732 JSValueOperand value(this, node->child2());
3733
3734 GPRReg baseGPR = base.gpr();
3735 GPRReg valueGPR = value.gpr();
3736
3737 m_jit.store64(valueGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset())));
3738 noResult(node);
3739 break;
3740 }
3741 case GetById: {
3742 ASSERT(node->prediction());
3743
3744 switch (node->child1().useKind()) {
3745 case CellUse: {
3746 SpeculateCellOperand base(this, node->child1());
3747 GPRTemporary result(this, Reuse, base);
3748
3749 GPRReg baseGPR = base.gpr();
3750 GPRReg resultGPR = result.gpr();
3751
3752 base.use();
3753
3754 cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber());
3755
3756 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
3757 break;
3758 }
3759
3760 case UntypedUse: {
3761 JSValueOperand base(this, node->child1());
3762 GPRTemporary result(this, Reuse, base);
3763
3764 GPRReg baseGPR = base.gpr();
3765 GPRReg resultGPR = result.gpr();
3766
3767 base.use();
3768
3769 JITCompiler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(baseGPR));
3770
3771 cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), notCell);
3772
3773 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
3774 break;
3775 }
3776
3777 default:
3778 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
3779 break;
3780 }
3781 break;
3782 }
3783
3784 case GetByIdFlush: {
3785 if (!node->prediction()) {
3786 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
3787 break;
3788 }
3789
3790 switch (node->child1().useKind()) {
3791 case CellUse: {
3792 SpeculateCellOperand base(this, node->child1());
3793 GPRReg baseGPR = base.gpr();
3794
3795 GPRFlushedCallResult result(this);
3796
3797 GPRReg resultGPR = result.gpr();
3798
3799 base.use();
3800
3801 flushRegisters();
3802
3803 cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill);
3804
3805 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
3806 break;
3807 }
3808
3809 case UntypedUse: {
3810 JSValueOperand base(this, node->child1());
3811 GPRReg baseGPR = base.gpr();
3812
3813 GPRFlushedCallResult result(this);
3814 GPRReg resultGPR = result.gpr();
3815
3816 base.use();
3817 flushRegisters();
3818
3819 JITCompiler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(baseGPR));
3820
3821 cachedGetById(node->origin.semantic, baseGPR, resultGPR, node->identifierNumber(), notCell, DontSpill);
3822
3823 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
3824 break;
3825 }
3826
3827 default:
3828 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
3829 break;
3830 }
3831 break;
3832 }
3833
3834 case GetArrayLength:
3835 compileGetArrayLength(node);
3836 break;
3837
3838 case CheckCell: {
3839 SpeculateCellOperand cell(this, node->child1());
3840 speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell()));
3841 noResult(node);
3842 break;
3843 }
3844
3845 case CheckNotEmpty: {
3846 JSValueOperand operand(this, node->child1());
3847 GPRReg gpr = operand.gpr();
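// The empty JSValue is encoded as all-zero bits, so a plain zero test catches a TDZ read.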
3848 speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branchTest64(JITCompiler::Zero, gpr));
3849 noResult(node);
3850 break;
3851 }
3852
3853 case GetExecutable: {
3854 SpeculateCellOperand function(this, node->child1());
3855 GPRTemporary result(this, Reuse, function);
3856 GPRReg functionGPR = function.gpr();
3857 GPRReg resultGPR = result.gpr();
3858 speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType);
3859 m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR);
3860 cellResult(resultGPR, node);
3861 break;
3862 }
3863
3864 case CheckStructure: {
3865 SpeculateCellOperand base(this, node->child1());
3866
3867 ASSERT(node->structureSet().size());
3868
3869 ExitKind exitKind;
3870 if (node->child1()->hasConstant())
3871 exitKind = BadConstantCache;
3872 else
3873 exitKind = BadCache;
3874
3875 if (node->structureSet().size() == 1) {
3876 speculationCheck(
3877 exitKind, JSValueSource::unboxedCell(base.gpr()), 0,
3878 m_jit.branchWeakStructure(
3879 JITCompiler::NotEqual,
3880 JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()),
3881 node->structureSet()[0]));
3882 } else {
3883 JITCompiler::JumpList done;
3884
3885 for (size_t i = 0; i < node->structureSet().size() - 1; ++i)
3886 done.append(m_jit.branchWeakStructure(JITCompiler::Equal, MacroAssembler::Address(base.gpr(), JSCell::structureIDOffset()), node->structureSet()[i]));
3887
3888 speculationCheck(
3889 exitKind, JSValueSource::unboxedCell(base.gpr()), 0,
3890 m_jit.branchWeakStructure(
3891 JITCompiler::NotEqual, MacroAssembler::Address(base.gpr(), JSCell::structureIDOffset()), node->structureSet().last()));
3892
3893 done.link(&m_jit);
3894 }
3895
3896 noResult(node);
3897 break;
3898 }
3899
3900 case PutStructure: {
3901 Structure* oldStructure = node->transition()->previous;
3902 Structure* newStructure = node->transition()->next;
3903
3904 m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node);
3905
3906 SpeculateCellOperand base(this, node->child1());
3907 GPRReg baseGPR = base.gpr();
3908
3909 ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
3910 ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
3911 ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
3912 m_jit.store32(MacroAssembler::TrustedImm32(newStructure->id()), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()));
3913
3914 noResult(node);
3915 break;
3916 }
3917
3918 case AllocatePropertyStorage:
3919 compileAllocatePropertyStorage(node);
3920 break;
3921
3922 case ReallocatePropertyStorage:
3923 compileReallocatePropertyStorage(node);
3924 break;
3925
3926 case GetButterfly: {
3927 SpeculateCellOperand base(this, node->child1());
3928 GPRTemporary result(this, Reuse, base);
3929
3930 GPRReg baseGPR = base.gpr();
3931 GPRReg resultGPR = result.gpr();
3932
3933 m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
3934
3935 storageResult(resultGPR, node);
3936 break;
3937 }
3938
3939 case GetIndexedPropertyStorage: {
3940 compileGetIndexedPropertyStorage(node);
3941 break;
3942 }
3943
3944 case ConstantStoragePointer: {
3945 compileConstantStoragePointer(node);
3946 break;
3947 }
3948
3949 case GetTypedArrayByteOffset: {
3950 compileGetTypedArrayByteOffset(node);
3951 break;
3952 }
3953
3954 case GetByOffset:
3955 case GetGetterSetterByOffset: {
3956 StorageOperand storage(this, node->child1());
3957 GPRTemporary result(this, Reuse, storage);
3958
3959 GPRReg storageGPR = storage.gpr();
3960 GPRReg resultGPR = result.gpr();
3961
3962 StorageAccessData& storageAccessData = node->storageAccessData();
3963
3964 m_jit.load64(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset)), resultGPR);
3965
3966 jsValueResult(resultGPR, node);
3967 break;
3968 }
3969
3970 case GetGetter: {
3971 SpeculateCellOperand op1(this, node->child1());
3972 GPRTemporary result(this, Reuse, op1);
3973
3974 GPRReg op1GPR = op1.gpr();
3975 GPRReg resultGPR = result.gpr();
3976
3977 m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR);
3978
3979 cellResult(resultGPR, node);
3980 break;
3981 }
3982
3983 case GetSetter: {
3984 SpeculateCellOperand op1(this, node->child1());
3985 GPRTemporary result(this, Reuse, op1);
3986
3987 GPRReg op1GPR = op1.gpr();
3988 GPRReg resultGPR = result.gpr();
3989
3990 m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR);
3991
3992 cellResult(resultGPR, node);
3993 break;
3994 }
3995
3996 case PutByOffset: {
3997 StorageOperand storage(this, node->child1());
3998 JSValueOperand value(this, node->child3());
3999 GPRTemporary scratch1(this);
4000 GPRTemporary scratch2(this);
4001
4002 GPRReg storageGPR = storage.gpr();
4003 GPRReg valueGPR = value.gpr();
4004
4005 speculate(node, node->child2());
4006
4007 StorageAccessData& storageAccessData = node->storageAccessData();
4008
4009 m_jit.store64(valueGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset)));
4010
4011 noResult(node);
4012 break;
4013 }
4014
4015 case PutByIdFlush: {
4016 SpeculateCellOperand base(this, node->child1());
4017 JSValueOperand value(this, node->child2());
4018 GPRTemporary scratch(this);
4019
4020 GPRReg baseGPR = base.gpr();
4021 GPRReg valueGPR = value.gpr();
4022 GPRReg scratchGPR = scratch.gpr();
4023 flushRegisters();
4024
4025 cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill);
4026
4027 noResult(node);
4028 break;
4029 }
4030
4031 case PutById: {
4032 SpeculateCellOperand base(this, node->child1());
4033 JSValueOperand value(this, node->child2());
4034 GPRTemporary scratch(this);
4035
4036 GPRReg baseGPR = base.gpr();
4037 GPRReg valueGPR = value.gpr();
4038 GPRReg scratchGPR = scratch.gpr();
4039
4040 cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect);
4041
4042 noResult(node);
4043 break;
4044 }
4045
4046 case PutByIdDirect: {
4047 SpeculateCellOperand base(this, node->child1());
4048 JSValueOperand value(this, node->child2());
4049 GPRTemporary scratch(this);
4050
4051 GPRReg baseGPR = base.gpr();
4052 GPRReg valueGPR = value.gpr();
4053 GPRReg scratchGPR = scratch.gpr();
4054
4055 cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), Direct);
4056
4057 noResult(node);
4058 break;
4059 }
4060
4061 case GetGlobalVar: {
4062 GPRTemporary result(this);
4063
4064 m_jit.load64(node->variablePointer(), result.gpr());
4065
4066 jsValueResult(result.gpr(), node);
4067 break;
4068 }
4069
4070 case PutGlobalVar: {
4071 JSValueOperand value(this, node->child2());
4072
4073 m_jit.store64(value.gpr(), node->variablePointer());
4074
4075 noResult(node);
4076 break;
4077 }
4078
4079 case NotifyWrite: {
4080 compileNotifyWrite(node);
4081 break;
4082 }
4083
4084 case VarInjectionWatchpoint: {
4085 noResult(node);
4086 break;
4087 }
4088
4089 case CheckHasInstance: {
4090 SpeculateCellOperand base(this, node->child1());
4091 GPRTemporary structure(this);
4092
4093 // Speculate that the base cell has the ImplementsDefaultHasInstance type-info flag set.
4094 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(
4095 MacroAssembler::Zero,
4096 MacroAssembler::Address(base.gpr(), JSCell::typeInfoFlagsOffset()),
4097 MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
4098
4099 noResult(node);
4100 break;
4101 }
4102
4103 case InstanceOf: {
4104 compileInstanceOf(node);
4105 break;
4106 }
4107
4108 case IsUndefined: {
4109 JSValueOperand value(this, node->child1());
4110 GPRTemporary result(this);
4111 GPRTemporary localGlobalObject(this);
4112 GPRTemporary remoteGlobalObject(this);
4113 GPRTemporary scratch(this);
4114
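// A non-cell is undefined only if it is the undefined constant itself. A cell counts as
// undefined only when it masquerades as undefined and belongs to the same global object
// as this code block.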
4115 JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
4116
4117 m_jit.compare64(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr());
4118 JITCompiler::Jump done = m_jit.jump();
4119
4120 isCell.link(&m_jit);
4121 JITCompiler::Jump notMasqueradesAsUndefined;
4122 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
4123 m_jit.move(TrustedImm32(0), result.gpr());
4124 notMasqueradesAsUndefined = m_jit.jump();
4125 } else {
4126 JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
4127 JITCompiler::NonZero,
4128 JITCompiler::Address(value.gpr(), JSCell::typeInfoFlagsOffset()),
4129 TrustedImm32(MasqueradesAsUndefined));
4130 m_jit.move(TrustedImm32(0), result.gpr());
4131 notMasqueradesAsUndefined = m_jit.jump();
4132
4133 isMasqueradesAsUndefined.link(&m_jit);
4134 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
4135 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
4136 m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR);
4137 m_jit.emitLoadStructure(value.gpr(), result.gpr(), scratch.gpr());
4138 m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR);
4139 m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr());
4140 }
4141
4142 notMasqueradesAsUndefined.link(&m_jit);
4143 done.link(&m_jit);
4144 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
4145 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
4146 break;
4147 }
4148
4149 case IsBoolean: {
4150 JSValueOperand value(this, node->child1());
4151 GPRTemporary result(this, Reuse, value);
4152
4153 m_jit.move(value.gpr(), result.gpr());
4154 m_jit.xor64(JITCompiler::TrustedImm32(ValueFalse), result.gpr());
4155 m_jit.test64(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr());
4156 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
4157 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
4158 break;
4159 }
4160
4161 case IsNumber: {
4162 JSValueOperand value(this, node->child1());
4163 GPRTemporary result(this, Reuse, value);
4164
4165 m_jit.test64(JITCompiler::NonZero, value.gpr(), GPRInfo::tagTypeNumberRegister, result.gpr());
4166 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
4167 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
4168 break;
4169 }
4170
4171 case IsString: {
4172 JSValueOperand value(this, node->child1());
4173 GPRTemporary result(this, Reuse, value);
4174
4175 JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
4176
4177 m_jit.compare8(JITCompiler::Equal,
4178 JITCompiler::Address(value.gpr(), JSCell::typeInfoTypeOffset()),
4179 TrustedImm32(StringType),
4180 result.gpr());
4181 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
4182 JITCompiler::Jump done = m_jit.jump();
4183
4184 isNotCell.link(&m_jit);
4185 m_jit.move(TrustedImm32(ValueFalse), result.gpr());
4186
4187 done.link(&m_jit);
4188 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
4189 break;
4190 }
4191
4192 case IsObject: {
4193 JSValueOperand value(this, node->child1());
4194 GPRTemporary result(this, Reuse, value);
4195
4196 JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
4197
4198 m_jit.compare8(JITCompiler::AboveOrEqual,
4199 JITCompiler::Address(value.gpr(), JSCell::typeInfoTypeOffset()),
4200 TrustedImm32(ObjectType),
4201 result.gpr());
4202 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
4203 JITCompiler::Jump done = m_jit.jump();
4204
4205 isNotCell.link(&m_jit);
4206 m_jit.move(TrustedImm32(ValueFalse), result.gpr());
4207
4208 done.link(&m_jit);
4209 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
4210 break;
4211 }
4212
4213 case IsObjectOrNull: {
4214 compileIsObjectOrNull(node);
4215 break;
4216 }
4217
4218 case IsFunction: {
4219 compileIsFunction(node);
4220 break;
4221 }
4222
4223 case TypeOf: {
4224 compileTypeOf(node);
4225 break;
4226 }
4227
4228 case Flush:
4229 break;
4230
4231 case Call:
4232 case Construct:
4233 case CallVarargs:
4234 case CallForwardVarargs:
4235 case ConstructVarargs:
4236 case ConstructForwardVarargs:
4237 emitCall(node);
4238 break;
4239
4240 case LoadVarargs: {
4241 LoadVarargsData* data = node->loadVarargsData();
4242
4243 GPRReg argumentsGPR;
4244 {
4245 JSValueOperand arguments(this, node->child1());
4246 argumentsGPR = arguments.gpr();
4247 flushRegisters();
4248 }
4249
4250 callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsGPR, data->offset);
4251
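// Re-request the arguments operand: the call above clobbered registers. returnValueGPR,
// which holds the computed length, is locked first so the reloaded operand cannot land in it.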
4252 lock(GPRInfo::returnValueGPR);
4253 {
4254 JSValueOperand arguments(this, node->child1());
4255 argumentsGPR = arguments.gpr();
4256 flushRegisters();
4257 }
4258 unlock(GPRInfo::returnValueGPR);
4259
4260 // FIXME: There is a chance that we will call an effectful length property twice. This is safe
4261 // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
4262 // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
4263 // past the sizing.
4264 // https://bugs.webkit.org/show_bug.cgi?id=141448
4265
4266 GPRReg argCountIncludingThisGPR =
4267 JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsGPR);
4268
4269 m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);
4270 speculationCheck(
4271 VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
4272 MacroAssembler::Above,
4273 argCountIncludingThisGPR,
4274 TrustedImm32(data->limit)));
4275
4276 m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount));
4277
4278 callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum);
4279
4280 noResult(node);
4281 break;
4282 }
4283
4284 case ForwardVarargs: {
4285 compileForwardVarargs(node);
4286 break;
4287 }
4288
4289 case CreateActivation: {
4290 compileCreateActivation(node);
4291 break;
4292 }
4293
4294 case CreateDirectArguments: {
4295 compileCreateDirectArguments(node);
4296 break;
4297 }
4298
4299 case GetFromArguments: {
4300 compileGetFromArguments(node);
4301 break;
4302 }
4303
4304 case PutToArguments: {
4305 compilePutToArguments(node);
4306 break;
4307 }
4308
4309 case CreateScopedArguments: {
4310 compileCreateScopedArguments(node);
4311 break;
4312 }
4313
4314 case CreateClonedArguments: {
4315 compileCreateClonedArguments(node);
4316 break;
4317 }
4318
4319 case NewFunction:
4320 compileNewFunction(node);
4321 break;
4322
4323 case In:
4324 compileIn(node);
4325 break;
4326
4327 case CountExecution:
4328 m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address()));
4329 break;
4330
4331 case ForceOSRExit: {
4332 terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
4333 break;
4334 }
4335
4336 case InvalidationPoint:
4337 emitInvalidationPoint(node);
4338 break;
4339
4340 case CheckWatchdogTimer:
4341 ASSERT(m_jit.vm()->watchdog);
4342 speculationCheck(
4343 WatchdogTimerFired, JSValueRegs(), 0,
4344 m_jit.branchTest8(
4345 JITCompiler::NonZero,
4346 JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog->timerDidFireAddress())));
4347 break;
4348
4349 case Phantom:
4350 case Check:
4351 DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
4352 noResult(node);
4353 break;
4354
4355 case Breakpoint:
4356 case ProfileWillCall:
4357 case ProfileDidCall:
4358 case PhantomLocal:
4359 case LoopHint:
4360 // This is a no-op.
4361 noResult(node);
4362 break;
4363
4364 case Unreachable:
4365 DFG_CRASH(m_jit.graph(), node, "Unexpected Unreachable node");
4366 break;
4367
4368 case StoreBarrier: {
4369 compileStoreBarrier(node);
4370 break;
4371 }
4372
4373 case GetEnumerableLength: {
4374 SpeculateCellOperand enumerator(this, node->child1());
4375 GPRFlushedCallResult result(this);
4376 GPRReg resultGPR = result.gpr();
4377
4378 m_jit.load32(MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR);
4379 int32Result(resultGPR, node);
4380 break;
4381 }
4382 case HasGenericProperty: {
4383 JSValueOperand base(this, node->child1());
4384 SpeculateCellOperand property(this, node->child2());
4385 GPRFlushedCallResult result(this);
4386 GPRReg resultGPR = result.gpr();
4387
4388 flushRegisters();
4389 callOperation(operationHasGenericProperty, resultGPR, base.gpr(), property.gpr());
4390 jsValueResult(resultGPR, node, DataFormatJSBoolean);
4391 break;
4392 }
4393 case HasStructureProperty: {
4394 JSValueOperand base(this, node->child1());
4395 SpeculateCellOperand property(this, node->child2());
4396 SpeculateCellOperand enumerator(this, node->child3());
4397 GPRTemporary result(this);
4398
4399 GPRReg baseGPR = base.gpr();
4400 GPRReg propertyGPR = property.gpr();
4401 GPRReg resultGPR = result.gpr();
4402
4403 m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), resultGPR);
4404 MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
4405 resultGPR,
4406 MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset()));
4407
4408 moveTrueTo(resultGPR);
4409 MacroAssembler::Jump done = m_jit.jump();
4410
4411 done.link(&m_jit);
4412
4413 addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasGenericProperty, resultGPR, baseGPR, propertyGPR));
4414 jsValueResult(resultGPR, node, DataFormatJSBoolean);
4415 break;
4416 }
4417 case HasIndexedProperty: {
4418 SpeculateCellOperand base(this, node->child1());
4419 SpeculateStrictInt32Operand index(this, node->child2());
4420 GPRTemporary result(this);
4421
4422 GPRReg baseGPR = base.gpr();
4423 GPRReg indexGPR = index.gpr();
4424 GPRReg resultGPR = result.gpr();
4425
4426 MacroAssembler::JumpList slowCases;
4427 ArrayMode mode = node->arrayMode();
4428 switch (mode.type()) {
4429 case Array::Int32:
4430 case Array::Contiguous: {
4431 ASSERT(!!node->child3());
4432 StorageOperand storage(this, node->child3());
4433 GPRTemporary scratch(this);
4434
4435 GPRReg storageGPR = storage.gpr();
4436 GPRReg scratchGPR = scratch.gpr();
4437
4438 MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
4439 if (mode.isInBounds())
4440 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
4441 else
4442 slowCases.append(outOfBounds);
4443
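// An encoded value of zero marks a hole; holes take the slow path so
// operationHasIndexedProperty can give the correct answer.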
4444 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchGPR);
4445 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
4446 moveTrueTo(resultGPR);
4447 break;
4448 }
4449 case Array::Double: {
4450 ASSERT(!!node->child3());
4451 StorageOperand storage(this, node->child3());
4452 FPRTemporary scratch(this);
4453 FPRReg scratchFPR = scratch.fpr();
4454 GPRReg storageGPR = storage.gpr();
4455
4456 MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
4457 if (mode.isInBounds())
4458 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
4459 else
4460 slowCases.append(outOfBounds);
4461
4462 m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
4463 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR));
4464 moveTrueTo(resultGPR);
4465 break;
4466 }
4467 case Array::ArrayStorage: {
4468 ASSERT(!!node->child3());
4469 StorageOperand storage(this, node->child3());
4470 GPRTemporary scratch(this);
4471
4472 GPRReg storageGPR = storage.gpr();
4473 GPRReg scratchGPR = scratch.gpr();
4474
4475 MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
4476 if (mode.isInBounds())
4477 speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
4478 else
4479 slowCases.append(outOfBounds);
4480
4481 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), scratchGPR);
4482 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
4483 moveTrueTo(resultGPR);
4484 break;
4485 }
4486 default: {
4487 slowCases.append(m_jit.jump());
4488 break;
4489 }
4490 }
4491
4492 addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedProperty, resultGPR, baseGPR, indexGPR));
4493
4494 jsValueResult(resultGPR, node, DataFormatJSBoolean);
4495 break;
4496 }
4497 case GetDirectPname: {
4498 Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
4499 Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
4500 Edge& indexEdge = m_jit.graph().varArgChild(node, 2);
4501 Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
4502
4503 SpeculateCellOperand base(this, baseEdge);
4504 SpeculateCellOperand property(this, propertyEdge);
4505 SpeculateStrictInt32Operand index(this, indexEdge);
4506 SpeculateCellOperand enumerator(this, enumeratorEdge);
4507 GPRTemporary result(this);
4508 GPRTemporary scratch1(this);
4509 GPRTemporary scratch2(this);
4510
4511 GPRReg baseGPR = base.gpr();
4512 GPRReg propertyGPR = property.gpr();
4513 GPRReg indexGPR = index.gpr();
4514 GPRReg enumeratorGPR = enumerator.gpr();
4515 GPRReg resultGPR = result.gpr();
4516 GPRReg scratch1GPR = scratch1.gpr();
4517 GPRReg scratch2GPR = scratch2.gpr();
4518
4519 // Check the structure
4520 m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratch1GPR);
4521 MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
4522 scratch1GPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()));
4523
4524 // Compute the offset.
4525 // If the index is less than the enumerator's cached inline capacity, it's an inline access.
4526 MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
4527 indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
4528
4529 m_jit.load64(MacroAssembler::BaseIndex(baseGPR, indexGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage()), resultGPR);
4530
4531 MacroAssembler::Jump done = m_jit.jump();
4532
4533 // Otherwise it's out of line
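// Out-of-line properties are stored at negative offsets before the butterfly, so negate
// the index past the inline capacity and read relative to the first out-of-line slot.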
4534 outOfLineAccess.link(&m_jit);
4535 m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratch2GPR);
4536 m_jit.move(indexGPR, scratch1GPR);
4537 m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratch1GPR);
4538 m_jit.neg32(scratch1GPR);
4539 m_jit.signExtend32ToPtr(scratch1GPR, scratch1GPR);
4540 int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
4541 m_jit.load64(MacroAssembler::BaseIndex(scratch2GPR, scratch1GPR, MacroAssembler::TimesEight, offsetOfFirstProperty), resultGPR);
4542
4543 done.link(&m_jit);
4544
4545 addSlowPathGenerator(slowPathCall(wrongStructure, this, operationGetByVal, resultGPR, baseGPR, propertyGPR));
4546
4547 jsValueResult(resultGPR, node);
4548 break;
4549 }
4550 case GetPropertyEnumerator: {
4551 SpeculateCellOperand base(this, node->child1());
4552 GPRFlushedCallResult result(this);
4553 GPRReg resultGPR = result.gpr();
4554
4555 flushRegisters();
4556 callOperation(operationGetPropertyEnumerator, resultGPR, base.gpr());
4557 cellResult(resultGPR, node);
4558 break;
4559 }
4560 case GetEnumeratorStructurePname:
4561 case GetEnumeratorGenericPname: {
4562 SpeculateCellOperand enumerator(this, node->child1());
4563 SpeculateStrictInt32Operand index(this, node->child2());
4564 GPRTemporary scratch1(this);
4565 GPRTemporary result(this);
4566
4567 GPRReg enumeratorGPR = enumerator.gpr();
4568 GPRReg indexGPR = index.gpr();
4569 GPRReg scratch1GPR = scratch1.gpr();
4570 GPRReg resultGPR = result.gpr();
4571
4572 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR,
4573 MacroAssembler::Address(enumeratorGPR, (op == GetEnumeratorStructurePname)
4574 ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset()
4575 : JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
4576
4577 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsNull())), resultGPR);
4578
4579 MacroAssembler::Jump done = m_jit.jump();
4580 inBounds.link(&m_jit);
4581
4582 m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratch1GPR);
4583 m_jit.load64(MacroAssembler::BaseIndex(scratch1GPR, indexGPR, MacroAssembler::TimesEight), resultGPR);
4584
4585 done.link(&m_jit);
4586 jsValueResult(resultGPR, node);
4587 break;
4588 }
4589 case ToIndexString: {
4590 SpeculateInt32Operand index(this, node->child1());
4591 GPRFlushedCallResult result(this);
4592 GPRReg resultGPR = result.gpr();
4593
4594 flushRegisters();
4595 callOperation(operationToIndexString, resultGPR, index.gpr());
4596 cellResult(resultGPR, node);
4597 break;
4598 }
4599 case ProfileType: {
4600 JSValueOperand value(this, node->child1());
4601 GPRTemporary scratch1(this);
4602 GPRTemporary scratch2(this);
4603 GPRTemporary scratch3(this);
4604
4605 GPRReg scratch1GPR = scratch1.gpr();
4606 GPRReg scratch2GPR = scratch2.gpr();
4607 GPRReg scratch3GPR = scratch3.gpr();
4608 GPRReg valueGPR = value.gpr();
4609
4610 MacroAssembler::JumpList jumpToEnd;
4611
4612 TypeLocation* cachedTypeLocation = node->typeLocation();
4613 // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
4614 // These typechecks are inlined to match those of the 64-bit JSValue type checks.
4615 if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
4616 jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined()))));
4617 else if (cachedTypeLocation->m_lastSeenType == TypeNull)
4618 jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNull()))));
4619 else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
4620 m_jit.move(valueGPR, scratch2GPR);
4621 m_jit.and64(TrustedImm32(~1), scratch2GPR);
4622 jumpToEnd.append(m_jit.branch64(MacroAssembler::Equal, scratch2GPR, MacroAssembler::TrustedImm64(ValueFalse)));
4623 } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
4624 jumpToEnd.append(m_jit.branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister));
4625 else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
4626 jumpToEnd.append(m_jit.branchTest64(MacroAssembler::NonZero, valueGPR, GPRInfo::tagTypeNumberRegister));
4627 else if (cachedTypeLocation->m_lastSeenType == TypeString) {
4628 MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
4629 jumpToEnd.append(m_jit.branchIfString(valueGPR));
4630 isNotCell.link(&m_jit);
4631 }
4632
4633 // Load the TypeProfilerLog into Scratch2.
4634 TypeProfilerLog* cachedTypeProfilerLog = m_jit.vm()->typeProfilerLog();
4635 m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR);
4636
4637 // Load the next LogEntry into Scratch1.
4638 m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR);
4639
4640 // Store the JSValue onto the log entry.
4641 m_jit.store64(valueGPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset()));
4642
4643 // Store the structureID of the cell if valueGPR is a cell; otherwise store 0 in the log entry.
4644 MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
4645 m_jit.load32(MacroAssembler::Address(valueGPR, JSCell::structureIDOffset()), scratch3GPR);
4646 m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
4647 MacroAssembler::Jump skipIsCell = m_jit.jump();
4648 isNotCell.link(&m_jit);
4649 m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
4650 skipIsCell.link(&m_jit);
4651
4652 // Store the typeLocation on the log entry.
4653 m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR);
4654 m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset()));
4655
4656 // Increment the current log entry.
4657 m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR);
4658 m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()));
4659 MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
4660 addSlowPathGenerator(
4661 slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult));
4662
4663 jumpToEnd.link(&m_jit);
4664
4665 noResult(node);
4666 break;
4667 }
4668 case ProfileControlFlow: {
4669 BasicBlockLocation* basicBlockLocation = node->basicBlockLocation();
4670 if (!basicBlockLocation->hasExecuted()) {
4671 GPRTemporary scratch1(this);
4672 basicBlockLocation->emitExecuteCode(m_jit, scratch1.gpr());
4673 }
4674 noResult(node);
4675 break;
4676 }
4677
4678 #if ENABLE(FTL_JIT)
4679 case CheckTierUpInLoop: {
4680 MacroAssembler::Jump done = m_jit.branchAdd32(
4681 MacroAssembler::Signed,
4682 TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
4683 MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter));
4684
4685 silentSpillAllRegisters(InvalidGPRReg);
4686 m_jit.setupArgumentsExecState();
4687 appendCall(triggerTierUpNowInLoop);
4688 silentFillAllRegisters(InvalidGPRReg);
4689
4690 done.link(&m_jit);
4691 break;
4692 }
4693
4694 case CheckTierUpAtReturn: {
4695 MacroAssembler::Jump done = m_jit.branchAdd32(
4696 MacroAssembler::Signed,
4697 TrustedImm32(Options::ftlTierUpCounterIncrementForReturn()),
4698 MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter));
4699
4700 silentSpillAllRegisters(InvalidGPRReg);
4701 m_jit.setupArgumentsExecState();
4702 appendCall(triggerTierUpNow);
4703 silentFillAllRegisters(InvalidGPRReg);
4704
4705 done.link(&m_jit);
4706 break;
4707 }
4708
4709 case CheckTierUpAndOSREnter:
4710 case CheckTierUpWithNestedTriggerAndOSREnter: {
4711 ASSERT(!node->origin.semantic.inlineCallFrame);
4712
4713 GPRTemporary temp(this);
4714 GPRReg tempGPR = temp.gpr();
4715
4716 MacroAssembler::Jump forceOSREntry;
4717 if (op == CheckTierUpWithNestedTriggerAndOSREnter)
4718 forceOSREntry = m_jit.branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->nestedTriggerIsSet));
4719
4720 MacroAssembler::Jump done = m_jit.branchAdd32(
4721 MacroAssembler::Signed,
4722 TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
4723 MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter));
4724
4725 if (forceOSREntry.isSet())
4726 forceOSREntry.link(&m_jit);
4727 silentSpillAllRegisters(tempGPR);
4728 m_jit.setupArgumentsWithExecState(
4729 TrustedImm32(node->origin.semantic.bytecodeIndex),
4730 TrustedImm32(m_stream->size()));
4731 appendCallSetResult(triggerOSREntryNow, tempGPR);
4732 MacroAssembler::Jump dontEnter = m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR);
4733 m_jit.jump(tempGPR);
4734 dontEnter.link(&m_jit);
4735 silentFillAllRegisters(tempGPR);
4736
4737 done.link(&m_jit);
4738 break;
4739 }
4740 #else // ENABLE(FTL_JIT)
4741 case CheckTierUpInLoop:
4742 case CheckTierUpAtReturn:
4743 case CheckTierUpAndOSREnter:
4744 case CheckTierUpWithNestedTriggerAndOSREnter:
4745 DFG_CRASH(m_jit.graph(), node, "Unexpected tier-up node");
4746 break;
4747 #endif // ENABLE(FTL_JIT)
4748
4749 case NativeCall:
4750 case NativeConstruct:
4751 case LastNodeType:
4752 case Phi:
4753 case Upsilon:
4754 case ExtractOSREntryLocal:
4755 case CheckInBounds:
4756 case ArithIMul:
4757 case MultiGetByOffset:
4758 case MultiPutByOffset:
4759 case FiatInt52:
4760 case CheckBadCell:
4761 case BottomValue:
4762 case PhantomNewObject:
4763 case PhantomNewFunction:
4764 case PhantomCreateActivation:
4765 case GetMyArgumentByVal:
4766 case PutHint:
4767 case CheckStructureImmediate:
4768 case MaterializeNewObject:
4769 case MaterializeCreateActivation:
4770 case PutStack:
4771 case KillStack:
4772 case GetStack:
4773 DFG_CRASH(m_jit.graph(), node, "Unexpected node");
4774 break;
4775 }
4776
4777 if (!m_compileOkay)
4778 return;
4779
4780 if (node->hasResult() && node->mustGenerate())
4781 use(node);
4782 }
4783
4784 #if ENABLE(GGC)
4785 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2)
4786 {
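// The barrier can be skipped when the stored value is not a cell, or when the owner is
// already in the remembered set or in Eden.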
4787 JITCompiler::Jump isNotCell;
4788 if (!isKnownCell(valueUse.node()))
4789 isNotCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
4790
4791 JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
4792 storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
4793 ownerIsRememberedOrInEden.link(&m_jit);
4794
4795 if (!isKnownCell(valueUse.node()))
4796 isNotCell.link(&m_jit);
4797 }
4798 #endif // ENABLE(GGC)
4799
4800 void SpeculativeJIT::moveTrueTo(GPRReg gpr)
4801 {
4802 m_jit.move(TrustedImm32(ValueTrue), gpr);
4803 }
4804
4805 void SpeculativeJIT::moveFalseTo(GPRReg gpr)
4806 {
4807 m_jit.move(TrustedImm32(ValueFalse), gpr);
4808 }
4809
4810 void SpeculativeJIT::blessBoolean(GPRReg gpr)
4811 {
4812 m_jit.or32(TrustedImm32(ValueFalse), gpr);
4813 }
4814
4815 void SpeculativeJIT::convertMachineInt(Edge valueEdge, GPRReg resultGPR)
4816 {
4817 JSValueOperand value(this, valueEdge, ManualOperandSpeculation);
4818 GPRReg valueGPR = value.gpr();
4819
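// Boxed int32s compare at or above the tag type number register and can be sign-extended
// directly; anything else goes to operationConvertBoxedDoubleToInt52, and the type check
// below fails if that returns JSValue::notInt52.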
4820 JITCompiler::Jump notInt32 =
4821 m_jit.branch64(JITCompiler::Below, valueGPR, GPRInfo::tagTypeNumberRegister);
4822
4823 m_jit.signExtend32ToPtr(valueGPR, resultGPR);
4824 JITCompiler::Jump done = m_jit.jump();
4825
4826 notInt32.link(&m_jit);
4827 silentSpillAllRegisters(resultGPR);
4828 callOperation(operationConvertBoxedDoubleToInt52, resultGPR, valueGPR);
4829 silentFillAllRegisters(resultGPR);
4830
4831 DFG_TYPE_CHECK(
4832 JSValueRegs(valueGPR), valueEdge, SpecInt32 | SpecInt52AsDouble,
4833 m_jit.branch64(
4834 JITCompiler::Equal, resultGPR,
4835 JITCompiler::TrustedImm64(JSValue::notInt52)));
4836 done.link(&m_jit);
4837 }
4838
4839 void SpeculativeJIT::speculateMachineInt(Edge edge)
4840 {
4841 if (!needsTypeCheck(edge, SpecInt32 | SpecInt52AsDouble))
4842 return;
4843
4844 GPRTemporary temp(this);
4845 convertMachineInt(edge, temp.gpr());
4846 }
4847
4848 void SpeculativeJIT::speculateDoubleRepMachineInt(Edge edge)
4849 {
4850 if (!needsTypeCheck(edge, SpecInt52AsDouble))
4851 return;
4852
4853 SpeculateDoubleOperand value(this, edge);
4854 FPRReg valueFPR = value.fpr();
4855
4856 GPRFlushedCallResult result(this);
4857 GPRReg resultGPR = result.gpr();
4858
4859 flushRegisters();
4860
4861 callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);
4862
4863 DFG_TYPE_CHECK(
4864 JSValueRegs(), edge, SpecInt52AsDouble,
4865 m_jit.branch64(
4866 JITCompiler::Equal, resultGPR,
4867 JITCompiler::TrustedImm64(JSValue::notInt52)));
4868 }
4869
4870 #endif
4871
4872 } } // namespace JSC::DFG
4873
4874 #endif