/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 * Copyright (C) 2011 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "ArrayPrototype.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGOperations.h"
#include "DFGSlowPathGenerator.h"
#include "Debugger.h"
#include "DirectArguments.h"
#include "GetterSetter.h"
#include "JSEnvironmentRecord.h"
#include "JSLexicalEnvironment.h"
#include "JSPropertyNameEnumerator.h"
#include "ObjectPrototype.h"
#include "JSCInlines.h"
#include "SetupVarargsFrame.h"
#include "TypeProfilerLog.h"

namespace JSC { namespace DFG {

#if USE(JSVALUE32_64)

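// Fills the JSValue for the given edge into a tag/payload register pair, starting from
// whatever representation (constant, spilled, or already in registers) it currently has.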
bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
{
    // FIXME: For double we could fill with a FPR.
    UNUSED_PARAM(fpr);

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {

        if (edge->hasConstant()) {
            tagGPR = allocate();
            payloadGPR = allocate();
            JSValue value = edge->asJSValue();
            m_jit.move(Imm32(value.tag()), tagGPR);
            m_jit.move(Imm32(value.payload()), payloadGPR);
            m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
            m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
            info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS);
        } else {
            DataFormat spillFormat = info.spillFormat();
            ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage);
            tagGPR = allocate();
            payloadGPR = allocate();
            switch (spillFormat) {
            case DataFormatInt32:
                m_jit.move(TrustedImm32(JSValue::Int32Tag), tagGPR);
                spillFormat = DataFormatJSInt32; // This will be used as the new register format.
                break;
            case DataFormatCell:
                m_jit.move(TrustedImm32(JSValue::CellTag), tagGPR);
                spillFormat = DataFormatJSCell; // This will be used as the new register format.
                break;
            case DataFormatBoolean:
                m_jit.move(TrustedImm32(JSValue::BooleanTag), tagGPR);
                spillFormat = DataFormatJSBoolean; // This will be used as the new register format.
                break;
            default:
                m_jit.load32(JITCompiler::tagFor(virtualRegister), tagGPR);
                break;
            }
            m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR);
            m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled);
            m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled);
            info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat);
        }

        return true;
    }

    case DataFormatInt32:
    case DataFormatCell:
    case DataFormatBoolean: {
        GPRReg gpr = info.gpr();
        // If the register has already been locked we need to take a copy.
        if (m_gprs.isLocked(gpr)) {
            payloadGPR = allocate();
            m_jit.move(gpr, payloadGPR);
        } else {
            payloadGPR = gpr;
            m_gprs.lock(gpr);
        }
        tagGPR = allocate();
        int32_t tag = JSValue::EmptyValueTag;
        DataFormat fillFormat = DataFormatJS;
        switch (info.registerFormat()) {
        case DataFormatInt32:
            tag = JSValue::Int32Tag;
            fillFormat = DataFormatJSInt32;
            break;
        case DataFormatCell:
            tag = JSValue::CellTag;
            fillFormat = DataFormatJSCell;
            break;
        case DataFormatBoolean:
            tag = JSValue::BooleanTag;
            fillFormat = DataFormatJSBoolean;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        m_jit.move(TrustedImm32(tag), tagGPR);
        m_gprs.release(gpr);
        m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
        info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat);
        return true;
    }

    case DataFormatJSDouble:
    case DataFormatJS:
    case DataFormatJSInt32:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        tagGPR = info.tagGPR();
        payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        return true;
    }

    case DataFormatStorage:
    case DataFormatDouble:
        // this type currently never occurs
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return true;
    }
}

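// Emits an inline-cached get_by_id: a patchable fast path generated by JITGetByIdGenerator
// plus a slow-path call to operationGetByIdOptimize.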
void SpeculativeJIT::cachedGetById(
    CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR,
    unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
    // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
    // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to
    // trip over one move instruction.
    if (basePayloadGPR == resultTagGPR) {
        RELEASE_ASSERT(basePayloadGPR != resultPayloadGPR);

        if (baseTagGPROrNone == resultPayloadGPR) {
            m_jit.swap(basePayloadGPR, baseTagGPROrNone);
            baseTagGPROrNone = resultTagGPR;
        } else
            m_jit.move(basePayloadGPR, resultPayloadGPR);
        basePayloadGPR = resultPayloadGPR;
    }

    JITGetByIdGenerator gen(
        m_jit.codeBlock(), codeOrigin, usedRegisters(),
        JSValueRegs(baseTagGPROrNone, basePayloadGPR),
        JSValueRegs(resultTagGPR, resultPayloadGPR), spillMode);

    gen.generateFastPath(m_jit);

    JITCompiler::JumpList slowCases;
    if (slowPathTarget.isSet())
        slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath;
    if (baseTagGPROrNone == InvalidGPRReg) {
        slowPath = slowPathCall(
            slowCases, this, operationGetByIdOptimize,
            JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(),
            static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
            identifierUID(identifierNumber));
    } else {
        slowPath = slowPathCall(
            slowCases, this, operationGetByIdOptimize,
            JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), baseTagGPROrNone,
            basePayloadGPR, identifierUID(identifierNumber));
    }

    m_jit.addGetById(gen, slowPath.get());
    addSlowPathGenerator(WTF::move(slowPath));
}

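// Emits an inline-cached put_by_id with the same fast-path/slow-path structure as cachedGetById.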
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
    JITPutByIdGenerator gen(
        m_jit.codeBlock(), codeOrigin, usedRegisters(),
        JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR),
        scratchGPR, spillMode, m_jit.ecmaModeFor(codeOrigin), putKind);

    gen.generateFastPath(m_jit);

    JITCompiler::JumpList slowCases;
    if (slowPathTarget.isSet())
        slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    auto slowPath = slowPathCall(
        slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueTagGPR,
        valuePayloadGPR, basePayloadGPR, identifierUID(identifierNumber));

    m_jit.addPutById(gen, slowPath.get());
    addSlowPathGenerator(WTF::move(slowPath));
}

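// Compares a value against null/undefined and materializes a boolean result, taking the
// MasqueradesAsUndefined type-info flag into account when the watchpoint has fired.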
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
{
    JSValueOperand arg(this, operand);
    GPRReg argTagGPR = arg.tagGPR();
    GPRReg argPayloadGPR = arg.payloadGPR();

    GPRTemporary resultPayload(this, Reuse, arg, PayloadWord);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    JITCompiler::Jump notCell;
    JITCompiler::Jump notMasqueradesAsUndefined;
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
        notMasqueradesAsUndefined = m_jit.jump();
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
            JITCompiler::NonZero,
            JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
            JITCompiler::TrustedImm32(MasqueradesAsUndefined));

        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
        notMasqueradesAsUndefined = m_jit.jump();

        isMasqueradesAsUndefined.link(&m_jit);
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultPayloadGPR);
        m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR);
    }

    if (!isKnownCell(operand.node())) {
        JITCompiler::Jump done = m_jit.jump();

        notCell.link(&m_jit);
        // null or undefined?
        COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
        m_jit.or32(TrustedImm32(1), argTagGPR, resultPayloadGPR);
        m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR);

        done.link(&m_jit);
    }

    notMasqueradesAsUndefined.link(&m_jit);

    booleanResult(resultPayloadGPR, m_currentNode);
}

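// Peephole variant of the null/undefined comparison: branches directly to the taken/notTaken
// blocks of the fused Branch node instead of producing a boolean.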
void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    if (taken == nextBlock()) {
        invert = !invert;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg(this, operand);
    GPRReg argTagGPR = arg.tagGPR();
    GPRReg argPayloadGPR = arg.payloadGPR();

    GPRTemporary result(this, Reuse, arg, TagWord);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell;

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        jump(invert ? taken : notTaken, ForceJump);
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(arg.jsValueRegs());

        branchTest8(JITCompiler::Zero,
            JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
            JITCompiler::TrustedImm32(MasqueradesAsUndefined),
            invert ? taken : notTaken);

        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultGPR);
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
    }

    if (!isKnownCell(operand.node())) {
        jump(notTaken, ForceJump);

        notCell.link(&m_jit);
        // null or undefined?
        COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
        m_jit.or32(TrustedImm32(1), argTagGPR, resultGPR);
        branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken);
    }

    jump(notTaken);
}

bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranchNull(operand, branchNode, invert);

        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompareNull(operand, invert);

    return false;
}

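// Fused compare-and-branch for untyped relational comparisons: int32 operands are compared
// inline, everything else goes through the helper function.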
void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        cond = JITCompiler::invert(cond);
        callResultCondition = JITCompiler::Zero;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);

        branchTest32(callResultCondition, resultGPR, taken);
    } else {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));

        branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken);

        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            jump(notTaken, ForceJump);

            slowPath.link(&m_jit);

            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
            silentFillAllRegisters(resultGPR);

            branchTest32(callResultCondition, resultGPR, taken);
        }
    }

    jump(notTaken);

    m_indexInBlock = m_block->size() - 1;
    m_currentNode = branchNode;
}

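// Slow path that calls the comparison helper and masks its result down to a 0/1 boolean
// payload before rejoining the fast path.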
template<typename JumpType>
class CompareAndBoxBooleanSlowPathGenerator
    : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
public:
    CompareAndBoxBooleanSlowPathGenerator(
        JumpType from, SpeculativeJIT* jit,
        S_JITOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload,
        GPRReg arg2Tag, GPRReg arg2Payload)
        : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
            from, jit, function, NeedToSpill, result)
        , m_arg1Tag(arg1Tag)
        , m_arg1Payload(arg1Payload)
        , m_arg2Tag(arg2Tag)
        , m_arg2Payload(arg2Payload)
    {
    }

protected:
    virtual void generateInternal(SpeculativeJIT* jit)
    {
        this->setUp(jit);
        this->recordCall(
            jit->callOperation(
                this->m_function, this->m_result, m_arg1Tag, m_arg1Payload, m_arg2Tag,
                m_arg2Payload));
        jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
        this->tearDown(jit);
    }

private:
    GPRReg m_arg1Tag;
    GPRReg m_arg1Payload;
    GPRReg m_arg2Tag;
    GPRReg m_arg2Payload;
};

void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        GPRFlushedCallResult result(this);
        GPRReg resultPayloadGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);

        booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
    } else {
        GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
        GPRReg resultPayloadGPR = resultPayload.gpr();

        arg1.use();
        arg2.use();

        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));

        m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);

        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
                slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
                arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR));
        }

        booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
    }
}

void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        invert = !invert;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();

    GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    arg1.use();
    arg2.use();

    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken);

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
    } else {
        // FIXME: Add fast paths for twoCells, number etc.

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();

    GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    arg1.use();
    arg2.use();

    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        // FIXME: this should flush registers instead of silent spill/fill.
        JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);

        m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
        JITCompiler::Jump done = m_jit.jump();

        notEqualCase.link(&m_jit);

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);

        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);

        done.link(&m_jit);
    } else {
        // FIXME: Add fast paths.

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);

        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
    }

    booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
}

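// Strict equality when at least one operand is speculated to be a "misc" value (boolean, null,
// or undefined): on 32_64 such values are equal iff both tag and payload match.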
void SpeculativeJIT::compileMiscStrictEq(Node* node)
{
    JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
    JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
    GPRTemporary result(this);

    if (node->child1().useKind() == MiscUse)
        speculateMisc(node->child1(), op1.jsValueRegs());
    if (node->child2().useKind() == MiscUse)
        speculateMisc(node->child2(), op2.jsValueRegs());

    m_jit.move(TrustedImm32(0), result.gpr());
    JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
    m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
    notEqual.link(&m_jit);
    booleanResult(result.gpr(), node);
}

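// Emits a call/construct (including the varargs forms): stores the callee frame, emits the
// patchable inline call fast path, and falls back to the CallLinkInfo slow path on a miss.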
void SpeculativeJIT::emitCall(Node* node)
{
    CallLinkInfo::CallType callType;
    bool isVarargs = false;
    bool isForwardVarargs = false;
    switch (node->op()) {
    case Call:
        callType = CallLinkInfo::Call;
        break;
    case Construct:
        callType = CallLinkInfo::Construct;
        break;
    case CallVarargs:
        callType = CallLinkInfo::CallVarargs;
        isVarargs = true;
        break;
    case ConstructVarargs:
        callType = CallLinkInfo::ConstructVarargs;
        isVarargs = true;
        break;
    case CallForwardVarargs:
        callType = CallLinkInfo::CallVarargs;
        isForwardVarargs = true;
        break;
    case ConstructForwardVarargs:
        callType = CallLinkInfo::ConstructVarargs;
        isForwardVarargs = true;
        break;
    default:
        DFG_CRASH(m_jit.graph(), node, "bad node type");
        break;
    }

    Edge calleeEdge = m_jit.graph().child(node, 0);

    // Gotta load the arguments somehow. Varargs is trickier.
    if (isVarargs || isForwardVarargs) {
        CallVarargsData* data = node->callVarargsData();

        GPRReg resultGPR;
        unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;

        if (isForwardVarargs) {
            flushRegisters();
            use(node->child2());

            GPRReg scratchGPR1;
            GPRReg scratchGPR2;
            GPRReg scratchGPR3;

            scratchGPR1 = JITCompiler::selectScratchGPR();
            scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
            scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);

            m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
            JITCompiler::JumpList slowCase;
            emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
            JITCompiler::Jump done = m_jit.jump();
            slowCase.link(&m_jit);
            callOperation(operationThrowStackOverflowForVarargs);
            m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
            done.link(&m_jit);
            resultGPR = scratchGPR2;
        } else {
            GPRReg argumentsPayloadGPR;
            GPRReg argumentsTagGPR;
            GPRReg scratchGPR1;
            GPRReg scratchGPR2;
            GPRReg scratchGPR3;

            auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
                if (reservedGPR != InvalidGPRReg)
                    lock(reservedGPR);
                JSValueOperand arguments(this, node->child2());
                argumentsTagGPR = arguments.tagGPR();
                argumentsPayloadGPR = arguments.payloadGPR();
                if (reservedGPR != InvalidGPRReg)
                    unlock(reservedGPR);
                flushRegisters();

                scratchGPR1 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, reservedGPR);
                scratchGPR2 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, reservedGPR);
                scratchGPR3 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, scratchGPR2, reservedGPR);
            };

            loadArgumentsGPR(InvalidGPRReg);

            DFG_ASSERT(m_jit.graph(), node, isFlushed());

            // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is
            // flushed.
            callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, numUsedStackSlots, data->firstVarArgOffset);

            // Now we have the argument count of the callee frame, but we've lost the arguments operand.
            // Reconstruct the arguments operand while preserving the callee frame.
            loadArgumentsGPR(GPRInfo::returnValueGPR);
            m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
            emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
            m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);

            callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsTagGPR, argumentsPayloadGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
            resultGPR = GPRInfo::returnValueGPR;
        }

        m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister);

        DFG_ASSERT(m_jit.graph(), node, isFlushed());

        // We don't need the arguments array anymore.
        if (isVarargs)
            use(node->child2());

        // Now set up the "this" argument.
        JSValueOperand thisArgument(this, node->child3());
        GPRReg thisArgumentTagGPR = thisArgument.tagGPR();
        GPRReg thisArgumentPayloadGPR = thisArgument.payloadGPR();
        thisArgument.use();

        m_jit.store32(thisArgumentTagGPR, JITCompiler::calleeArgumentTagSlot(0));
        m_jit.store32(thisArgumentPayloadGPR, JITCompiler::calleeArgumentPayloadSlot(0));
    } else {
        // The call instruction's first child is either the function (normal call) or the
        // receiver (method call). subsequent children are the arguments.
        int numPassedArgs = node->numChildren() - 1;

        m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(JSStack::ArgumentCount));

        for (int i = 0; i < numPassedArgs; i++) {
            Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
            JSValueOperand arg(this, argEdge);
            GPRReg argTagGPR = arg.tagGPR();
            GPRReg argPayloadGPR = arg.payloadGPR();
            use(argEdge);

            m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i));
            m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i));
        }
    }

    JSValueOperand callee(this, calleeEdge);
    GPRReg calleeTagGPR = callee.tagGPR();
    GPRReg calleePayloadGPR = callee.payloadGPR();
    use(calleeEdge);
    m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee));
    m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee));

    flushRegisters();

    GPRFlushedCallResult resultPayload(this);
    GPRFlushedCallResult2 resultTag(this);
    GPRReg resultPayloadGPR = resultPayload.gpr();
    GPRReg resultTagGPR = resultTag.gpr();

    JITCompiler::DataLabelPtr targetToCheck;
    JITCompiler::JumpList slowPath;

    m_jit.emitStoreCodeOrigin(node->origin.semantic);

    CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();

    slowPath.append(m_jit.branchIfNotCell(callee.jsValueRegs()));
    slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));

    JITCompiler::Call fastCall = m_jit.nearCall();

    JITCompiler::Jump done = m_jit.jump();

    slowPath.link(&m_jit);

    // Callee payload needs to be in regT0, tag in regT1
    if (calleeTagGPR == GPRInfo::regT0) {
        if (calleePayloadGPR == GPRInfo::regT1)
            m_jit.swap(GPRInfo::regT1, GPRInfo::regT0);
        else {
            m_jit.move(calleeTagGPR, GPRInfo::regT1);
            m_jit.move(calleePayloadGPR, GPRInfo::regT0);
        }
    } else {
        m_jit.move(calleePayloadGPR, GPRInfo::regT0);
        m_jit.move(calleeTagGPR, GPRInfo::regT1);
    }
    m_jit.move(MacroAssembler::TrustedImmPtr(info), GPRInfo::regT2);
    JITCompiler::Call slowCall = m_jit.nearCall();

    done.link(&m_jit);

    m_jit.setupResults(resultPayloadGPR, resultTagGPR);

    jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);

    info->setUpCall(callType, node->origin.semantic, calleePayloadGPR);
    m_jit.addJSCall(fastCall, slowCall, targetToCheck, info);

    // If we were varargs, then after the calls are done, we need to reestablish our stack pointer.
    if (isVarargs || isForwardVarargs)
        m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
}

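// Fills an edge as an unboxed int32, emitting an Int32Tag speculation check when the
// abstract value is not already known to be SpecInt32.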
template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
{
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));

    m_interpreter.filter(value, SpecInt32);
    if (value.isClear()) {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        returnFormat = DataFormatInt32;
        return allocate();
    }

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (edge->hasConstant()) {
            ASSERT(edge->isInt32Constant());
            GPRReg gpr = allocate();
            m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            info.fillInt32(*m_stream, gpr);
            returnFormat = DataFormatInt32;
            return gpr;
        }

        DataFormat spillFormat = info.spillFormat();

        ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);

        // If we know this was spilled as an integer we can fill without checking.
        if (type & ~SpecInt32)
            speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));

        GPRReg gpr = allocate();
        m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        info.fillInt32(*m_stream, gpr);
        returnFormat = DataFormatInt32;
        return gpr;
    }

    case DataFormatJSInt32:
    case DataFormatJS: {
        // Check the value is an integer.
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        if (type & ~SpecInt32)
            speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
        m_gprs.unlock(tagGPR);
        m_gprs.release(tagGPR);
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
        info.fillInt32(*m_stream, payloadGPR);
        // If !strict we're done, return.
        returnFormat = DataFormatInt32;
        return payloadGPR;
    }

    case DataFormatInt32: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatInt32;
        return gpr;
    }

    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatJSBoolean:
    case DataFormatDouble:
    case DataFormatStorage:
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
{
    return fillSpeculateInt32Internal<false>(edge, returnFormat);
}

GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
{
    DataFormat mustBeDataFormatInt32;
    GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
    ASSERT(mustBeDataFormatInt32 == DataFormatInt32);
    return result;
}

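// Fills a double edge into an FPR. Double edges are either constants or already stored as
// DataFormatDouble, so no boxed-to-double conversion is needed here.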
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
{
    ASSERT(isDouble(edge.useKind()));
    ASSERT(edge->hasDoubleResult());
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    if (info.registerFormat() == DataFormatNone) {

        if (edge->hasConstant()) {
            RELEASE_ASSERT(edge->isNumberConstant());
            FPRReg fpr = fprAllocate();
            m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(edge.node())), fpr);
            m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
            info.fillDouble(*m_stream, fpr);
            return fpr;
        }

        RELEASE_ASSERT(info.spillFormat() == DataFormatDouble);
        FPRReg fpr = fprAllocate();
        m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
        info.fillDouble(*m_stream, fpr);
        return fpr;
    }

    RELEASE_ASSERT(info.registerFormat() == DataFormatDouble);
    FPRReg fpr = info.fpr();
    m_fprs.lock(fpr);
    return fpr;
}

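// Fills an edge as an unboxed cell pointer, emitting a CellTag speculation check when the
// abstract value does not already prove the value is a cell.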
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
{
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));

    m_interpreter.filter(value, SpecCell);
    if (value.isClear()) {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (edge->hasConstant()) {
            JSValue jsValue = edge->asJSValue();
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
            info.fillCell(*m_stream, gpr);
            return gpr;
        }

        ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell);
        if (type & ~SpecCell) {
            speculationCheck(
                BadType,
                JSValueSource(JITCompiler::addressFor(virtualRegister)),
                edge,
                m_jit.branch32(
                    MacroAssembler::NotEqual,
                    JITCompiler::tagFor(virtualRegister),
                    TrustedImm32(JSValue::CellTag)));
        }
        GPRReg gpr = allocate();
        m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        info.fillCell(*m_stream, gpr);
        return gpr;
    }

    case DataFormatCell: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJSCell:
    case DataFormatJS: {
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        if (type & ~SpecCell) {
            speculationCheck(
                BadType, JSValueRegs(tagGPR, payloadGPR), edge,
                m_jit.branchIfNotCell(info.jsValueRegs()));
        }
        m_gprs.unlock(tagGPR);
        m_gprs.release(tagGPR);
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell);
        info.fillCell(*m_stream, payloadGPR);
        return payloadGPR;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatJSBoolean:
    case DataFormatBoolean:
    case DataFormatDouble:
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

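// Fills an edge as an unboxed boolean payload, emitting a BooleanTag speculation check when needed.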
GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;

    m_interpreter.filter(value, SpecBoolean);
    if (value.isClear()) {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (edge->hasConstant()) {
            JSValue jsValue = edge->asJSValue();
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr);
            info.fillBoolean(*m_stream, gpr);
            return gpr;
        }

        ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean);

        if (type & ~SpecBoolean)
            speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));

        GPRReg gpr = allocate();
        m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        info.fillBoolean(*m_stream, gpr);
        return gpr;
    }

    case DataFormatBoolean: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJSBoolean:
    case DataFormatJS: {
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        if (type & ~SpecBoolean)
            speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag)));

        m_gprs.unlock(tagGPR);
        m_gprs.release(tagGPR);
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean);
        info.fillBoolean(*m_stream, payloadGPR);
        return payloadGPR;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatCell:
    case DataFormatDouble:
    case DataFormatStorage:
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge)
{
#if ENABLE(GGC)
    ASSERT(!isKnownNotCell(valueEdge.node()));

    SpeculateCellOperand base(this, baseEdge);
    JSValueOperand value(this, valueEdge);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    writeBarrier(base.gpr(), value.tagGPR(), valueEdge, scratch1.gpr(), scratch2.gpr());
#else
    UNUSED_PARAM(baseEdge);
    UNUSED_PARAM(valueEdge);
#endif
}

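// Pointer equality of two operands speculated to be objects; if the MasqueradesAsUndefined
// watchpoint has fired, additionally speculates that neither object masquerades as undefined.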
void SpeculativeJIT::compileObjectEquality(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
    } else {
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    GPRTemporary resultPayload(this, Reuse, op2);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
    m_jit.move(TrustedImm32(1), resultPayloadGPR);
    MacroAssembler::Jump done = m_jit.jump();
    falseCase.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultPayloadGPR);
    done.link(&m_jit);

    booleanResult(resultPayloadGPR, node);
}

void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
{
    SpeculateCellOperand op1(this, objectChild);
    JSValueOperand op2(this, otherChild);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.payloadGPR();

    DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));

    GPRTemporary resultPayload(this, Reuse, op1);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    MacroAssembler::Jump op2CellJump = m_jit.branchIfCell(op2.jsValueRegs());

    m_jit.move(TrustedImm32(0), resultPayloadGPR);
    MacroAssembler::Jump op2NotCellJump = m_jit.jump();

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because we are doing strict equality.
    op2CellJump.link(&m_jit);
    m_jit.compare32(MacroAssembler::Equal, op1GPR, op2GPR, resultPayloadGPR);

    op2NotCellJump.link(&m_jit);
    booleanResult(resultPayloadGPR, m_currentNode);
}

void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateCellOperand op1(this, objectChild);
    JSValueOperand op2(this, otherChild);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.payloadGPR();

    DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));

    branch32(MacroAssembler::NotEqual, op2.tagGPR(), TrustedImm32(JSValue::CellTag), notTaken);

    if (taken == nextBlock()) {
        branch32(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
        jump(taken);
    } else {
        branch32(MacroAssembler::Equal, op1GPR, op2GPR, taken);
        jump(notTaken);
    }
}

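// Equality where the left operand is speculated to be an object and the right operand is
// either an object or null/undefined ("other").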
1239 | void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild) | |
1240 | { | |
1241 | SpeculateCellOperand op1(this, leftChild); | |
1242 | JSValueOperand op2(this, rightChild, ManualOperandSpeculation); | |
1243 | GPRTemporary result(this); | |
1244 | ||
1245 | GPRReg op1GPR = op1.gpr(); | |
1246 | GPRReg op2TagGPR = op2.tagGPR(); | |
1247 | GPRReg op2PayloadGPR = op2.payloadGPR(); | |
1248 | GPRReg resultGPR = result.gpr(); | |
1249 | ||
1250 | bool masqueradesAsUndefinedWatchpointValid = | |
1251 | masqueradesAsUndefinedWatchpointIsStillValid(); | |
1252 | ||
1253 | if (masqueradesAsUndefinedWatchpointValid) { | |
1254 | DFG_TYPE_CHECK( | |
1255 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); | |
1256 | } else { | |
1257 | DFG_TYPE_CHECK( | |
1258 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); | |
1259 | speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, | |
1260 | m_jit.branchTest8( | |
1261 | MacroAssembler::NonZero, | |
1262 | MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), | |
1263 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); | |
1264 | } | |
1265 | ||
1266 | ||
1267 | // It seems that most of the time when programs do a == b where b may be either null/undefined | |
1268 | // or an object, b is usually an object. Balance the branches to make that case fast. | |
1269 | MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs()); | |
1270 | ||
1271 | // We know that within this branch, rightChild must be a cell. | |
1272 | if (masqueradesAsUndefinedWatchpointValid) { | |
1273 | DFG_TYPE_CHECK( | |
1274 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR)); | |
1275 | } else { | |
1276 | DFG_TYPE_CHECK( | |
1277 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR)); | |
1278 | speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, | |
1279 | m_jit.branchTest8( | |
1280 | MacroAssembler::NonZero, | |
1281 | MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()), | |
1282 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); | |
1283 | } | |
1284 | ||
1285 | // At this point we know that we can perform a straight-forward equality comparison on pointer | |
1286 | // values because both left and right are pointers to objects that have no special equality | |
1287 | // protocols. | |
1288 | MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2PayloadGPR); | |
1289 | MacroAssembler::Jump trueCase = m_jit.jump(); | |
1290 | ||
1291 | rightNotCell.link(&m_jit); | |
1292 | ||
1293 | // We know that within this branch, rightChild must not be a cell. Check if that is enough to | |
1294 | // prove that it is either null or undefined. | |
1295 | if (needsTypeCheck(rightChild, SpecCell | SpecOther)) { | |
1296 | m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR); | |
1297 | ||
1298 | typeCheck( | |
1299 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, | |
1300 | m_jit.branch32( | |
1301 | MacroAssembler::NotEqual, resultGPR, | |
1302 | MacroAssembler::TrustedImm32(JSValue::NullTag))); | |
1303 | } | |
1304 | ||
1305 | falseCase.link(&m_jit); | |
1306 | m_jit.move(TrustedImm32(0), resultGPR); | |
1307 | MacroAssembler::Jump done = m_jit.jump(); | |
1308 | trueCase.link(&m_jit); | |
1309 | m_jit.move(TrustedImm32(1), resultGPR); | |
1310 | done.link(&m_jit); | |
1311 | ||
1312 | booleanResult(resultGPR, m_currentNode); | |
1313 | } | |
1314 | ||
1315 | void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode) | |
1316 | { | |
1317 | BasicBlock* taken = branchNode->branchData()->taken.block; | |
1318 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; | |
1319 | ||
1320 | SpeculateCellOperand op1(this, leftChild); | |
1321 | JSValueOperand op2(this, rightChild, ManualOperandSpeculation); | |
1322 | GPRTemporary result(this); | |
1323 | ||
1324 | GPRReg op1GPR = op1.gpr(); | |
1325 | GPRReg op2TagGPR = op2.tagGPR(); | |
1326 | GPRReg op2PayloadGPR = op2.payloadGPR(); | |
1327 | GPRReg resultGPR = result.gpr(); | |
1328 | ||
1329 | bool masqueradesAsUndefinedWatchpointValid = | |
1330 | masqueradesAsUndefinedWatchpointIsStillValid(); | |
1331 | ||
1332 | if (masqueradesAsUndefinedWatchpointValid) { | |
1333 | DFG_TYPE_CHECK( | |
1334 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); | |
1335 | } else { | |
1336 | DFG_TYPE_CHECK( | |
1337 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); | |
1338 | speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, | |
1339 | m_jit.branchTest8( | |
1340 | MacroAssembler::NonZero, | |
1341 | MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), | |
1342 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); | |
1343 | } | |
1344 | ||
1345 | // It seems that most of the time when programs do a == b where b may be either null/undefined | |
1346 | // or an object, b is an object. Balance the branches to make that case fast. | |
1347 | MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs()); | |
1348 | ||
1349 | // We know that within this branch, rightChild must be a cell. | |
1350 | if (masqueradesAsUndefinedWatchpointValid) { | |
1351 | DFG_TYPE_CHECK( | |
1352 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, | |
1353 | m_jit.branchIfNotObject(op2PayloadGPR)); | |
1354 | } else { | |
1355 | DFG_TYPE_CHECK( | |
1356 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, | |
1357 | m_jit.branchIfNotObject(op2PayloadGPR)); | |
1358 | speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, | |
1359 | m_jit.branchTest8( | |
1360 | MacroAssembler::NonZero, | |
1361 | MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()), | |
1362 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); | |
1363 | } | |
1364 | ||
1365 | // At this point we know that we can perform a straight-forward equality comparison on pointer | |
1366 | // values because both left and right are pointers to objects that have no special equality | |
1367 | // protocols. | |
1368 | branch32(MacroAssembler::Equal, op1GPR, op2PayloadGPR, taken); | |
1369 | ||
1370 | // We know that within this branch, rightChild must not be a cell. Check if that is enough to | |
1371 | // prove that it is either null or undefined. | |
1372 | if (!needsTypeCheck(rightChild, SpecCell | SpecOther)) | |
1373 | rightNotCell.link(&m_jit); | |
1374 | else { | |
1375 | jump(notTaken, ForceJump); | |
1376 | ||
1377 | rightNotCell.link(&m_jit); | |
1378 | m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR); | |
1379 | ||
1380 | typeCheck( | |
1381 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, | |
1382 | m_jit.branch32( | |
1383 | MacroAssembler::NotEqual, resultGPR, | |
1384 | MacroAssembler::TrustedImm32(JSValue::NullTag))); | |
1385 | } | |
1386 | ||
1387 | jump(notTaken); | |
1388 | } | |
1389 | ||
1390 | void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition) | |
1391 | { | |
1392 | SpeculateInt32Operand op1(this, node->child1()); | |
1393 | SpeculateInt32Operand op2(this, node->child2()); | |
1394 | GPRTemporary resultPayload(this); | |
1395 | ||
1396 | m_jit.compare32(condition, op1.gpr(), op2.gpr(), resultPayload.gpr()); | |
1397 | ||
1398 | // If we add a DataFormatBool, we should use it here. | |
1399 | booleanResult(resultPayload.gpr(), node); | |
1400 | } | |
1401 | ||
1402 | void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition) | |
1403 | { | |
1404 | SpeculateDoubleOperand op1(this, node->child1()); | |
1405 | SpeculateDoubleOperand op2(this, node->child2()); | |
1406 | GPRTemporary resultPayload(this); | |
1407 | ||
1408 | m_jit.move(TrustedImm32(1), resultPayload.gpr()); | |
1409 | MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr()); | |
1410 | m_jit.move(TrustedImm32(0), resultPayload.gpr()); | |
1411 | trueCase.link(&m_jit); | |
1412 | ||
1413 | booleanResult(resultPayload.gpr(), node); | |
1414 | } | |
1415 | ||
1416 | void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse) | |
1417 | { | |
1418 | JSValueOperand value(this, nodeUse, ManualOperandSpeculation); | |
1419 | GPRTemporary resultPayload(this); | |
1420 | GPRReg valueTagGPR = value.tagGPR(); | |
1421 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
1422 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
1423 | GPRTemporary structure; | |
1424 | GPRReg structureGPR = InvalidGPRReg; | |
1425 | ||
1426 | bool masqueradesAsUndefinedWatchpointValid = | |
1427 | masqueradesAsUndefinedWatchpointIsStillValid(); | |
1428 | ||
1429 | if (!masqueradesAsUndefinedWatchpointValid) { | |
1430 | // The masquerades as undefined case will use the structure register, so allocate it here. | |
1431 | // Do this at the top of the function to avoid branching around a register allocation. | |
1432 | GPRTemporary realStructure(this); | |
1433 | structure.adopt(realStructure); | |
1434 | structureGPR = structure.gpr(); | |
1435 | } | |
1436 | ||
1437 | MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs()); | |
1438 | if (masqueradesAsUndefinedWatchpointValid) { | |
1439 | DFG_TYPE_CHECK( | |
1440 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, | |
1441 | m_jit.branchIfNotObject(valuePayloadGPR)); | |
1442 | } else { | |
1443 | DFG_TYPE_CHECK( | |
1444 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, | |
1445 | m_jit.branchIfNotObject(valuePayloadGPR)); | |
1446 | ||
1447 | MacroAssembler::Jump isNotMasqueradesAsUndefined = | |
1448 | m_jit.branchTest8( | |
1449 | MacroAssembler::Zero, | |
1450 | MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()), | |
1451 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined)); | |
1452 | ||
1453 | m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), structureGPR); | |
1454 | speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, | |
1455 | m_jit.branchPtr( | |
1456 | MacroAssembler::Equal, | |
1457 | MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()), | |
1458 | MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)))); | |
1459 | ||
1460 | isNotMasqueradesAsUndefined.link(&m_jit); | |
1461 | } | |
1462 | m_jit.move(TrustedImm32(0), resultPayloadGPR); | |
1463 | MacroAssembler::Jump done = m_jit.jump(); | |
1464 | ||
1465 | notCell.link(&m_jit); | |
1466 | ||
1467 | COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); | |
1468 | if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) { | |
1469 | m_jit.or32(TrustedImm32(1), valueTagGPR, resultPayloadGPR); | |
1470 | typeCheck( | |
1471 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, | |
1472 | m_jit.branch32( | |
1473 | MacroAssembler::NotEqual, | |
1474 | resultPayloadGPR, | |
1475 | TrustedImm32(JSValue::NullTag))); | |
1476 | } | |
1477 | m_jit.move(TrustedImm32(1), resultPayloadGPR); | |
1478 | ||
1479 | done.link(&m_jit); | |
1480 | ||
1481 | booleanResult(resultPayloadGPR, m_currentNode); | |
1482 | } | |
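| // NOTE: MasqueradesAsUndefined covers oddball objects such as document.all, which behave | |
| // like undefined in boolean and equality contexts. When the watchpoint is still valid no | |
| // such object exists, so the per-object flag test is skipped; otherwise the code above | |
| // tests the type-info flag and OSR-exits if a masquerading object from the current global | |
| // object appears. A rough sketch of the un-speculated semantics (the helper names here are | |
| // hypothetical, for illustration only): | |
| // | |
| //   bool logicalNotObjectOrOther(JSValue v, JSGlobalObject* global) { | |
| //       if (!v.isCell()) | |
| //           return true;                                   // null or undefined | |
| //       if (masqueradesAsUndefined(v) && globalObjectOf(v) == global) | |
| //           return true;                                   // behaves like undefined | |
| //       return false;                                      // ordinary object is truthy | |
| //   } | |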
1483 | ||
1484 | void SpeculativeJIT::compileLogicalNot(Node* node) | |
1485 | { | |
1486 | switch (node->child1().useKind()) { | |
1487 | case BooleanUse: { | |
1488 | SpeculateBooleanOperand value(this, node->child1()); | |
1489 | GPRTemporary result(this, Reuse, value); | |
1490 | m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr()); | |
1491 | booleanResult(result.gpr(), node); | |
1492 | return; | |
1493 | } | |
1494 | ||
1495 | case ObjectOrOtherUse: { | |
1496 | compileObjectOrOtherLogicalNot(node->child1()); | |
1497 | return; | |
1498 | } | |
1499 | ||
1500 | case Int32Use: { | |
1501 | SpeculateInt32Operand value(this, node->child1()); | |
1502 | GPRTemporary resultPayload(this, Reuse, value); | |
1503 | m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), resultPayload.gpr()); | |
1504 | booleanResult(resultPayload.gpr(), node); | |
1505 | return; | |
1506 | } | |
1507 | ||
1508 | case DoubleRepUse: { | |
1509 | SpeculateDoubleOperand value(this, node->child1()); | |
1510 | FPRTemporary scratch(this); | |
1511 | GPRTemporary resultPayload(this); | |
1512 | m_jit.move(TrustedImm32(0), resultPayload.gpr()); | |
1513 | MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()); | |
1514 | m_jit.move(TrustedImm32(1), resultPayload.gpr()); | |
1515 | nonZero.link(&m_jit); | |
1516 | booleanResult(resultPayload.gpr(), node); | |
1517 | return; | |
1518 | } | |
1519 | ||
1520 | case UntypedUse: { | |
1521 | JSValueOperand arg1(this, node->child1()); | |
1522 | GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord); | |
1523 | GPRReg arg1TagGPR = arg1.tagGPR(); | |
1524 | GPRReg arg1PayloadGPR = arg1.payloadGPR(); | |
1525 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
1526 | ||
1527 | arg1.use(); | |
1528 | ||
1529 | JITCompiler::Jump slowCase = m_jit.branch32(JITCompiler::NotEqual, arg1TagGPR, TrustedImm32(JSValue::BooleanTag)); | |
1530 | ||
1531 | m_jit.move(arg1PayloadGPR, resultPayloadGPR); | |
1532 | ||
1533 | addSlowPathGenerator( | |
1534 | slowPathCall( | |
1535 | slowCase, this, operationConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR, | |
1536 | arg1PayloadGPR)); | |
1537 | ||
1538 | m_jit.xor32(TrustedImm32(1), resultPayloadGPR); | |
1539 | booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); | |
1540 | return; | |
1541 | } | |
1542 | case StringUse: | |
1543 | return compileStringZeroLength(node); | |
1544 | ||
1545 | default: | |
1546 | RELEASE_ASSERT_NOT_REACHED(); | |
1547 | break; | |
1548 | } | |
1549 | } | |
1550 | ||
1551 | void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken) | |
1552 | { | |
1553 | JSValueOperand value(this, nodeUse, ManualOperandSpeculation); | |
1554 | GPRTemporary scratch(this); | |
1555 | GPRReg valueTagGPR = value.tagGPR(); | |
1556 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
1557 | GPRReg scratchGPR = scratch.gpr(); | |
1558 | ||
1559 | MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs()); | |
1560 | if (masqueradesAsUndefinedWatchpointIsStillValid()) { | |
1561 | DFG_TYPE_CHECK( | |
1562 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, | |
1563 | m_jit.branchIfNotObject(valuePayloadGPR)); | |
1564 | } else { | |
1565 | DFG_TYPE_CHECK( | |
1566 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, | |
1567 | m_jit.branchIfNotObject(valuePayloadGPR)); | |
1568 | ||
1569 | JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8( | |
1570 | JITCompiler::Zero, | |
1571 | MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()), | |
1572 | TrustedImm32(MasqueradesAsUndefined)); | |
1573 | ||
1574 | m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), scratchGPR); | |
1575 | speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, | |
1576 | m_jit.branchPtr( | |
1577 | MacroAssembler::Equal, | |
1578 | MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()), | |
1579 | MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)))); | |
1580 | ||
1581 | isNotMasqueradesAsUndefined.link(&m_jit); | |
1582 | } | |
1583 | jump(taken, ForceJump); | |
1584 | ||
1585 | notCell.link(&m_jit); | |
1586 | ||
1587 | COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); | |
1588 | if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) { | |
1589 | m_jit.or32(TrustedImm32(1), valueTagGPR, scratchGPR); | |
1590 | typeCheck( | |
1591 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, | |
1592 | m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag))); | |
1593 | } | |
1594 | ||
1595 | jump(notTaken); | |
1596 | ||
1597 | noResult(m_currentNode); | |
1598 | } | |
1599 | ||
1600 | void SpeculativeJIT::emitBranch(Node* node) | |
1601 | { | |
1602 | BasicBlock* taken = node->branchData()->taken.block; | |
1603 | BasicBlock* notTaken = node->branchData()->notTaken.block; | |
1604 | ||
1605 | switch (node->child1().useKind()) { | |
1606 | case BooleanUse: { | |
1607 | SpeculateBooleanOperand value(this, node->child1()); | |
1608 | MacroAssembler::ResultCondition condition = MacroAssembler::NonZero; | |
1609 | ||
1610 | if (taken == nextBlock()) { | |
1611 | condition = MacroAssembler::Zero; | |
1612 | BasicBlock* tmp = taken; | |
1613 | taken = notTaken; | |
1614 | notTaken = tmp; | |
1615 | } | |
1616 | ||
1617 | branchTest32(condition, value.gpr(), TrustedImm32(1), taken); | |
1618 | jump(notTaken); | |
1619 | ||
1620 | noResult(node); | |
1621 | return; | |
1622 | } | |
1623 | ||
1624 | case ObjectOrOtherUse: { | |
1625 | emitObjectOrOtherBranch(node->child1(), taken, notTaken); | |
1626 | return; | |
1627 | } | |
1628 | ||
1629 | case StringUse: { | |
1630 | emitStringBranch(node->child1(), taken, notTaken); | |
1631 | return; | |
1632 | } | |
1633 | ||
1634 | case DoubleRepUse: | |
1635 | case Int32Use: { | |
1636 | if (node->child1().useKind() == Int32Use) { | |
1637 | bool invert = false; | |
1638 | ||
1639 | if (taken == nextBlock()) { | |
1640 | invert = true; | |
1641 | BasicBlock* tmp = taken; | |
1642 | taken = notTaken; | |
1643 | notTaken = tmp; | |
1644 | } | |
1645 | ||
1646 | SpeculateInt32Operand value(this, node->child1()); | |
1647 | branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken); | |
1648 | } else { | |
1649 | SpeculateDoubleOperand value(this, node->child1()); | |
1650 | FPRTemporary scratch(this); | |
1651 | branchDoubleNonZero(value.fpr(), scratch.fpr(), taken); | |
1652 | } | |
1653 | ||
1654 | jump(notTaken); | |
1655 | ||
1656 | noResult(node); | |
1657 | return; | |
1658 | } | |
1659 | ||
1660 | case UntypedUse: { | |
1661 | JSValueOperand value(this, node->child1()); | |
1662 | value.fill(); | |
1663 | GPRReg valueTagGPR = value.tagGPR(); | |
1664 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
1665 | ||
1666 | GPRTemporary result(this); | |
1667 | GPRReg resultGPR = result.gpr(); | |
1668 | ||
1669 | use(node->child1()); | |
1670 | ||
1671 | JITCompiler::Jump fastPath = m_jit.branch32(JITCompiler::Equal, valueTagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)); | |
1672 | JITCompiler::Jump slowPath = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::BooleanTag)); | |
1673 | ||
1674 | fastPath.link(&m_jit); | |
1675 | branchTest32(JITCompiler::Zero, valuePayloadGPR, notTaken); | |
1676 | jump(taken, ForceJump); | |
1677 | ||
1678 | slowPath.link(&m_jit); | |
1679 | silentSpillAllRegisters(resultGPR); | |
1680 | callOperation(operationConvertJSValueToBoolean, resultGPR, valueTagGPR, valuePayloadGPR); | |
1681 | silentFillAllRegisters(resultGPR); | |
1682 | ||
1683 | branchTest32(JITCompiler::NonZero, resultGPR, taken); | |
1684 | jump(notTaken); | |
1685 | ||
1686 | noResult(node, UseChildrenCalledExplicitly); | |
1687 | return; | |
1688 | } | |
1689 | ||
1690 | default: | |
1691 | RELEASE_ASSERT_NOT_REACHED(); | |
1692 | break; | |
1693 | } | |
1694 | } | |
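| // NOTE: in the UntypedUse branch above, the fast path only covers Int32Tag and BooleanTag, | |
| // where truthiness reduces to "payload != 0"; every other tag goes through the generic | |
| // operationConvertJSValueToBoolean call. A rough sketch (convertToBoolean is a hypothetical | |
| // stand-in for that slow-path call): | |
| // | |
| //   bool branchCondition(uint32_t tag, uint32_t payload, JSValue value) { | |
| //       if (tag == JSValue::Int32Tag || tag == JSValue::BooleanTag) | |
| //           return payload != 0;            // fast path: test the payload directly | |
| //       return convertToBoolean(value);     // slow path: full JS ToBoolean | |
| //   } | |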
1695 | ||
1696 | template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType> | |
1697 | void SpeculativeJIT::compileContiguousPutByVal(Node* node, BaseOperandType& base, PropertyOperandType& property, ValueOperandType& value, GPRReg valuePayloadReg, TagType valueTag) | |
1698 | { | |
1699 | Edge child4 = m_jit.graph().varArgChild(node, 3); | |
1700 | ||
1701 | ArrayMode arrayMode = node->arrayMode(); | |
1702 | ||
1703 | GPRReg baseReg = base.gpr(); | |
1704 | GPRReg propertyReg = property.gpr(); | |
1705 | ||
1706 | StorageOperand storage(this, child4); | |
1707 | GPRReg storageReg = storage.gpr(); | |
1708 | ||
1709 | if (node->op() == PutByValAlias) { | |
1710 | // Store the value to the array. | |
1711 | GPRReg propertyReg = property.gpr(); | |
1712 | m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
1713 | m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
1714 | ||
1715 | noResult(node); | |
1716 | return; | |
1717 | } | |
1718 | ||
1719 | MacroAssembler::Jump slowCase; | |
1720 | ||
1721 | if (arrayMode.isInBounds()) { | |
1722 | speculationCheck( | |
1723 | OutOfBounds, JSValueRegs(), 0, | |
1724 | m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); | |
1725 | } else { | |
1726 | MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); | |
1727 | ||
1728 | slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength())); | |
1729 | ||
1730 | if (!arrayMode.isOutOfBounds()) | |
1731 | speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase); | |
1732 | ||
1733 | m_jit.add32(TrustedImm32(1), propertyReg); | |
1734 | m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); | |
1735 | m_jit.sub32(TrustedImm32(1), propertyReg); | |
1736 | ||
1737 | inBounds.link(&m_jit); | |
1738 | } | |
1739 | ||
1740 | m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
1741 | m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
1742 | ||
1743 | base.use(); | |
1744 | property.use(); | |
1745 | value.use(); | |
1746 | storage.use(); | |
1747 | ||
1748 | if (arrayMode.isOutOfBounds()) { | |
1749 | if (node->op() == PutByValDirect) { | |
1750 | addSlowPathGenerator(slowPathCall( | |
1751 | slowCase, this, | |
1752 | m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict, | |
1753 | NoResult, baseReg, propertyReg, valueTag, valuePayloadReg)); | |
1754 | } else { | |
1755 | addSlowPathGenerator(slowPathCall( | |
1756 | slowCase, this, | |
1757 | m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, | |
1758 | NoResult, baseReg, propertyReg, valueTag, valuePayloadReg)); | |
1759 | } | |
1760 | } | |
1761 | ||
1762 | noResult(node, UseChildrenCalledExplicitly); | |
1763 | } | |
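| // NOTE: rough shape of the bounds handling above when the store is not speculated in-bounds | |
| // (the add32/sub32 pair simply writes index + 1 as the new public length without needing an | |
| // extra register): | |
| // | |
| //   if (index >= publicLength) { | |
| //       if (index >= vectorLength) | |
| //           goto slowPath;              // out-of-line call, or OSR exit when not allowed | |
| //       publicLength = index + 1;       // appending one past the end grows the array | |
| //   } | |
| //   storage[index] = value;             // tag and payload stored as two 32-bit words | |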
1764 | ||
1765 | void SpeculativeJIT::compile(Node* node) | |
1766 | { | |
1767 | NodeType op = node->op(); | |
1768 | ||
1769 | #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) | |
1770 | m_jit.clearRegisterAllocationOffsets(); | |
1771 | #endif | |
1772 | ||
1773 | switch (op) { | |
1774 | case JSConstant: | |
1775 | case DoubleConstant: | |
1776 | case PhantomDirectArguments: | |
1777 | case PhantomClonedArguments: | |
1778 | initConstantInfo(node); | |
1779 | break; | |
1780 | ||
1781 | case Identity: { | |
1782 | speculate(node, node->child1()); | |
1783 | switch (node->child1().useKind()) { | |
1784 | case DoubleRepUse: | |
1785 | case DoubleRepRealUse: { | |
1786 | SpeculateDoubleOperand op(this, node->child1()); | |
1787 | doubleResult(op.fpr(), node); | |
1788 | break; | |
1789 | } | |
1790 | case Int52RepUse: | |
1791 | case MachineIntUse: | |
1792 | case DoubleRepMachineIntUse: { | |
1793 | RELEASE_ASSERT_NOT_REACHED(); | |
1794 | break; | |
1795 | } | |
1796 | default: { | |
1797 | JSValueOperand op(this, node->child1()); | |
1798 | jsValueResult(op.tagGPR(), op.payloadGPR(), node); | |
1799 | break; | |
1800 | } | |
1801 | } // switch | |
1802 | break; | |
1803 | } | |
1804 | ||
1805 | case GetLocal: { | |
1806 | AbstractValue& value = m_state.variables().operand(node->local()); | |
1807 | ||
1808 | // If the CFA is tracking this variable and it found that the variable | |
1809 | // cannot have been assigned, then don't attempt to proceed. | |
1810 | if (value.isClear()) { | |
1811 | m_compileOkay = false; | |
1812 | break; | |
1813 | } | |
1814 | ||
1815 | switch (node->variableAccessData()->flushFormat()) { | |
1816 | case FlushedDouble: { | |
1817 | FPRTemporary result(this); | |
1818 | m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr()); | |
1819 | VirtualRegister virtualRegister = node->virtualRegister(); | |
1820 | m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble); | |
1821 | generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr()); | |
1822 | break; | |
1823 | } | |
1824 | ||
1825 | case FlushedInt32: { | |
1826 | GPRTemporary result(this); | |
1827 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); | |
1828 | ||
1829 | // Like int32Result, but don't useChildren - our children are phi nodes, | |
1830 | // and don't represent values within this dataflow with virtual registers. | |
1831 | VirtualRegister virtualRegister = node->virtualRegister(); | |
1832 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger); | |
1833 | generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr()); | |
1834 | break; | |
1835 | } | |
1836 | ||
1837 | case FlushedCell: { | |
1838 | GPRTemporary result(this); | |
1839 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); | |
1840 | ||
1841 | // Like cellResult, but don't useChildren - our children are phi nodes, | |
1842 | // and don't represent values within this dataflow with virtual registers. | |
1843 | VirtualRegister virtualRegister = node->virtualRegister(); | |
1844 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell); | |
1845 | generationInfoFromVirtualRegister(virtualRegister).initCell(node, node->refCount(), result.gpr()); | |
1846 | break; | |
1847 | } | |
1848 | ||
1849 | case FlushedBoolean: { | |
1850 | GPRTemporary result(this); | |
1851 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); | |
1852 | ||
1853 | // Like booleanResult, but don't useChildren - our children are phi nodes, | |
1854 | // and don't represent values within this dataflow with virtual registers. | |
1855 | VirtualRegister virtualRegister = node->virtualRegister(); | |
1856 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean); | |
1857 | generationInfoFromVirtualRegister(virtualRegister).initBoolean(node, node->refCount(), result.gpr()); | |
1858 | break; | |
1859 | } | |
1860 | ||
1861 | case FlushedJSValue: { | |
1862 | GPRTemporary result(this); | |
1863 | GPRTemporary tag(this); | |
1864 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); | |
1865 | m_jit.load32(JITCompiler::tagFor(node->machineLocal()), tag.gpr()); | |
1866 | ||
1867 | // Like jsValueResult, but don't useChildren - our children are phi nodes, | |
1868 | // and don't represent values within this dataflow with virtual registers. | |
1869 | VirtualRegister virtualRegister = node->virtualRegister(); | |
1870 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS); | |
1871 | m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS); | |
1872 | ||
1873 | generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), tag.gpr(), result.gpr(), DataFormatJS); | |
1874 | break; | |
1875 | } | |
1876 | ||
1877 | default: | |
1878 | RELEASE_ASSERT_NOT_REACHED(); | |
1879 | } | |
1880 | break; | |
1881 | } | |
1882 | ||
1883 | case GetLocalUnlinked: { | |
1884 | GPRTemporary payload(this); | |
1885 | GPRTemporary tag(this); | |
1886 | m_jit.load32(JITCompiler::payloadFor(node->unlinkedMachineLocal()), payload.gpr()); | |
1887 | m_jit.load32(JITCompiler::tagFor(node->unlinkedMachineLocal()), tag.gpr()); | |
1888 | jsValueResult(tag.gpr(), payload.gpr(), node); | |
1889 | break; | |
1890 | } | |
1891 | ||
1892 | case MovHint: { | |
1893 | compileMovHint(m_currentNode); | |
1894 | noResult(node); | |
1895 | break; | |
1896 | } | |
1897 | ||
1898 | case ZombieHint: { | |
1899 | recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead); | |
1900 | noResult(node); | |
1901 | break; | |
1902 | } | |
1903 | ||
1904 | case SetLocal: { | |
1905 | switch (node->variableAccessData()->flushFormat()) { | |
1906 | case FlushedDouble: { | |
1907 | SpeculateDoubleOperand value(this, node->child1()); | |
1908 | m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal())); | |
1909 | noResult(node); | |
1910 | // Indicate that it's no longer necessary to retrieve the value of | |
1911 | // this bytecode variable from registers or other locations in the stack, | |
1912 | // but that it is stored as a double. | |
1913 | recordSetLocal(DataFormatDouble); | |
1914 | break; | |
1915 | } | |
1916 | ||
1917 | case FlushedInt32: { | |
1918 | SpeculateInt32Operand value(this, node->child1()); | |
1919 | m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal())); | |
1920 | noResult(node); | |
1921 | recordSetLocal(DataFormatInt32); | |
1922 | break; | |
1923 | } | |
1924 | ||
1925 | case FlushedCell: { | |
1926 | SpeculateCellOperand cell(this, node->child1()); | |
1927 | GPRReg cellGPR = cell.gpr(); | |
1928 | m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node->machineLocal())); | |
1929 | noResult(node); | |
1930 | recordSetLocal(DataFormatCell); | |
1931 | break; | |
1932 | } | |
1933 | ||
1934 | case FlushedBoolean: { | |
1935 | SpeculateBooleanOperand value(this, node->child1()); | |
1936 | m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal())); | |
1937 | noResult(node); | |
1938 | recordSetLocal(DataFormatBoolean); | |
1939 | break; | |
1940 | } | |
1941 | ||
1942 | case FlushedJSValue: { | |
1943 | JSValueOperand value(this, node->child1()); | |
1944 | m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->machineLocal())); | |
1945 | m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->machineLocal())); | |
1946 | noResult(node); | |
1947 | recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); | |
1948 | break; | |
1949 | } | |
1950 | ||
1951 | default: | |
1952 | RELEASE_ASSERT_NOT_REACHED(); | |
1953 | break; | |
1954 | } | |
1955 | break; | |
1956 | } | |
1957 | ||
1958 | case SetArgument: | |
1959 | // This is a no-op; it just marks the fact that the argument is being used. | |
1960 | // But it may be profitable to use this as a hook to run speculation checks | |
1961 | // on arguments, thereby allowing us to trivially eliminate such checks if | |
1962 | // the argument is not used. | |
1963 | recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); | |
1964 | break; | |
1965 | ||
1966 | case BitAnd: | |
1967 | case BitOr: | |
1968 | case BitXor: | |
1969 | if (node->child1()->isInt32Constant()) { | |
1970 | SpeculateInt32Operand op2(this, node->child2()); | |
1971 | GPRTemporary result(this, Reuse, op2); | |
1972 | ||
1973 | bitOp(op, node->child1()->asInt32(), op2.gpr(), result.gpr()); | |
1974 | ||
1975 | int32Result(result.gpr(), node); | |
1976 | } else if (node->child2()->isInt32Constant()) { | |
1977 | SpeculateInt32Operand op1(this, node->child1()); | |
1978 | GPRTemporary result(this, Reuse, op1); | |
1979 | ||
1980 | bitOp(op, node->child2()->asInt32(), op1.gpr(), result.gpr()); | |
1981 | ||
1982 | int32Result(result.gpr(), node); | |
1983 | } else { | |
1984 | SpeculateInt32Operand op1(this, node->child1()); | |
1985 | SpeculateInt32Operand op2(this, node->child2()); | |
1986 | GPRTemporary result(this, Reuse, op1, op2); | |
1987 | ||
1988 | GPRReg reg1 = op1.gpr(); | |
1989 | GPRReg reg2 = op2.gpr(); | |
1990 | bitOp(op, reg1, reg2, result.gpr()); | |
1991 | ||
1992 | int32Result(result.gpr(), node); | |
1993 | } | |
1994 | break; | |
1995 | ||
1996 | case BitRShift: | |
1997 | case BitLShift: | |
1998 | case BitURShift: | |
1999 | if (node->child2()->isInt32Constant()) { | |
2000 | SpeculateInt32Operand op1(this, node->child1()); | |
2001 | GPRTemporary result(this, Reuse, op1); | |
2002 | ||
2003 | shiftOp(op, op1.gpr(), node->child2()->asInt32() & 0x1f, result.gpr()); | |
2004 | ||
2005 | int32Result(result.gpr(), node); | |
2006 | } else { | |
2007 | // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this. | |
2008 | SpeculateInt32Operand op1(this, node->child1()); | |
2009 | SpeculateInt32Operand op2(this, node->child2()); | |
2010 | GPRTemporary result(this, Reuse, op1); | |
2011 | ||
2012 | GPRReg reg1 = op1.gpr(); | |
2013 | GPRReg reg2 = op2.gpr(); | |
2014 | shiftOp(op, reg1, reg2, result.gpr()); | |
2015 | ||
2016 | int32Result(result.gpr(), node); | |
2017 | } | |
2018 | break; | |
2019 | ||
2020 | case UInt32ToNumber: { | |
2021 | compileUInt32ToNumber(node); | |
2022 | break; | |
2023 | } | |
2024 | ||
2025 | case DoubleAsInt32: { | |
2026 | compileDoubleAsInt32(node); | |
2027 | break; | |
2028 | } | |
2029 | ||
2030 | case ValueToInt32: { | |
2031 | compileValueToInt32(node); | |
2032 | break; | |
2033 | } | |
2034 | ||
2035 | case DoubleRep: { | |
2036 | compileDoubleRep(node); | |
2037 | break; | |
2038 | } | |
2039 | ||
2040 | case ValueRep: { | |
2041 | compileValueRep(node); | |
2042 | break; | |
2043 | } | |
2044 | ||
2045 | case ValueAdd: { | |
2046 | JSValueOperand op1(this, node->child1()); | |
2047 | JSValueOperand op2(this, node->child2()); | |
2048 | ||
2049 | GPRReg op1TagGPR = op1.tagGPR(); | |
2050 | GPRReg op1PayloadGPR = op1.payloadGPR(); | |
2051 | GPRReg op2TagGPR = op2.tagGPR(); | |
2052 | GPRReg op2PayloadGPR = op2.payloadGPR(); | |
2053 | ||
2054 | flushRegisters(); | |
2055 | ||
2056 | GPRFlushedCallResult2 resultTag(this); | |
2057 | GPRFlushedCallResult resultPayload(this); | |
2058 | if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) | |
2059 | callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR); | |
2060 | else | |
2061 | callOperation(operationValueAdd, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR); | |
2062 | ||
2063 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); | |
2064 | break; | |
2065 | } | |
2066 | ||
2067 | case ArithAdd: | |
2068 | compileAdd(node); | |
2069 | break; | |
2070 | ||
2071 | case ArithClz32: | |
2072 | compileArithClz32(node); | |
2073 | break; | |
2074 | ||
2075 | case MakeRope: | |
2076 | compileMakeRope(node); | |
2077 | break; | |
2078 | ||
2079 | case ArithSub: | |
2080 | compileArithSub(node); | |
2081 | break; | |
2082 | ||
2083 | case ArithNegate: | |
2084 | compileArithNegate(node); | |
2085 | break; | |
2086 | ||
2087 | case ArithMul: | |
2088 | compileArithMul(node); | |
2089 | break; | |
2090 | ||
2091 | case ArithDiv: { | |
2092 | compileArithDiv(node); | |
2093 | break; | |
2094 | } | |
2095 | ||
2096 | case ArithMod: { | |
2097 | compileArithMod(node); | |
2098 | break; | |
2099 | } | |
2100 | ||
2101 | case ArithPow: { | |
2102 | compileArithPow(node); | |
2103 | break; | |
2104 | } | |
2105 | ||
2106 | case ArithAbs: { | |
2107 | switch (node->child1().useKind()) { | |
2108 | case Int32Use: { | |
2109 | SpeculateStrictInt32Operand op1(this, node->child1()); | |
2110 | GPRTemporary result(this, Reuse, op1); | |
2111 | GPRTemporary scratch(this); | |
2112 | ||
2113 | m_jit.move(op1.gpr(), result.gpr()); | |
2114 | m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr()); | |
2115 | m_jit.add32(scratch.gpr(), result.gpr()); | |
2116 | m_jit.xor32(scratch.gpr(), result.gpr()); | |
2117 | speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31))); | |
2118 | int32Result(result.gpr(), node); | |
2119 | break; | |
2120 | } | |
2121 | ||
2122 | ||
2123 | case DoubleRepUse: { | |
2124 | SpeculateDoubleOperand op1(this, node->child1()); | |
2125 | FPRTemporary result(this); | |
2126 | ||
2127 | m_jit.absDouble(op1.fpr(), result.fpr()); | |
2128 | doubleResult(result.fpr(), node); | |
2129 | break; | |
2130 | } | |
2131 | ||
2132 | default: | |
2133 | RELEASE_ASSERT_NOT_REACHED(); | |
2134 | break; | |
2135 | } | |
2136 | break; | |
2137 | } | |
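| // NOTE: the Int32Use path above computes a branchless absolute value: with mask = x >> 31 | |
| // (0 for non-negative x, -1 for negative x), (x + mask) ^ mask is x for x >= 0 and -x | |
| // otherwise. A standalone sketch using unsigned arithmetic to sidestep signed overflow: | |
| // | |
| //   #include <cstdint> | |
| //   int32_t branchlessAbs(int32_t x) { | |
| //       uint32_t mask = static_cast<uint32_t>(x >> 31);          // 0 or 0xffffffff | |
| //       uint32_t r = (static_cast<uint32_t>(x) + mask) ^ mask; | |
| //       return static_cast<int32_t>(r);   // INT32_MIN maps to itself; the JIT exits on it | |
| //   } | |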
2138 | ||
2139 | case ArithMin: | |
2140 | case ArithMax: { | |
2141 | switch (node->binaryUseKind()) { | |
2142 | case Int32Use: { | |
2143 | SpeculateStrictInt32Operand op1(this, node->child1()); | |
2144 | SpeculateStrictInt32Operand op2(this, node->child2()); | |
2145 | GPRTemporary result(this, Reuse, op1); | |
2146 | ||
2147 | GPRReg op1GPR = op1.gpr(); | |
2148 | GPRReg op2GPR = op2.gpr(); | |
2149 | GPRReg resultGPR = result.gpr(); | |
2150 | ||
2151 | MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1GPR, op2GPR); | |
2152 | m_jit.move(op2GPR, resultGPR); | |
2153 | if (op1GPR != resultGPR) { | |
2154 | MacroAssembler::Jump done = m_jit.jump(); | |
2155 | op1Less.link(&m_jit); | |
2156 | m_jit.move(op1GPR, resultGPR); | |
2157 | done.link(&m_jit); | |
2158 | } else | |
2159 | op1Less.link(&m_jit); | |
2160 | ||
2161 | int32Result(resultGPR, node); | |
2162 | break; | |
2163 | } | |
2164 | ||
2165 | case DoubleRepUse: { | |
2166 | SpeculateDoubleOperand op1(this, node->child1()); | |
2167 | SpeculateDoubleOperand op2(this, node->child2()); | |
2168 | FPRTemporary result(this, op1); | |
2169 | ||
2170 | FPRReg op1FPR = op1.fpr(); | |
2171 | FPRReg op2FPR = op2.fpr(); | |
2172 | FPRReg resultFPR = result.fpr(); | |
2173 | ||
2174 | MacroAssembler::JumpList done; | |
2175 | ||
2176 | MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR); | |
2177 | ||
2178 | // op2 is either the lesser one, or one of them is NaN. | |
2179 | MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR); | |
2180 | ||
2181 | // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding | |
2182 | // op1 + op2 and putting it into result. | |
2183 | m_jit.addDouble(op1FPR, op2FPR, resultFPR); | |
2184 | done.append(m_jit.jump()); | |
2185 | ||
2186 | op2Less.link(&m_jit); | |
2187 | m_jit.moveDouble(op2FPR, resultFPR); | |
2188 | ||
2189 | if (op1FPR != resultFPR) { | |
2190 | done.append(m_jit.jump()); | |
2191 | ||
2192 | op1Less.link(&m_jit); | |
2193 | m_jit.moveDouble(op1FPR, resultFPR); | |
2194 | } else | |
2195 | op1Less.link(&m_jit); | |
2196 | ||
2197 | done.link(&m_jit); | |
2198 | ||
2199 | doubleResult(resultFPR, node); | |
2200 | break; | |
2201 | } | |
2202 | ||
2203 | default: | |
2204 | RELEASE_ASSERT_NOT_REACHED(); | |
2205 | break; | |
2206 | } | |
2207 | break; | |
2208 | } | |
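| // NOTE: for DoubleRepUse above, two ordered compares pick the smaller/larger operand; if | |
| // neither compare takes its branch the operands are unordered (at least one is NaN), and | |
| // op1 + op2 is used to manufacture the NaN result. Equivalent scalar logic for ArithMin: | |
| // | |
| //   double arithMin(double a, double b) { | |
| //       if (a < b) return a;        // ordered, a is smaller | |
| //       if (a >= b) return b;       // ordered, b is smaller or equal | |
| //       return a + b;               // unordered: at least one operand is NaN | |
| //   } | |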
2209 | ||
2210 | case ArithSqrt: | |
2211 | compileArithSqrt(node); | |
2212 | break; | |
2213 | ||
2214 | case ArithFRound: { | |
2215 | SpeculateDoubleOperand op1(this, node->child1()); | |
2216 | FPRTemporary result(this, op1); | |
2217 | ||
2218 | m_jit.convertDoubleToFloat(op1.fpr(), result.fpr()); | |
2219 | m_jit.convertFloatToDouble(result.fpr(), result.fpr()); | |
2220 | ||
2221 | doubleResult(result.fpr(), node); | |
2222 | break; | |
2223 | } | |
2224 | ||
2225 | case ArithRound: | |
2226 | compileArithRound(node); | |
2227 | break; | |
2228 | ||
2229 | case ArithSin: { | |
2230 | SpeculateDoubleOperand op1(this, node->child1()); | |
2231 | FPRReg op1FPR = op1.fpr(); | |
2232 | ||
2233 | flushRegisters(); | |
2234 | ||
2235 | FPRResult result(this); | |
2236 | callOperation(sin, result.fpr(), op1FPR); | |
2237 | doubleResult(result.fpr(), node); | |
2238 | break; | |
2239 | } | |
2240 | ||
2241 | case ArithCos: { | |
2242 | SpeculateDoubleOperand op1(this, node->child1()); | |
2243 | FPRReg op1FPR = op1.fpr(); | |
2244 | ||
2245 | flushRegisters(); | |
2246 | ||
2247 | FPRResult result(this); | |
2248 | callOperation(cos, result.fpr(), op1FPR); | |
2249 | doubleResult(result.fpr(), node); | |
2250 | break; | |
2251 | } | |
2252 | ||
2253 | case ArithLog: | |
2254 | compileArithLog(node); | |
2255 | break; | |
2256 | ||
2257 | case LogicalNot: | |
2258 | compileLogicalNot(node); | |
2259 | break; | |
2260 | ||
2261 | case CompareLess: | |
2262 | if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess)) | |
2263 | return; | |
2264 | break; | |
2265 | ||
2266 | case CompareLessEq: | |
2267 | if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq)) | |
2268 | return; | |
2269 | break; | |
2270 | ||
2271 | case CompareGreater: | |
2272 | if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater)) | |
2273 | return; | |
2274 | break; | |
2275 | ||
2276 | case CompareGreaterEq: | |
2277 | if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq)) | |
2278 | return; | |
2279 | break; | |
2280 | ||
2281 | case CompareEqConstant: | |
2282 | ASSERT(node->child2()->asJSValue().isNull()); | |
2283 | if (nonSpeculativeCompareNull(node, node->child1())) | |
2284 | return; | |
2285 | break; | |
2286 | ||
2287 | case CompareEq: | |
2288 | if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq)) | |
2289 | return; | |
2290 | break; | |
2291 | ||
2292 | case CompareStrictEq: | |
2293 | if (compileStrictEq(node)) | |
2294 | return; | |
2295 | break; | |
2296 | ||
2297 | case StringCharCodeAt: { | |
2298 | compileGetCharCodeAt(node); | |
2299 | break; | |
2300 | } | |
2301 | ||
2302 | case StringCharAt: { | |
2303 | // Relies on the StringCharAt node having the same basic layout as GetByVal. | |
2304 | compileGetByValOnString(node); | |
2305 | break; | |
2306 | } | |
2307 | ||
2308 | case StringFromCharCode: { | |
2309 | compileFromCharCode(node); | |
2310 | break; | |
2311 | } | |
2312 | ||
2313 | case CheckArray: { | |
2314 | checkArray(node); | |
2315 | break; | |
2316 | } | |
2317 | ||
2318 | case Arrayify: | |
2319 | case ArrayifyToStructure: { | |
2320 | arrayify(node); | |
2321 | break; | |
2322 | } | |
2323 | ||
2324 | case GetByVal: { | |
2325 | switch (node->arrayMode().type()) { | |
2326 | case Array::SelectUsingPredictions: | |
2327 | case Array::ForceExit: | |
2328 | RELEASE_ASSERT_NOT_REACHED(); | |
2329 | #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) | |
2330 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); | |
2331 | #endif | |
2332 | break; | |
2333 | case Array::Generic: { | |
2334 | SpeculateCellOperand base(this, node->child1()); // Save a register, speculate cell. We'll probably be right. | |
2335 | JSValueOperand property(this, node->child2()); | |
2336 | GPRReg baseGPR = base.gpr(); | |
2337 | GPRReg propertyTagGPR = property.tagGPR(); | |
2338 | GPRReg propertyPayloadGPR = property.payloadGPR(); | |
2339 | ||
2340 | flushRegisters(); | |
2341 | GPRFlushedCallResult2 resultTag(this); | |
2342 | GPRFlushedCallResult resultPayload(this); | |
2343 | callOperation(operationGetByValCell, resultTag.gpr(), resultPayload.gpr(), baseGPR, propertyTagGPR, propertyPayloadGPR); | |
2344 | ||
2345 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); | |
2346 | break; | |
2347 | } | |
2348 | case Array::Int32: | |
2349 | case Array::Contiguous: { | |
2350 | if (node->arrayMode().isInBounds()) { | |
2351 | SpeculateStrictInt32Operand property(this, node->child2()); | |
2352 | StorageOperand storage(this, node->child3()); | |
2353 | ||
2354 | GPRReg propertyReg = property.gpr(); | |
2355 | GPRReg storageReg = storage.gpr(); | |
2356 | ||
2357 | if (!m_compileOkay) | |
2358 | return; | |
2359 | ||
2360 | speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); | |
2361 | ||
2362 | GPRTemporary resultPayload(this); | |
2363 | if (node->arrayMode().type() == Array::Int32) { | |
2364 | ASSERT(!node->arrayMode().isSaneChain()); | |
2365 | ||
2366 | speculationCheck( | |
2367 | OutOfBounds, JSValueRegs(), 0, | |
2368 | m_jit.branch32( | |
2369 | MacroAssembler::Equal, | |
2370 | MacroAssembler::BaseIndex( | |
2371 | storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset), | |
2372 | TrustedImm32(JSValue::EmptyValueTag))); | |
2373 | m_jit.load32( | |
2374 | MacroAssembler::BaseIndex( | |
2375 | storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset), | |
2376 | resultPayload.gpr()); | |
2377 | int32Result(resultPayload.gpr(), node); | |
2378 | break; | |
2379 | } | |
2380 | ||
2381 | GPRTemporary resultTag(this); | |
2382 | m_jit.load32( | |
2383 | MacroAssembler::BaseIndex( | |
2384 | storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset), | |
2385 | resultTag.gpr()); | |
2386 | m_jit.load32( | |
2387 | MacroAssembler::BaseIndex( | |
2388 | storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset), | |
2389 | resultPayload.gpr()); | |
2390 | if (node->arrayMode().isSaneChain()) { | |
2391 | JITCompiler::Jump notHole = m_jit.branch32( | |
2392 | MacroAssembler::NotEqual, resultTag.gpr(), | |
2393 | TrustedImm32(JSValue::EmptyValueTag)); | |
2394 | m_jit.move(TrustedImm32(JSValue::UndefinedTag), resultTag.gpr()); | |
2395 | m_jit.move(TrustedImm32(0), resultPayload.gpr()); | |
2396 | notHole.link(&m_jit); | |
2397 | } else { | |
2398 | speculationCheck( | |
2399 | LoadFromHole, JSValueRegs(), 0, | |
2400 | m_jit.branch32( | |
2401 | MacroAssembler::Equal, resultTag.gpr(), | |
2402 | TrustedImm32(JSValue::EmptyValueTag))); | |
2403 | } | |
2404 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); | |
2405 | break; | |
2406 | } | |
2407 | ||
2408 | SpeculateCellOperand base(this, node->child1()); | |
2409 | SpeculateStrictInt32Operand property(this, node->child2()); | |
2410 | StorageOperand storage(this, node->child3()); | |
2411 | ||
2412 | GPRReg baseReg = base.gpr(); | |
2413 | GPRReg propertyReg = property.gpr(); | |
2414 | GPRReg storageReg = storage.gpr(); | |
2415 | ||
2416 | if (!m_compileOkay) | |
2417 | return; | |
2418 | ||
2419 | GPRTemporary resultTag(this); | |
2420 | GPRTemporary resultPayload(this); | |
2421 | GPRReg resultTagReg = resultTag.gpr(); | |
2422 | GPRReg resultPayloadReg = resultPayload.gpr(); | |
2423 | ||
2424 | MacroAssembler::JumpList slowCases; | |
2425 | ||
2426 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); | |
2427 | ||
2428 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); | |
2429 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); | |
2430 | slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag))); | |
2431 | ||
2432 | addSlowPathGenerator( | |
2433 | slowPathCall( | |
2434 | slowCases, this, operationGetByValArrayInt, | |
2435 | JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg)); | |
2436 | ||
2437 | jsValueResult(resultTagReg, resultPayloadReg, node); | |
2438 | break; | |
2439 | } | |
2440 | case Array::Double: { | |
2441 | if (node->arrayMode().isInBounds()) { | |
2442 | SpeculateStrictInt32Operand property(this, node->child2()); | |
2443 | StorageOperand storage(this, node->child3()); | |
2444 | ||
2445 | GPRReg propertyReg = property.gpr(); | |
2446 | GPRReg storageReg = storage.gpr(); | |
2447 | ||
2448 | if (!m_compileOkay) | |
2449 | return; | |
2450 | ||
2451 | speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); | |
2452 | ||
2453 | FPRTemporary result(this); | |
2454 | m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr()); | |
2455 | if (!node->arrayMode().isSaneChain()) | |
2456 | speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr())); | |
2457 | doubleResult(result.fpr(), node); | |
2458 | break; | |
2459 | } | |
2460 | ||
2461 | SpeculateCellOperand base(this, node->child1()); | |
2462 | SpeculateStrictInt32Operand property(this, node->child2()); | |
2463 | StorageOperand storage(this, node->child3()); | |
2464 | ||
2465 | GPRReg baseReg = base.gpr(); | |
2466 | GPRReg propertyReg = property.gpr(); | |
2467 | GPRReg storageReg = storage.gpr(); | |
2468 | ||
2469 | if (!m_compileOkay) | |
2470 | return; | |
2471 | ||
2472 | GPRTemporary resultTag(this); | |
2473 | GPRTemporary resultPayload(this); | |
2474 | FPRTemporary temp(this); | |
2475 | GPRReg resultTagReg = resultTag.gpr(); | |
2476 | GPRReg resultPayloadReg = resultPayload.gpr(); | |
2477 | FPRReg tempReg = temp.fpr(); | |
2478 | ||
2479 | MacroAssembler::JumpList slowCases; | |
2480 | ||
2481 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); | |
2482 | ||
2483 | m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg); | |
2484 | slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg)); | |
2485 | boxDouble(tempReg, resultTagReg, resultPayloadReg); | |
2486 | ||
2487 | addSlowPathGenerator( | |
2488 | slowPathCall( | |
2489 | slowCases, this, operationGetByValArrayInt, | |
2490 | JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg)); | |
2491 | ||
2492 | jsValueResult(resultTagReg, resultPayloadReg, node); | |
2493 | break; | |
2494 | } | |
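| // NOTE: in the Array::Double cases above, a hole is represented by a NaN bit pattern and | |
| // genuine NaN values are never stored in this array shape, so the hole test is just a | |
| // self-comparison (only NaN is unordered with itself): | |
| // | |
| //   bool isHole(double d) { return d != d; } | |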
2495 | case Array::ArrayStorage: | |
2496 | case Array::SlowPutArrayStorage: { | |
2497 | if (node->arrayMode().isInBounds()) { | |
2498 | SpeculateStrictInt32Operand property(this, node->child2()); | |
2499 | StorageOperand storage(this, node->child3()); | |
2500 | GPRReg propertyReg = property.gpr(); | |
2501 | GPRReg storageReg = storage.gpr(); | |
2502 | ||
2503 | if (!m_compileOkay) | |
2504 | return; | |
2505 | ||
2506 | speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); | |
2507 | ||
2508 | GPRTemporary resultTag(this); | |
2509 | GPRTemporary resultPayload(this); | |
2510 | ||
2511 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); | |
2512 | speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); | |
2513 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); | |
2514 | ||
2515 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); | |
2516 | break; | |
2517 | } | |
2518 | ||
2519 | SpeculateCellOperand base(this, node->child1()); | |
2520 | SpeculateStrictInt32Operand property(this, node->child2()); | |
2521 | StorageOperand storage(this, node->child3()); | |
2522 | GPRReg propertyReg = property.gpr(); | |
2523 | GPRReg storageReg = storage.gpr(); | |
2524 | GPRReg baseReg = base.gpr(); | |
2525 | ||
2526 | if (!m_compileOkay) | |
2527 | return; | |
2528 | ||
2529 | GPRTemporary resultTag(this); | |
2530 | GPRTemporary resultPayload(this); | |
2531 | GPRReg resultTagReg = resultTag.gpr(); | |
2532 | GPRReg resultPayloadReg = resultPayload.gpr(); | |
2533 | ||
2534 | JITCompiler::Jump outOfBounds = m_jit.branch32( | |
2535 | MacroAssembler::AboveOrEqual, propertyReg, | |
2536 | MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); | |
2537 | ||
2538 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); | |
2539 | JITCompiler::Jump hole = m_jit.branch32( | |
2540 | MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)); | |
2541 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); | |
2542 | ||
2543 | JITCompiler::JumpList slowCases; | |
2544 | slowCases.append(outOfBounds); | |
2545 | slowCases.append(hole); | |
2546 | addSlowPathGenerator( | |
2547 | slowPathCall( | |
2548 | slowCases, this, operationGetByValArrayInt, | |
2549 | JSValueRegs(resultTagReg, resultPayloadReg), | |
2550 | baseReg, propertyReg)); | |
2551 | ||
2552 | jsValueResult(resultTagReg, resultPayloadReg, node); | |
2553 | break; | |
2554 | } | |
2555 | case Array::String: | |
2556 | compileGetByValOnString(node); | |
2557 | break; | |
2558 | case Array::DirectArguments: | |
2559 | compileGetByValOnDirectArguments(node); | |
2560 | break; | |
2561 | case Array::ScopedArguments: | |
2562 | compileGetByValOnScopedArguments(node); | |
2563 | break; | |
2564 | default: { | |
2565 | TypedArrayType type = node->arrayMode().typedArrayType(); | |
2566 | if (isInt(type)) | |
2567 | compileGetByValOnIntTypedArray(node, type); | |
2568 | else | |
2569 | compileGetByValOnFloatTypedArray(node, type); | |
2570 | } } | |
2571 | break; | |
2572 | } | |
2573 | ||
2574 | case PutByValDirect: | |
2575 | case PutByVal: | |
2576 | case PutByValAlias: { | |
2577 | Edge child1 = m_jit.graph().varArgChild(node, 0); | |
2578 | Edge child2 = m_jit.graph().varArgChild(node, 1); | |
2579 | Edge child3 = m_jit.graph().varArgChild(node, 2); | |
2580 | Edge child4 = m_jit.graph().varArgChild(node, 3); | |
2581 | ||
2582 | ArrayMode arrayMode = node->arrayMode().modeForPut(); | |
2583 | bool alreadyHandled = false; | |
2584 | ||
2585 | switch (arrayMode.type()) { | |
2586 | case Array::SelectUsingPredictions: | |
2587 | case Array::ForceExit: | |
2588 | RELEASE_ASSERT_NOT_REACHED(); | |
2589 | #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) | |
2590 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); | |
2591 | alreadyHandled = true; | |
2592 | #endif | |
2593 | break; | |
2594 | case Array::Generic: { | |
2595 | ASSERT(node->op() == PutByVal || node->op() == PutByValDirect); | |
2596 | ||
2597 | SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right. | |
2598 | JSValueOperand property(this, child2); | |
2599 | JSValueOperand value(this, child3); | |
2600 | GPRReg baseGPR = base.gpr(); | |
2601 | GPRReg propertyTagGPR = property.tagGPR(); | |
2602 | GPRReg propertyPayloadGPR = property.payloadGPR(); | |
2603 | GPRReg valueTagGPR = value.tagGPR(); | |
2604 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
2605 | ||
2606 | flushRegisters(); | |
2607 | if (node->op() == PutByValDirect) | |
2608 | callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR); | |
2609 | else | |
2610 | callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR); | |
2611 | ||
2612 | noResult(node); | |
2613 | alreadyHandled = true; | |
2614 | break; | |
2615 | } | |
2616 | default: | |
2617 | break; | |
2618 | } | |
2619 | ||
2620 | if (alreadyHandled) | |
2621 | break; | |
2622 | ||
2623 | SpeculateCellOperand base(this, child1); | |
2624 | SpeculateStrictInt32Operand property(this, child2); | |
2625 | ||
2626 | GPRReg baseReg = base.gpr(); | |
2627 | GPRReg propertyReg = property.gpr(); | |
2628 | ||
2629 | switch (arrayMode.type()) { | |
2630 | case Array::Int32: { | |
2631 | SpeculateInt32Operand value(this, child3); | |
2632 | ||
2633 | GPRReg valuePayloadReg = value.gpr(); | |
2634 | ||
2635 | if (!m_compileOkay) | |
2636 | return; | |
2637 | ||
2638 | compileContiguousPutByVal(node, base, property, value, valuePayloadReg, TrustedImm32(JSValue::Int32Tag)); | |
2639 | break; | |
2640 | } | |
2641 | case Array::Contiguous: { | |
2642 | JSValueOperand value(this, child3); | |
2643 | ||
2644 | GPRReg valueTagReg = value.tagGPR(); | |
2645 | GPRReg valuePayloadReg = value.payloadGPR(); | |
2646 | ||
2647 | if (!m_compileOkay) | |
2648 | return; | |
2649 | ||
2650 | compileContiguousPutByVal(node, base, property, value, valuePayloadReg, valueTagReg); | |
2651 | break; | |
2652 | } | |
2653 | case Array::Double: { | |
2654 | compileDoublePutByVal(node, base, property); | |
2655 | break; | |
2656 | } | |
2657 | case Array::ArrayStorage: | |
2658 | case Array::SlowPutArrayStorage: { | |
2659 | JSValueOperand value(this, child3); | |
2660 | ||
2661 | GPRReg valueTagReg = value.tagGPR(); | |
2662 | GPRReg valuePayloadReg = value.payloadGPR(); | |
2663 | ||
2664 | if (!m_compileOkay) | |
2665 | return; | |
2666 | ||
2667 | StorageOperand storage(this, child4); | |
2668 | GPRReg storageReg = storage.gpr(); | |
2669 | ||
2670 | if (node->op() == PutByValAlias) { | |
2671 | // Store the value to the array. | |
2672 | GPRReg propertyReg = property.gpr(); | |
2673 | m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
2674 | m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
2675 | ||
2676 | noResult(node); | |
2677 | break; | |
2678 | } | |
2679 | ||
2680 | MacroAssembler::JumpList slowCases; | |
2681 | ||
2682 | MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); | |
2683 | if (!arrayMode.isOutOfBounds()) | |
2684 | speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds); | |
2685 | else | |
2686 | slowCases.append(beyondArrayBounds); | |
2687 | ||
2688 | // Check if we're writing to a hole; if so, increment m_numValuesInVector. | |
2689 | if (arrayMode.isInBounds()) { | |
2690 | speculationCheck( | |
2691 | StoreToHole, JSValueRegs(), 0, | |
2692 | m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag))); | |
2693 | } else { | |
2694 | MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); | |
2695 | if (arrayMode.isSlowPut()) { | |
2696 | // This is sort of strange. If we wanted to optimize this code path, we would invert | |
2697 | // the above branch. But it's simply not worth it since this only happens if we're | |
2698 | // already having a bad time. | |
2699 | slowCases.append(m_jit.jump()); | |
2700 | } else { | |
2701 | m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); | |
2702 | ||
2703 | // If we're writing to a hole we might be growing the array, so update the length if needed. | |
2704 | MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); | |
2705 | m_jit.add32(TrustedImm32(1), propertyReg); | |
2706 | m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); | |
2707 | m_jit.sub32(TrustedImm32(1), propertyReg); | |
2708 | ||
2709 | lengthDoesNotNeedUpdate.link(&m_jit); | |
2710 | } | |
2711 | notHoleValue.link(&m_jit); | |
2712 | } | |
2713 | ||
2714 | // Store the value to the array. | |
2715 | m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
2716 | m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
2717 | ||
2718 | base.use(); | |
2719 | property.use(); | |
2720 | value.use(); | |
2721 | storage.use(); | |
2722 | ||
2723 | if (!slowCases.empty()) { | |
2724 | if (node->op() == PutByValDirect) { | |
2725 | addSlowPathGenerator(slowPathCall( | |
2726 | slowCases, this, | |
2727 | m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict, | |
2728 | NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg)); | |
2729 | } else { | |
2730 | addSlowPathGenerator(slowPathCall( | |
2731 | slowCases, this, | |
2732 | m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, | |
2733 | NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg)); | |
2734 | } | |
2735 | } | |
2736 | ||
2737 | noResult(node, UseChildrenCalledExplicitly); | |
2738 | break; | |
2739 | } | |
2740 | ||
2741 | default: { | |
2742 | TypedArrayType type = arrayMode.typedArrayType(); | |
2743 | if (isInt(type)) | |
2744 | compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type); | |
2745 | else | |
2746 | compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type); | |
2747 | } } | |
2748 | break; | |
2749 | } | |
2750 | ||
2751 | case RegExpExec: { | |
2752 | if (compileRegExpExec(node)) | |
2753 | return; | |
2754 | ||
2755 | if (!node->adjustedRefCount()) { | |
2756 | SpeculateCellOperand base(this, node->child1()); | |
2757 | SpeculateCellOperand argument(this, node->child2()); | |
2758 | GPRReg baseGPR = base.gpr(); | |
2759 | GPRReg argumentGPR = argument.gpr(); | |
2760 | ||
2761 | flushRegisters(); | |
2762 | GPRFlushedCallResult result(this); | |
2763 | callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); | |
2764 | ||
2765 | // Even though this node's result is unused, we must still report one (booleanResult) | |
2766 | // because otherwise we screw up register allocation, which thinks that this node has a result. | |
2767 | booleanResult(result.gpr(), node); | |
2768 | break; | |
2769 | } | |
2770 | ||
2771 | SpeculateCellOperand base(this, node->child1()); | |
2772 | SpeculateCellOperand argument(this, node->child2()); | |
2773 | GPRReg baseGPR = base.gpr(); | |
2774 | GPRReg argumentGPR = argument.gpr(); | |
2775 | ||
2776 | flushRegisters(); | |
2777 | GPRFlushedCallResult2 resultTag(this); | |
2778 | GPRFlushedCallResult resultPayload(this); | |
2779 | callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR); | |
2780 | ||
2781 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); | |
2782 | break; | |
2783 | } | |
2784 | ||
2785 | case RegExpTest: { | |
2786 | SpeculateCellOperand base(this, node->child1()); | |
2787 | SpeculateCellOperand argument(this, node->child2()); | |
2788 | GPRReg baseGPR = base.gpr(); | |
2789 | GPRReg argumentGPR = argument.gpr(); | |
2790 | ||
2791 | flushRegisters(); | |
2792 | GPRFlushedCallResult result(this); | |
2793 | callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); | |
2794 | ||
2795 | // If we add a DataFormatBool, we should use it here. | |
2796 | booleanResult(result.gpr(), node); | |
2797 | break; | |
2798 | } | |
2799 | ||
2800 | case ArrayPush: { | |
2801 | ASSERT(node->arrayMode().isJSArray()); | |
2802 | ||
2803 | SpeculateCellOperand base(this, node->child1()); | |
2804 | GPRTemporary storageLength(this); | |
2805 | ||
2806 | GPRReg baseGPR = base.gpr(); | |
2807 | GPRReg storageLengthGPR = storageLength.gpr(); | |
2808 | ||
2809 | StorageOperand storage(this, node->child3()); | |
2810 | GPRReg storageGPR = storage.gpr(); | |
2811 | ||
2812 | switch (node->arrayMode().type()) { | |
2813 | case Array::Int32: { | |
2814 | SpeculateInt32Operand value(this, node->child2()); | |
2815 | GPRReg valuePayloadGPR = value.gpr(); | |
2816 | ||
2817 | m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); | |
2818 | MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); | |
2819 | m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
2820 | m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
2821 | m_jit.add32(TrustedImm32(1), storageLengthGPR); | |
2822 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); | |
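// Repurpose storageGPR to hold Int32Tag so that (storageGPR, storageLengthGPR) box the new length as the JSValue result of ArrayPush.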
2823 | m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); | |
2824 | ||
2825 | addSlowPathGenerator( | |
2826 | slowPathCall( | |
2827 | slowPath, this, operationArrayPush, | |
2828 | JSValueRegs(storageGPR, storageLengthGPR), | |
2829 | TrustedImm32(JSValue::Int32Tag), valuePayloadGPR, baseGPR)); | |
2830 | ||
2831 | jsValueResult(storageGPR, storageLengthGPR, node); | |
2832 | break; | |
2833 | } | |
2834 | ||
2835 | case Array::Contiguous: { | |
2836 | JSValueOperand value(this, node->child2()); | |
2837 | GPRReg valueTagGPR = value.tagGPR(); | |
2838 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
2839 | ||
2840 | m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); | |
2841 | MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); | |
2842 | m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
2843 | m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
2844 | m_jit.add32(TrustedImm32(1), storageLengthGPR); | |
2845 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); | |
2846 | m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); | |
2847 | ||
2848 | addSlowPathGenerator( | |
2849 | slowPathCall( | |
2850 | slowPath, this, operationArrayPush, | |
2851 | JSValueRegs(storageGPR, storageLengthGPR), | |
2852 | valueTagGPR, valuePayloadGPR, baseGPR)); | |
2853 | ||
2854 | jsValueResult(storageGPR, storageLengthGPR, node); | |
2855 | break; | |
2856 | } | |
2857 | ||
2858 | case Array::Double: { | |
2859 | SpeculateDoubleOperand value(this, node->child2()); | |
2860 | FPRReg valueFPR = value.fpr(); | |
2861 | ||
2862 | DFG_TYPE_CHECK( | |
2863 | JSValueRegs(), node->child2(), SpecDoubleReal, | |
2864 | m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR)); | |
2865 | ||
2866 | m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); | |
2867 | MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); | |
2868 | m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight)); | |
2869 | m_jit.add32(TrustedImm32(1), storageLengthGPR); | |
2870 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); | |
2871 | m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); | |
2872 | ||
2873 | addSlowPathGenerator( | |
2874 | slowPathCall( | |
2875 | slowPath, this, operationArrayPushDouble, | |
2876 | JSValueRegs(storageGPR, storageLengthGPR), | |
2877 | valueFPR, baseGPR)); | |
2878 | ||
2879 | jsValueResult(storageGPR, storageLengthGPR, node); | |
2880 | break; | |
2881 | } | |
2882 | ||
2883 | case Array::ArrayStorage: { | |
2884 | JSValueOperand value(this, node->child2()); | |
2885 | GPRReg valueTagGPR = value.tagGPR(); | |
2886 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
2887 | ||
2888 | m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); | |
2889 | ||
2890 | // Refuse to handle bizarre lengths. | |
2891 | speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe))); | |
2892 | ||
2893 | MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); | |
2894 | ||
2895 | m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
2896 | m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
2897 | ||
2898 | m_jit.add32(TrustedImm32(1), storageLengthGPR); | |
2899 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); | |
2900 | m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); | |
2901 | m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); | |
2902 | ||
2903 | addSlowPathGenerator(slowPathCall(slowPath, this, operationArrayPush, JSValueRegs(storageGPR, storageLengthGPR), valueTagGPR, valuePayloadGPR, baseGPR)); | |
2904 | ||
2905 | jsValueResult(storageGPR, storageLengthGPR, node); | |
2906 | break; | |
2907 | } | |
2908 | ||
2909 | default: | |
2910 | CRASH(); | |
2911 | break; | |
2912 | } | |
2913 | break; | |
2914 | } | |
2915 | ||
2916 | case ArrayPop: { | |
2917 | ASSERT(node->arrayMode().isJSArray()); | |
2918 | ||
2919 | SpeculateCellOperand base(this, node->child1()); | |
2920 | StorageOperand storage(this, node->child2()); | |
2921 | GPRTemporary valueTag(this); | |
2922 | GPRTemporary valuePayload(this); | |
2923 | ||
2924 | GPRReg baseGPR = base.gpr(); | |
2925 | GPRReg valueTagGPR = valueTag.gpr(); | |
2926 | GPRReg valuePayloadGPR = valuePayload.gpr(); | |
2927 | GPRReg storageGPR = storage.gpr(); | |
2928 | ||
2929 | switch (node->arrayMode().type()) { | |
2930 | case Array::Int32: | |
2931 | case Array::Contiguous: { | |
2932 | m_jit.load32( | |
2933 | MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR); | |
2934 | MacroAssembler::Jump undefinedCase = | |
2935 | m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR); | |
2936 | m_jit.sub32(TrustedImm32(1), valuePayloadGPR); | |
2937 | m_jit.store32( | |
2938 | valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); | |
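// Load the tag of the popped slot. An EmptyValueTag means the slot was a hole, which the slow path handles; otherwise clear the slot by writing EmptyValueTag and load the payload.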
2939 | m_jit.load32( | |
2940 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), | |
2941 | valueTagGPR); | |
2942 | MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag)); | |
2943 | m_jit.store32( | |
2944 | MacroAssembler::TrustedImm32(JSValue::EmptyValueTag), | |
2945 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
2946 | m_jit.load32( | |
2947 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), | |
2948 | valuePayloadGPR); | |
2949 | ||
2950 | addSlowPathGenerator( | |
2951 | slowPathMove( | |
2952 | undefinedCase, this, | |
2953 | MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, | |
2954 | MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); | |
2955 | addSlowPathGenerator( | |
2956 | slowPathCall( | |
2957 | slowCase, this, operationArrayPopAndRecoverLength, | |
2958 | JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); | |
2959 | ||
2960 | jsValueResult(valueTagGPR, valuePayloadGPR, node); | |
2961 | break; | |
2962 | } | |
2963 | ||
2964 | case Array::Double: { | |
2965 | FPRTemporary temp(this); | |
2966 | FPRReg tempFPR = temp.fpr(); | |
2967 | ||
2968 | m_jit.load32( | |
2969 | MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR); | |
2970 | MacroAssembler::Jump undefinedCase = | |
2971 | m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR); | |
2972 | m_jit.sub32(TrustedImm32(1), valuePayloadGPR); | |
2973 | m_jit.store32( | |
2974 | valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); | |
2975 | m_jit.loadDouble( | |
2976 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight), | |
2977 | tempFPR); | |
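// Double arrays use PNaN as the hole marker: a NaN read means the slot was already a hole (slow path); otherwise store PNaN back so the popped slot becomes a hole.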
2978 | MacroAssembler::Jump slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR); | |
2979 | JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN); | |
2980 | m_jit.store32( | |
2981 | MacroAssembler::TrustedImm32(nan.u.asBits.tag), | |
2982 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
2983 | m_jit.store32( | |
2984 | MacroAssembler::TrustedImm32(nan.u.asBits.payload), | |
2985 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
2986 | boxDouble(tempFPR, valueTagGPR, valuePayloadGPR); | |
2987 | ||
2988 | addSlowPathGenerator( | |
2989 | slowPathMove( | |
2990 | undefinedCase, this, | |
2991 | MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, | |
2992 | MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); | |
2993 | addSlowPathGenerator( | |
2994 | slowPathCall( | |
2995 | slowCase, this, operationArrayPopAndRecoverLength, | |
2996 | JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); | |
2997 | ||
2998 | jsValueResult(valueTagGPR, valuePayloadGPR, node); | |
2999 | break; | |
3000 | } | |
3001 | ||
3002 | case Array::ArrayStorage: { | |
3003 | GPRTemporary storageLength(this); | |
3004 | GPRReg storageLengthGPR = storageLength.gpr(); | |
3005 | ||
3006 | m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); | |
3007 | ||
3008 | JITCompiler::JumpList setUndefinedCases; | |
3009 | setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR)); | |
3010 | ||
3011 | m_jit.sub32(TrustedImm32(1), storageLengthGPR); | |
3012 | ||
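// If length - 1 is at or beyond the vector length, the popped element is not stored in the vector, so defer to operationArrayPop.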
3013 | MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); | |
3014 | ||
3015 | m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR); | |
3016 | m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR); | |
3017 | ||
3018 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); | |
3019 | ||
3020 | setUndefinedCases.append(m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR)); | |
3021 | ||
3022 | m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
3023 | ||
3024 | m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); | |
3025 | ||
3026 | addSlowPathGenerator( | |
3027 | slowPathMove( | |
3028 | setUndefinedCases, this, | |
3029 | MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, | |
3030 | MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); | |
3031 | ||
3032 | addSlowPathGenerator( | |
3033 | slowPathCall( | |
3034 | slowCase, this, operationArrayPop, | |
3035 | JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); | |
3036 | ||
3037 | jsValueResult(valueTagGPR, valuePayloadGPR, node); | |
3038 | break; | |
3039 | } | |
3040 | ||
3041 | default: | |
3042 | CRASH(); | |
3043 | break; | |
3044 | } | |
3045 | break; | |
3046 | } | |
3047 | ||
3048 | case DFG::Jump: { | |
3049 | jump(node->targetBlock()); | |
3050 | noResult(node); | |
3051 | break; | |
3052 | } | |
3053 | ||
3054 | case Branch: | |
3055 | emitBranch(node); | |
3056 | break; | |
3057 | ||
3058 | case Switch: | |
3059 | emitSwitch(node); | |
3060 | break; | |
3061 | ||
3062 | case Return: { | |
3063 | ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT2); | |
3064 | ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR); | |
3065 | ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister); | |
3066 | ||
3067 | // Return the result in the (returnValueGPR2, returnValueGPR) tag/payload pair. | |
3068 | JSValueOperand op1(this, node->child1()); | |
3069 | op1.fill(); | |
3070 | if (op1.isDouble()) | |
3071 | boxDouble(op1.fpr(), GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR); | |
3072 | else { | |
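// Move (tag, payload) into (returnValueGPR2, returnValueGPR) without clobbering a source register that already overlaps one of the destinations.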
3073 | if (op1.payloadGPR() == GPRInfo::returnValueGPR2 && op1.tagGPR() == GPRInfo::returnValueGPR) | |
3074 | m_jit.swap(GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2); | |
3075 | else if (op1.payloadGPR() == GPRInfo::returnValueGPR2) { | |
3076 | m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR); | |
3077 | m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2); | |
3078 | } else { | |
3079 | m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2); | |
3080 | m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR); | |
3081 | } | |
3082 | } | |
3083 | ||
3084 | m_jit.emitFunctionEpilogue(); | |
3085 | m_jit.ret(); | |
3086 | ||
3087 | noResult(node); | |
3088 | break; | |
3089 | } | |
3090 | ||
3091 | case Throw: | |
3092 | case ThrowReferenceError: { | |
3093 | // We expect that throw statements are rare and are intended to exit the code block | |
3094 | // anyway, so we just OSR back to the old JIT for now. | |
3095 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); | |
3096 | break; | |
3097 | } | |
3098 | ||
3099 | case BooleanToNumber: { | |
3100 | switch (node->child1().useKind()) { | |
3101 | case BooleanUse: { | |
3102 | SpeculateBooleanOperand value(this, node->child1()); | |
3103 | GPRTemporary result(this); // FIXME: We could reuse the operand, but on speculation failure we would need recovery to restore the tag (akin to add). | |
3104 | ||
3105 | m_jit.move(value.gpr(), result.gpr()); | |
3106 | ||
3107 | int32Result(result.gpr(), node); | |
3108 | break; | |
3109 | } | |
3110 | ||
3111 | case UntypedUse: { | |
3112 | JSValueOperand value(this, node->child1()); | |
3113 | ||
3114 | if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) { | |
3115 | GPRTemporary result(this); | |
3116 | ||
3117 | GPRReg valueGPR = value.payloadGPR(); | |
3118 | GPRReg resultGPR = result.gpr(); | |
3119 | ||
3120 | m_jit.move(valueGPR, resultGPR); | |
3121 | int32Result(result.gpr(), node); | |
3122 | break; | |
3123 | } | |
3124 | ||
3125 | GPRTemporary resultTag(this); | |
3126 | GPRTemporary resultPayload(this); | |
3127 | ||
3128 | GPRReg valueTagGPR = value.tagGPR(); | |
3129 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
3130 | GPRReg resultTagGPR = resultTag.gpr(); | |
3131 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3132 | ||
3133 | m_jit.move(valuePayloadGPR, resultPayloadGPR); | |
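// If the tag is BooleanTag, retag the value as Int32Tag (the payload 0/1 is already the right number); any other value passes through unchanged.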
3134 | JITCompiler::Jump isBoolean = m_jit.branch32( | |
3135 | JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::BooleanTag)); | |
3136 | m_jit.move(valueTagGPR, resultTagGPR); | |
3137 | JITCompiler::Jump done = m_jit.jump(); | |
3138 | isBoolean.link(&m_jit); | |
3139 | m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); | |
3140 | done.link(&m_jit); | |
3141 | ||
3142 | jsValueResult(resultTagGPR, resultPayloadGPR, node); | |
3143 | break; | |
3144 | } | |
3145 | ||
3146 | default: | |
3147 | RELEASE_ASSERT_NOT_REACHED(); | |
3148 | break; | |
3149 | } | |
3150 | break; | |
3151 | } | |
3152 | ||
3153 | case ToPrimitive: { | |
3154 | RELEASE_ASSERT(node->child1().useKind() == UntypedUse); | |
3155 | JSValueOperand op1(this, node->child1()); | |
3156 | GPRTemporary resultTag(this, Reuse, op1, TagWord); | |
3157 | GPRTemporary resultPayload(this, Reuse, op1, PayloadWord); | |
3158 | ||
3159 | GPRReg op1TagGPR = op1.tagGPR(); | |
3160 | GPRReg op1PayloadGPR = op1.payloadGPR(); | |
3161 | GPRReg resultTagGPR = resultTag.gpr(); | |
3162 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3163 | ||
3164 | op1.use(); | |
3165 | ||
3166 | if (!(m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean))) { | |
3167 | m_jit.move(op1TagGPR, resultTagGPR); | |
3168 | m_jit.move(op1PayloadGPR, resultPayloadGPR); | |
3169 | } else { | |
3170 | MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(op1.jsValueRegs()); | |
3171 | MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1PayloadGPR); | |
3172 | ||
3173 | alreadyPrimitive.link(&m_jit); | |
3174 | m_jit.move(op1TagGPR, resultTagGPR); | |
3175 | m_jit.move(op1PayloadGPR, resultPayloadGPR); | |
3176 | ||
3177 | addSlowPathGenerator( | |
3178 | slowPathCall( | |
3179 | notPrimitive, this, operationToPrimitive, | |
3180 | JSValueRegs(resultTagGPR, resultPayloadGPR), op1TagGPR, op1PayloadGPR)); | |
3181 | } | |
3182 | ||
3183 | jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); | |
3184 | break; | |
3185 | } | |
3186 | ||
3187 | case ToString: | |
3188 | case CallStringConstructor: { | |
3189 | if (node->child1().useKind() == UntypedUse) { | |
3190 | JSValueOperand op1(this, node->child1()); | |
3191 | GPRReg op1PayloadGPR = op1.payloadGPR(); | |
3192 | GPRReg op1TagGPR = op1.tagGPR(); | |
3193 | ||
3194 | GPRFlushedCallResult result(this); | |
3195 | GPRReg resultGPR = result.gpr(); | |
3196 | ||
3197 | flushRegisters(); | |
3198 | ||
3199 | JITCompiler::Jump done; | |
3200 | if (node->child1()->prediction() & SpecString) { | |
3201 | JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(op1.jsValueRegs()); | |
3202 | JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1PayloadGPR); | |
3203 | m_jit.move(op1PayloadGPR, resultGPR); | |
3204 | done = m_jit.jump(); | |
3205 | slowPath1.link(&m_jit); | |
3206 | slowPath2.link(&m_jit); | |
3207 | } | |
3208 | if (op == ToString) | |
3209 | callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR); | |
3210 | else { | |
3211 | ASSERT(op == CallStringConstructor); | |
3212 | callOperation(operationCallStringConstructor, resultGPR, op1TagGPR, op1PayloadGPR); | |
3213 | } | |
3214 | if (done.isSet()) | |
3215 | done.link(&m_jit); | |
3216 | cellResult(resultGPR, node); | |
3217 | break; | |
3218 | } | |
3219 | ||
3220 | compileToStringOrCallStringConstructorOnCell(node); | |
3221 | break; | |
3222 | } | |
3223 | ||
3224 | case NewStringObject: { | |
3225 | compileNewStringObject(node); | |
3226 | break; | |
3227 | } | |
3228 | ||
3229 | case NewArray: { | |
3230 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); | |
3231 | if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) { | |
3232 | Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()); | |
3233 | ASSERT(structure->indexingType() == node->indexingType()); | |
3234 | ASSERT( | |
3235 | hasUndecided(structure->indexingType()) | |
3236 | || hasInt32(structure->indexingType()) | |
3237 | || hasDouble(structure->indexingType()) | |
3238 | || hasContiguous(structure->indexingType())); | |
3239 | ||
3240 | unsigned numElements = node->numChildren(); | |
3241 | ||
3242 | GPRTemporary result(this); | |
3243 | GPRTemporary storage(this); | |
3244 | ||
3245 | GPRReg resultGPR = result.gpr(); | |
3246 | GPRReg storageGPR = storage.gpr(); | |
3247 | ||
3248 | emitAllocateJSArray(resultGPR, structure, storageGPR, numElements); | |
3249 | ||
3250 | // At this point, one way or another, resultGPR and storageGPR have pointers to | |
3251 | // the JSArray and the Butterfly, respectively. | |
3252 | ||
3253 | ASSERT(!hasUndecided(structure->indexingType()) || !node->numChildren()); | |
3254 | ||
3255 | for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) { | |
3256 | Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]; | |
3257 | switch (node->indexingType()) { | |
3258 | case ALL_BLANK_INDEXING_TYPES: | |
3259 | case ALL_UNDECIDED_INDEXING_TYPES: | |
3260 | CRASH(); | |
3261 | break; | |
3262 | case ALL_DOUBLE_INDEXING_TYPES: { | |
3263 | SpeculateDoubleOperand operand(this, use); | |
3264 | FPRReg opFPR = operand.fpr(); | |
3265 | DFG_TYPE_CHECK( | |
3266 | JSValueRegs(), use, SpecDoubleReal, | |
3267 | m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); | |
3268 | ||
3269 | m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx)); | |
3270 | break; | |
3271 | } | |
3272 | case ALL_INT32_INDEXING_TYPES: { | |
3273 | SpeculateInt32Operand operand(this, use); | |
3274 | m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
3275 | m_jit.store32(operand.gpr(), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
3276 | break; | |
3277 | } | |
3278 | case ALL_CONTIGUOUS_INDEXING_TYPES: { | |
3279 | JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]); | |
3280 | GPRReg opTagGPR = operand.tagGPR(); | |
3281 | GPRReg opPayloadGPR = operand.payloadGPR(); | |
3282 | m_jit.store32(opTagGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
3283 | m_jit.store32(opPayloadGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
3284 | break; | |
3285 | } | |
3286 | default: | |
3287 | CRASH(); | |
3288 | break; | |
3289 | } | |
3290 | } | |
3291 | ||
3292 | // Yuck, we should *really* have a way of also returning the storageGPR. But | |
3293 | // that's the least of what's wrong with this code. We really shouldn't be | |
3294 | // allocating the array after having computed - and probably spilled to the | |
3295 | // stack - all of the things that will go into the array. The solution to that | |
3296 | // bigger problem will also likely fix the redundancy in reloading the storage | |
3297 | // pointer that we currently have. | |
3298 | ||
3299 | cellResult(resultGPR, node); | |
3300 | break; | |
3301 | } | |
3302 | ||
3303 | if (!node->numChildren()) { | |
3304 | flushRegisters(); | |
3305 | GPRFlushedCallResult result(this); | |
3306 | callOperation( | |
3307 | operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); | |
3308 | cellResult(result.gpr(), node); | |
3309 | break; | |
3310 | } | |
3311 | ||
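// General case: spill all of the prospective elements to a scratch buffer and have operationNewArray build the array from it.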
3312 | size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren(); | |
3313 | ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(scratchSize); | |
3314 | EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0; | |
3315 | ||
3316 | for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) { | |
3317 | // Need to perform the speculations that this node promises to perform. If we're | |
3318 | // emitting code here and the indexing type is not array storage then there is | |
3319 | // probably something hilarious going on and we're already failing at all the | |
3320 | // things, but at least we're going to be sound. | |
3321 | Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]; | |
3322 | switch (node->indexingType()) { | |
3323 | case ALL_BLANK_INDEXING_TYPES: | |
3324 | case ALL_UNDECIDED_INDEXING_TYPES: | |
3325 | CRASH(); | |
3326 | break; | |
3327 | case ALL_DOUBLE_INDEXING_TYPES: { | |
3328 | SpeculateDoubleOperand operand(this, use); | |
3329 | FPRReg opFPR = operand.fpr(); | |
3330 | DFG_TYPE_CHECK( | |
3331 | JSValueRegs(), use, SpecFullRealNumber, | |
3332 | m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); | |
3333 | ||
3334 | m_jit.storeDouble(opFPR, TrustedImmPtr(reinterpret_cast<char*>(buffer + operandIdx))); | |
3335 | break; | |
3336 | } | |
3337 | case ALL_INT32_INDEXING_TYPES: { | |
3338 | SpeculateInt32Operand operand(this, use); | |
3339 | GPRReg opGPR = operand.gpr(); | |
3340 | m_jit.store32(TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); | |
3341 | m_jit.store32(opGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); | |
3342 | break; | |
3343 | } | |
3344 | case ALL_CONTIGUOUS_INDEXING_TYPES: | |
3345 | case ALL_ARRAY_STORAGE_INDEXING_TYPES: { | |
3346 | JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]); | |
3347 | GPRReg opTagGPR = operand.tagGPR(); | |
3348 | GPRReg opPayloadGPR = operand.payloadGPR(); | |
3349 | ||
3350 | m_jit.store32(opTagGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); | |
3351 | m_jit.store32(opPayloadGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); | |
3352 | operand.use(); | |
3353 | break; | |
3354 | } | |
3355 | default: | |
3356 | CRASH(); | |
3357 | break; | |
3358 | } | |
3359 | } | |
3360 | ||
3361 | switch (node->indexingType()) { | |
3362 | case ALL_DOUBLE_INDEXING_TYPES: | |
3363 | case ALL_INT32_INDEXING_TYPES: | |
3364 | useChildren(node); | |
3365 | break; | |
3366 | default: | |
3367 | break; | |
3368 | } | |
3369 | ||
3370 | flushRegisters(); | |
3371 | ||
3372 | if (scratchSize) { | |
3373 | GPRTemporary scratch(this); | |
3374 | ||
3375 | // Tell GC mark phase how much of the scratch buffer is active during call. | |
3376 | m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr()); | |
3377 | m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr()); | |
3378 | } | |
3379 | ||
3380 | GPRFlushedCallResult result(this); | |
3381 | ||
3382 | callOperation( | |
3383 | operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), | |
3384 | static_cast<void*>(buffer), node->numChildren()); | |
3385 | ||
3386 | if (scratchSize) { | |
3387 | GPRTemporary scratch(this); | |
3388 | ||
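// The call has consumed the buffer; mark it inactive again so the GC mark phase stops scanning it.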
3389 | m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr()); | |
3390 | m_jit.storePtr(TrustedImmPtr(0), scratch.gpr()); | |
3391 | } | |
3392 | ||
3393 | cellResult(result.gpr(), node, UseChildrenCalledExplicitly); | |
3394 | break; | |
3395 | } | |
3396 | ||
3397 | case NewArrayWithSize: { | |
3398 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); | |
3399 | if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) { | |
3400 | SpeculateStrictInt32Operand size(this, node->child1()); | |
3401 | GPRTemporary result(this); | |
3402 | GPRTemporary storage(this); | |
3403 | GPRTemporary scratch(this); | |
3404 | GPRTemporary scratch2(this); | |
3405 | ||
3406 | GPRReg sizeGPR = size.gpr(); | |
3407 | GPRReg resultGPR = result.gpr(); | |
3408 | GPRReg storageGPR = storage.gpr(); | |
3409 | GPRReg scratchGPR = scratch.gpr(); | |
3410 | GPRReg scratch2GPR = scratch2.gpr(); | |
3411 | ||
3412 | MacroAssembler::JumpList slowCases; | |
3413 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH))); | |
3414 | ||
3415 | ASSERT((1 << 3) == sizeof(JSValue)); | |
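// Butterfly allocation size in bytes: size * sizeof(JSValue) plus the IndexingHeader.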
3416 | m_jit.move(sizeGPR, scratchGPR); | |
3417 | m_jit.lshift32(TrustedImm32(3), scratchGPR); | |
3418 | m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR); | |
3419 | slowCases.append( | |
3420 | emitAllocateBasicStorage(resultGPR, storageGPR)); | |
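// emitAllocateBasicStorage leaves the end of the new allocation in storageGPR; stepping back over the vector bytes points it at the butterfly, just past the IndexingHeader.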
3421 | m_jit.subPtr(scratchGPR, storageGPR); | |
3422 | Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()); | |
3423 | emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases); | |
3424 | ||
3425 | m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); | |
3426 | m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); | |
3427 | ||
3428 | if (hasDouble(node->indexingType())) { | |
3429 | JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN); | |
3430 | ||
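// Fill every new slot with PNaN, the hole marker for double arrays, so uninitialized reads see holes rather than garbage.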
3431 | m_jit.move(sizeGPR, scratchGPR); | |
3432 | MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR); | |
3433 | MacroAssembler::Label loop = m_jit.label(); | |
3434 | m_jit.sub32(TrustedImm32(1), scratchGPR); | |
3435 | m_jit.store32(TrustedImm32(nan.u.asBits.tag), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
3436 | m_jit.store32(TrustedImm32(nan.u.asBits.payload), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
3437 | m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit); | |
3438 | done.link(&m_jit); | |
3439 | } | |
3440 | ||
3441 | addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>( | |
3442 | slowCases, this, operationNewArrayWithSize, resultGPR, | |
3443 | globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), | |
3444 | globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage), | |
3445 | sizeGPR)); | |
3446 | ||
3447 | cellResult(resultGPR, node); | |
3448 | break; | |
3449 | } | |
3450 | ||
3451 | SpeculateStrictInt32Operand size(this, node->child1()); | |
3452 | GPRReg sizeGPR = size.gpr(); | |
3453 | flushRegisters(); | |
3454 | GPRFlushedCallResult result(this); | |
3455 | GPRReg resultGPR = result.gpr(); | |
3456 | GPRReg structureGPR = selectScratchGPR(sizeGPR); | |
3457 | MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)); | |
3458 | m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR); | |
3459 | MacroAssembler::Jump done = m_jit.jump(); | |
3460 | bigLength.link(&m_jit); | |
3461 | m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)), structureGPR); | |
3462 | done.link(&m_jit); | |
3463 | callOperation( | |
3464 | operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR); | |
3465 | cellResult(resultGPR, node); | |
3466 | break; | |
3467 | } | |
3468 | ||
3469 | case NewArrayBuffer: { | |
3470 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); | |
3471 | IndexingType indexingType = node->indexingType(); | |
3472 | if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) { | |
3473 | unsigned numElements = node->numConstants(); | |
3474 | ||
3475 | GPRTemporary result(this); | |
3476 | GPRTemporary storage(this); | |
3477 | ||
3478 | GPRReg resultGPR = result.gpr(); | |
3479 | GPRReg storageGPR = storage.gpr(); | |
3480 | ||
3481 | emitAllocateJSArray(resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), storageGPR, numElements); | |
3482 | ||
3483 | if (node->indexingType() == ArrayWithDouble) { | |
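// Double constants are written as raw 64-bit values, split into two 32-bit halves on this 32-bit platform.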
3484 | JSValue* data = m_jit.codeBlock()->constantBuffer(node->startConstant()); | |
3485 | for (unsigned index = 0; index < node->numConstants(); ++index) { | |
3486 | union { | |
3487 | int32_t halves[2]; | |
3488 | double value; | |
3489 | } u; | |
3490 | u.value = data[index].asNumber(); | |
3491 | m_jit.store32(Imm32(u.halves[0]), MacroAssembler::Address(storageGPR, sizeof(double) * index)); | |
3492 | m_jit.store32(Imm32(u.halves[1]), MacroAssembler::Address(storageGPR, sizeof(double) * index + sizeof(int32_t))); | |
3493 | } | |
3494 | } else { | |
3495 | int32_t* data = bitwise_cast<int32_t*>(m_jit.codeBlock()->constantBuffer(node->startConstant())); | |
3496 | for (unsigned index = 0; index < node->numConstants() * 2; ++index) { | |
3497 | m_jit.store32( | |
3498 | Imm32(data[index]), MacroAssembler::Address(storageGPR, sizeof(int32_t) * index)); | |
3499 | } | |
3500 | } | |
3501 | ||
3502 | cellResult(resultGPR, node); | |
3503 | break; | |
3504 | } | |
3505 | ||
3506 | flushRegisters(); | |
3507 | GPRFlushedCallResult result(this); | |
3508 | ||
3509 | callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants()); | |
3510 | ||
3511 | cellResult(result.gpr(), node); | |
3512 | break; | |
3513 | } | |
3514 | ||
3515 | case NewTypedArray: { | |
3516 | switch (node->child1().useKind()) { | |
3517 | case Int32Use: | |
3518 | compileNewTypedArray(node); | |
3519 | break; | |
3520 | case UntypedUse: { | |
3521 | JSValueOperand argument(this, node->child1()); | |
3522 | GPRReg argumentTagGPR = argument.tagGPR(); | |
3523 | GPRReg argumentPayloadGPR = argument.payloadGPR(); | |
3524 | ||
3525 | flushRegisters(); | |
3526 | ||
3527 | GPRFlushedCallResult result(this); | |
3528 | GPRReg resultGPR = result.gpr(); | |
3529 | ||
3530 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); | |
3531 | callOperation( | |
3532 | operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()), | |
3533 | resultGPR, globalObject->typedArrayStructure(node->typedArrayType()), | |
3534 | argumentTagGPR, argumentPayloadGPR); | |
3535 | ||
3536 | cellResult(resultGPR, node); | |
3537 | break; | |
3538 | } | |
3539 | default: | |
3540 | RELEASE_ASSERT_NOT_REACHED(); | |
3541 | break; | |
3542 | } | |
3543 | break; | |
3544 | } | |
3545 | ||
3546 | case NewRegexp: { | |
3547 | flushRegisters(); | |
3548 | GPRFlushedCallResult resultPayload(this); | |
3549 | GPRFlushedCallResult2 resultTag(this); | |
3550 | ||
3551 | callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex())); | |
3552 | ||
3553 | // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag. | |
3554 | cellResult(resultPayload.gpr(), node); | |
3555 | break; | |
3556 | } | |
3557 | ||
3558 | case ToThis: { | |
3559 | ASSERT(node->child1().useKind() == UntypedUse); | |
3560 | JSValueOperand thisValue(this, node->child1()); | |
3561 | GPRTemporary temp(this); | |
3562 | GPRTemporary tempTag(this); | |
3563 | GPRReg thisValuePayloadGPR = thisValue.payloadGPR(); | |
3564 | GPRReg thisValueTagGPR = thisValue.tagGPR(); | |
3565 | GPRReg tempGPR = temp.gpr(); | |
3566 | GPRReg tempTagGPR = tempTag.gpr(); | |
3567 | ||
3568 | MacroAssembler::JumpList slowCases; | |
3569 | slowCases.append(m_jit.branchIfNotCell(thisValue.jsValueRegs())); | |
3570 | slowCases.append(m_jit.branch8( | |
3571 | MacroAssembler::NotEqual, | |
3572 | MacroAssembler::Address(thisValuePayloadGPR, JSCell::typeInfoTypeOffset()), | |
3573 | TrustedImm32(FinalObjectType))); | |
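// Fast path: 'this' is already a final object, for which ToThis is the identity. Non-cells and other cell types go to the ToThis operation below.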
3574 | m_jit.move(thisValuePayloadGPR, tempGPR); | |
3575 | m_jit.move(thisValueTagGPR, tempTagGPR); | |
3576 | J_JITOperation_EJ function; | |
3577 | if (m_jit.graph().executableFor(node->origin.semantic)->isStrictMode()) | |
3578 | function = operationToThisStrict; | |
3579 | else | |
3580 | function = operationToThis; | |
3581 | addSlowPathGenerator( | |
3582 | slowPathCall( | |
3583 | slowCases, this, function, | |
3584 | JSValueRegs(tempTagGPR, tempGPR), thisValueTagGPR, thisValuePayloadGPR)); | |
3585 | ||
3586 | jsValueResult(tempTagGPR, tempGPR, node); | |
3587 | break; | |
3588 | } | |
3589 | ||
3590 | case CreateThis: { | |
3591 | // Note that there is not much profit in speculating here. The only things we | |
3592 | // speculate on are (1) that it's a cell, since that eliminates cell checks | |
3593 | // later if the proto is reused, and (2) if we have a FinalObject prediction, | |
3594 | // then we speculate because we want to get recompiled if it isn't one (since | |
3595 | // otherwise we'd start taking the slow path a lot). | |
3596 | ||
3597 | SpeculateCellOperand callee(this, node->child1()); | |
3598 | GPRTemporary result(this); | |
3599 | GPRTemporary allocator(this); | |
3600 | GPRTemporary structure(this); | |
3601 | GPRTemporary scratch(this); | |
3602 | ||
3603 | GPRReg calleeGPR = callee.gpr(); | |
3604 | GPRReg resultGPR = result.gpr(); | |
3605 | GPRReg allocatorGPR = allocator.gpr(); | |
3606 | GPRReg structureGPR = structure.gpr(); | |
3607 | GPRReg scratchGPR = scratch.gpr(); | |
3608 | // Rare data is only used to access the allocator & structure, | |
3609 | // so we can reuse structureGPR for it and avoid an additional GPR. | |
3610 | GPRReg rareDataGPR = structureGPR; | |
3611 | ||
3612 | MacroAssembler::JumpList slowPath; | |
3613 | ||
3614 | m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR); | |
3615 | slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR)); | |
3616 | m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR); | |
3617 | m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR); | |
3618 | slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR)); | |
3619 | emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath); | |
3620 | ||
3621 | addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR, node->inlineCapacity())); | |
3622 | ||
3623 | cellResult(resultGPR, node); | |
3624 | break; | |
3625 | } | |
3626 | ||
3627 | case NewObject: { | |
3628 | GPRTemporary result(this); | |
3629 | GPRTemporary allocator(this); | |
3630 | GPRTemporary scratch(this); | |
3631 | ||
3632 | GPRReg resultGPR = result.gpr(); | |
3633 | GPRReg allocatorGPR = allocator.gpr(); | |
3634 | GPRReg scratchGPR = scratch.gpr(); | |
3635 | ||
3636 | MacroAssembler::JumpList slowPath; | |
3637 | ||
3638 | Structure* structure = node->structure(); | |
3639 | size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); | |
3640 | MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize); | |
3641 | ||
3642 | m_jit.move(TrustedImmPtr(allocatorPtr), allocatorGPR); | |
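// Inline-allocate the object from the MarkedAllocator's free list; if that fails, the slow path calls operationNewObject.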
3643 | emitAllocateJSObject(resultGPR, allocatorGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, slowPath); | |
3644 | ||
3645 | addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, structure)); | |
3646 | ||
3647 | cellResult(resultGPR, node); | |
3648 | break; | |
3649 | } | |
3650 | ||
3651 | case GetCallee: { | |
3652 | GPRTemporary result(this); | |
3653 | m_jit.loadPtr(JITCompiler::payloadFor(JSStack::Callee), result.gpr()); | |
3654 | cellResult(result.gpr(), node); | |
3655 | break; | |
3656 | } | |
3657 | ||
3658 | case GetArgumentCount: { | |
3659 | GPRTemporary result(this); | |
3660 | m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), result.gpr()); | |
3661 | int32Result(result.gpr(), node); | |
3662 | break; | |
3663 | } | |
3664 | ||
3665 | case GetScope: | |
3666 | compileGetScope(node); | |
3667 | break; | |
3668 | ||
3669 | case SkipScope: | |
3670 | compileSkipScope(node); | |
3671 | break; | |
3672 | ||
3673 | case GetClosureVar: { | |
3674 | SpeculateCellOperand base(this, node->child1()); | |
3675 | GPRTemporary resultTag(this); | |
3676 | GPRTemporary resultPayload(this); | |
3677 | GPRReg baseGPR = base.gpr(); | |
3678 | GPRReg resultTagGPR = resultTag.gpr(); | |
3679 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3680 | m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset), resultTagGPR); | |
3681 | m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset), resultPayloadGPR); | |
3682 | jsValueResult(resultTagGPR, resultPayloadGPR, node); | |
3683 | break; | |
3684 | } | |
3685 | ||
3686 | case PutClosureVar: { | |
3687 | SpeculateCellOperand base(this, node->child1()); | |
3688 | JSValueOperand value(this, node->child2()); | |
3689 | ||
3690 | GPRReg baseGPR = base.gpr(); | |
3691 | GPRReg valueTagGPR = value.tagGPR(); | |
3692 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
3693 | ||
3694 | m_jit.store32(valueTagGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset)); | |
3695 | m_jit.store32(valuePayloadGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset)); | |
3696 | noResult(node); | |
3697 | break; | |
3698 | } | |
3699 | ||
3700 | case GetById: { | |
3701 | ASSERT(node->prediction()); | |
3702 | ||
3703 | switch (node->child1().useKind()) { | |
3704 | case CellUse: { | |
3705 | SpeculateCellOperand base(this, node->child1()); | |
3706 | GPRTemporary resultTag(this); | |
3707 | GPRTemporary resultPayload(this, Reuse, base); | |
3708 | ||
3709 | GPRReg baseGPR = base.gpr(); | |
3710 | GPRReg resultTagGPR = resultTag.gpr(); | |
3711 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3712 | ||
3713 | base.use(); | |
3714 | ||
3715 | cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber()); | |
3716 | ||
3717 | jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); | |
3718 | break; | |
3719 | } | |
3720 | ||
3721 | case UntypedUse: { | |
3722 | JSValueOperand base(this, node->child1()); | |
3723 | GPRTemporary resultTag(this); | |
3724 | GPRTemporary resultPayload(this, Reuse, base, TagWord); | |
3725 | ||
3726 | GPRReg baseTagGPR = base.tagGPR(); | |
3727 | GPRReg basePayloadGPR = base.payloadGPR(); | |
3728 | GPRReg resultTagGPR = resultTag.gpr(); | |
3729 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3730 | ||
3731 | base.use(); | |
3732 | ||
3733 | JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs()); | |
3734 | ||
3735 | cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell); | |
3736 | ||
3737 | jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); | |
3738 | break; | |
3739 | } | |
3740 | ||
3741 | default: | |
3742 | RELEASE_ASSERT_NOT_REACHED(); | |
3743 | break; | |
3744 | } | |
3745 | break; | |
3746 | } | |
3747 | ||
3748 | case GetByIdFlush: { | |
3749 | if (!node->prediction()) { | |
3750 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); | |
3751 | break; | |
3752 | } | |
3753 | ||
3754 | switch (node->child1().useKind()) { | |
3755 | case CellUse: { | |
3756 | SpeculateCellOperand base(this, node->child1()); | |
3757 | ||
3758 | GPRReg baseGPR = base.gpr(); | |
3759 | ||
3760 | GPRFlushedCallResult resultPayload(this); | |
3761 | GPRFlushedCallResult2 resultTag(this); | |
3762 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3763 | GPRReg resultTagGPR = resultTag.gpr(); | |
3764 | ||
3765 | base.use(); | |
3766 | ||
3767 | flushRegisters(); | |
3768 | ||
3769 | cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill); | |
3770 | ||
3771 | jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); | |
3772 | break; | |
3773 | } | |
3774 | ||
3775 | case UntypedUse: { | |
3776 | JSValueOperand base(this, node->child1()); | |
3777 | GPRReg baseTagGPR = base.tagGPR(); | |
3778 | GPRReg basePayloadGPR = base.payloadGPR(); | |
3779 | ||
3780 | GPRFlushedCallResult resultPayload(this); | |
3781 | GPRFlushedCallResult2 resultTag(this); | |
3782 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3783 | GPRReg resultTagGPR = resultTag.gpr(); | |
3784 | ||
3785 | base.use(); | |
3786 | ||
3787 | flushRegisters(); | |
3788 | ||
3789 | JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs()); | |
3790 | ||
3791 | cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell, DontSpill); | |
3792 | ||
3793 | jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); | |
3794 | break; | |
3795 | } | |
3796 | ||
3797 | default: | |
3798 | RELEASE_ASSERT_NOT_REACHED(); | |
3799 | break; | |
3800 | } | |
3801 | break; | |
3802 | } | |
3803 | ||
3804 | case GetArrayLength: | |
3805 | compileGetArrayLength(node); | |
3806 | break; | |
3807 | ||
3808 | case CheckCell: { | |
3809 | SpeculateCellOperand cell(this, node->child1()); | |
3810 | speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell())); | |
3811 | noResult(node); | |
3812 | break; | |
3813 | } | |
3814 | ||
3815 | case CheckNotEmpty: { | |
3816 | JSValueOperand operand(this, node->child1()); | |
3817 | GPRReg tagGPR = operand.tagGPR(); | |
3818 | speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::EmptyValueTag))); | |
3819 | noResult(node); | |
3820 | break; | |
3821 | } | |
3822 | ||
3823 | case GetExecutable: { | |
3824 | SpeculateCellOperand function(this, node->child1()); | |
3825 | GPRTemporary result(this, Reuse, function); | |
3826 | GPRReg functionGPR = function.gpr(); | |
3827 | GPRReg resultGPR = result.gpr(); | |
3828 | speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType); | |
3829 | m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR); | |
3830 | cellResult(resultGPR, node); | |
3831 | break; | |
3832 | } | |
3833 | ||
3834 | case CheckStructure: { | |
3835 | SpeculateCellOperand base(this, node->child1()); | |
3836 | ||
3837 | ASSERT(node->structureSet().size()); | |
3838 | ||
3839 | if (node->structureSet().size() == 1) { | |
3840 | speculationCheck( | |
3841 | BadCache, JSValueSource::unboxedCell(base.gpr()), 0, | |
3842 | m_jit.branchWeakPtr( | |
3843 | JITCompiler::NotEqual, | |
3844 | JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()), | |
3845 | node->structureSet()[0])); | |
3846 | } else { | |
3847 | GPRTemporary structure(this); | |
3848 | ||
3849 | m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()), structure.gpr()); | |
3850 | ||
3851 | JITCompiler::JumpList done; | |
3852 | ||
3853 | for (size_t i = 0; i < node->structureSet().size() - 1; ++i) | |
3854 | done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node->structureSet()[i])); | |
3855 | ||
3856 | speculationCheck( | |
3857 | BadCache, JSValueSource::unboxedCell(base.gpr()), 0, | |
3858 | m_jit.branchWeakPtr( | |
3859 | JITCompiler::NotEqual, structure.gpr(), node->structureSet().last())); | |
3860 | ||
3861 | done.link(&m_jit); | |
3862 | } | |
3863 | ||
3864 | noResult(node); | |
3865 | break; | |
3866 | } | |
3867 | ||
3868 | case PutStructure: { | |
3869 | Structure* oldStructure = node->transition()->previous; | |
3870 | Structure* newStructure = node->transition()->next; | |
3871 | ||
3872 | m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node); | |
3873 | ||
3874 | SpeculateCellOperand base(this, node->child1()); | |
3875 | GPRReg baseGPR = base.gpr(); | |
3876 | ||
3877 | ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType()); | |
3878 | ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type()); | |
3879 | ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags()); | |
3880 | m_jit.storePtr(MacroAssembler::TrustedImmPtr(newStructure), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset())); | |
3881 | ||
3882 | noResult(node); | |
3883 | break; | |
3884 | } | |
3885 | ||
3886 | case AllocatePropertyStorage: | |
3887 | compileAllocatePropertyStorage(node); | |
3888 | break; | |
3889 | ||
3890 | case ReallocatePropertyStorage: | |
3891 | compileReallocatePropertyStorage(node); | |
3892 | break; | |
3893 | ||
3894 | case GetButterfly: { | |
3895 | SpeculateCellOperand base(this, node->child1()); | |
3896 | GPRTemporary result(this, Reuse, base); | |
3897 | ||
3898 | GPRReg baseGPR = base.gpr(); | |
3899 | GPRReg resultGPR = result.gpr(); | |
3900 | ||
3901 | m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR); | |
3902 | ||
3903 | storageResult(resultGPR, node); | |
3904 | break; | |
3905 | } | |
3906 | ||
3907 | case GetIndexedPropertyStorage: { | |
3908 | compileGetIndexedPropertyStorage(node); | |
3909 | break; | |
3910 | } | |
3911 | ||
3912 | case ConstantStoragePointer: { | |
3913 | compileConstantStoragePointer(node); | |
3914 | break; | |
3915 | } | |
3916 | ||
3917 | case GetTypedArrayByteOffset: { | |
3918 | compileGetTypedArrayByteOffset(node); | |
3919 | break; | |
3920 | } | |
3921 | ||
3922 | case GetByOffset: { | |
3923 | StorageOperand storage(this, node->child1()); | |
3924 | GPRTemporary resultTag(this, Reuse, storage); | |
3925 | GPRTemporary resultPayload(this); | |
3926 | ||
3927 | GPRReg storageGPR = storage.gpr(); | |
3928 | GPRReg resultTagGPR = resultTag.gpr(); | |
3929 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3930 | ||
3931 | StorageAccessData& storageAccessData = node->storageAccessData(); | |
3932 | ||
3933 | m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); | |
3934 | m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); | |
3935 | ||
3936 | jsValueResult(resultTagGPR, resultPayloadGPR, node); | |
3937 | break; | |
3938 | } | |
3939 | ||
3940 | case GetGetterSetterByOffset: { | |
3941 | StorageOperand storage(this, node->child1()); | |
3942 | GPRTemporary resultPayload(this); | |
3943 | ||
3944 | GPRReg storageGPR = storage.gpr(); | |
3945 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
3946 | ||
3947 | StorageAccessData& storageAccessData = node->storageAccessData(); | |
3948 | ||
3949 | m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); | |
3950 | ||
3951 | cellResult(resultPayloadGPR, node); | |
3952 | break; | |
3953 | } | |
3954 | ||
3955 | case GetGetter: { | |
3956 | SpeculateCellOperand op1(this, node->child1()); | |
3957 | GPRTemporary result(this, Reuse, op1); | |
3958 | ||
3959 | GPRReg op1GPR = op1.gpr(); | |
3960 | GPRReg resultGPR = result.gpr(); | |
3961 | ||
3962 | m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR); | |
3963 | ||
3964 | cellResult(resultGPR, node); | |
3965 | break; | |
3966 | } | |
3967 | ||
3968 | case GetSetter: { | |
3969 | SpeculateCellOperand op1(this, node->child1()); | |
3970 | GPRTemporary result(this, Reuse, op1); | |
3971 | ||
3972 | GPRReg op1GPR = op1.gpr(); | |
3973 | GPRReg resultGPR = result.gpr(); | |
3974 | ||
3975 | m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR); | |
3976 | ||
3977 | cellResult(resultGPR, node); | |
3978 | break; | |
3979 | } | |
3980 | ||
3981 | case PutByOffset: { | |
3982 | StorageOperand storage(this, node->child1()); | |
3983 | JSValueOperand value(this, node->child3()); | |
3984 | ||
3985 | GPRReg storageGPR = storage.gpr(); | |
3986 | GPRReg valueTagGPR = value.tagGPR(); | |
3987 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
3988 | ||
3989 | speculate(node, node->child2()); | |
3990 | ||
3991 | StorageAccessData& storageAccessData = node->storageAccessData(); | |
3992 | ||
3993 | m_jit.storePtr(valueTagGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); | |
3994 | m_jit.storePtr(valuePayloadGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); | |
3995 | ||
3996 | noResult(node); | |
3997 | break; | |
3998 | } | |
3999 | ||
4000 | case PutByIdFlush: { | |
4001 | SpeculateCellOperand base(this, node->child1()); | |
4002 | JSValueOperand value(this, node->child2()); | |
4003 | GPRTemporary scratch(this); | |
4004 | ||
4005 | GPRReg baseGPR = base.gpr(); | |
4006 | GPRReg valueTagGPR = value.tagGPR(); | |
4007 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
4008 | GPRReg scratchGPR = scratch.gpr(); | |
4009 | flushRegisters(); | |
4010 | ||
4011 | cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill); | |
4012 | ||
4013 | noResult(node); | |
4014 | break; | |
4015 | } | |
4016 | ||
4017 | case PutById: { | |
4018 | SpeculateCellOperand base(this, node->child1()); | |
4019 | JSValueOperand value(this, node->child2()); | |
4020 | GPRTemporary scratch(this); | |
4021 | ||
4022 | GPRReg baseGPR = base.gpr(); | |
4023 | GPRReg valueTagGPR = value.tagGPR(); | |
4024 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
4025 | GPRReg scratchGPR = scratch.gpr(); | |
4026 | ||
4027 | cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect); | |
4028 | ||
4029 | noResult(node); | |
4030 | break; | |
4031 | } | |
4032 | ||
4033 | case PutByIdDirect: { | |
4034 | SpeculateCellOperand base(this, node->child1()); | |
4035 | JSValueOperand value(this, node->child2()); | |
4036 | GPRTemporary scratch(this); | |
4037 | ||
4038 | GPRReg baseGPR = base.gpr(); | |
4039 | GPRReg valueTagGPR = value.tagGPR(); | |
4040 | GPRReg valuePayloadGPR = value.payloadGPR(); | |
4041 | GPRReg scratchGPR = scratch.gpr(); | |
4042 | ||
4043 | cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), Direct); | |
4044 | ||
4045 | noResult(node); | |
4046 | break; | |
4047 | } | |
4048 | ||
4049 | case GetGlobalVar: { | |
4050 | GPRTemporary resultPayload(this); | |
4051 | GPRTemporary resultTag(this); | |
4052 | ||
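// The payload register doubles as the pointer to the variable: load the tag first, then let the
// final load overwrite the pointer with the payload.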
4053 | m_jit.move(TrustedImmPtr(node->variablePointer()), resultPayload.gpr()); | |
4054 | m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTag.gpr()); | |
4055 | m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayload.gpr()); | |
4056 | ||
4057 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); | |
4058 | break; | |
4059 | } | |
4060 | ||
4061 | case PutGlobalVar: { | |
4062 | JSValueOperand value(this, node->child2()); | |
4063 | ||
4064 | // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have | |
4065 | // a spare register - and _ONLY_ if we happen to have a spare register - a good optimization would be to put the variable pointer into | |
4066 | // a register and then do a zero offset store followed by a four-offset store (or | |
4067 | // vice-versa depending on endianness). | |
4068 | m_jit.store32(value.tagGPR(), node->variablePointer()->tagPointer()); | |
4069 | m_jit.store32(value.payloadGPR(), node->variablePointer()->payloadPointer()); | |
4070 | ||
4071 | noResult(node); | |
4072 | break; | |
4073 | } | |
4074 | ||
4075 | case NotifyWrite: { | |
4076 | compileNotifyWrite(node); | |
4077 | break; | |
4078 | } | |
4079 | ||
4080 | case VarInjectionWatchpoint: { | |
4081 | noResult(node); | |
4082 | break; | |
4083 | } | |
4084 | ||
4085 | case CheckHasInstance: { | |
4086 | SpeculateCellOperand base(this, node->child1()); | |
4087 | GPRTemporary structure(this); | |
4088 | ||
4089 | // Speculate that the base has the 'ImplementsDefaultHasInstance' type-info flag set. | |
4090 | speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8( | |
4091 | MacroAssembler::Zero, | |
4092 | MacroAssembler::Address(base.gpr(), JSCell::typeInfoFlagsOffset()), | |
4093 | MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance))); | |
4094 | ||
4095 | noResult(node); | |
4096 | break; | |
4097 | } | |
4098 | ||
4099 | case InstanceOf: { | |
4100 | compileInstanceOf(node); | |
4101 | break; | |
4102 | } | |
4103 | ||
4104 | case IsUndefined: { | |
4105 | JSValueOperand value(this, node->child1()); | |
4106 | GPRTemporary result(this); | |
4107 | GPRTemporary localGlobalObject(this); | |
4108 | GPRTemporary remoteGlobalObject(this); | |
4109 | ||
4110 | JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs()); | |
4111 | ||
4112 | m_jit.compare32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::UndefinedTag), result.gpr()); | |
4113 | JITCompiler::Jump done = m_jit.jump(); | |
4114 | ||
4115 | isCell.link(&m_jit); | |
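// Cell case: the value can only be "undefined" if it masquerades as undefined. If the
// masquerades-as-undefined watchpoint is still valid we can answer false immediately; otherwise we
// must also check that the cell's structure belongs to the global object we are compiling for.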
4116 | JITCompiler::Jump notMasqueradesAsUndefined; | |
4117 | if (masqueradesAsUndefinedWatchpointIsStillValid()) { | |
4118 | m_jit.move(TrustedImm32(0), result.gpr()); | |
4119 | notMasqueradesAsUndefined = m_jit.jump(); | |
4120 | } else { | |
4121 | JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8( | |
4122 | JITCompiler::NonZero, | |
4123 | JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), | |
4124 | TrustedImm32(MasqueradesAsUndefined)); | |
4125 | m_jit.move(TrustedImm32(0), result.gpr()); | |
4126 | notMasqueradesAsUndefined = m_jit.jump(); | |
4127 | ||
4128 | isMasqueradesAsUndefined.link(&m_jit); | |
4129 | GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); | |
4130 | GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); | |
4131 | m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR); | |
4132 | m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureIDOffset()), result.gpr()); | |
4133 | m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR); | |
4134 | m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr()); | |
4135 | } | |
4136 | ||
4137 | notMasqueradesAsUndefined.link(&m_jit); | |
4138 | done.link(&m_jit); | |
4139 | booleanResult(result.gpr(), node); | |
4140 | break; | |
4141 | } | |
4142 | ||
4143 | case IsBoolean: { | |
4144 | JSValueOperand value(this, node->child1()); | |
4145 | GPRTemporary result(this, Reuse, value, TagWord); | |
4146 | ||
4147 | m_jit.compare32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag), result.gpr()); | |
4148 | booleanResult(result.gpr(), node); | |
4149 | break; | |
4150 | } | |
4151 | ||
4152 | case IsNumber: { | |
4153 | JSValueOperand value(this, node->child1()); | |
4154 | GPRTemporary result(this, Reuse, value, TagWord); | |
4155 | ||
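// On JSVALUE32_64, a value is a number if its tag is Int32Tag (-1) or a double tag, all of which
// are below LowestTag. Adding 1 wraps Int32Tag to 0, so a single unsigned compare against
// LowestTag + 1 answers "is this a number".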
4156 | m_jit.add32(TrustedImm32(1), value.tagGPR(), result.gpr()); | |
4157 | m_jit.compare32(JITCompiler::Below, result.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag + 1), result.gpr()); | |
4158 | booleanResult(result.gpr(), node); | |
4159 | break; | |
4160 | } | |
4161 | ||
4162 | case IsString: { | |
4163 | JSValueOperand value(this, node->child1()); | |
4164 | GPRTemporary result(this, Reuse, value, TagWord); | |
4165 | ||
4166 | JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); | |
4167 | ||
4168 | m_jit.compare8(JITCompiler::Equal, | |
4169 | JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()), | |
4170 | TrustedImm32(StringType), | |
4171 | result.gpr()); | |
4172 | JITCompiler::Jump done = m_jit.jump(); | |
4173 | ||
4174 | isNotCell.link(&m_jit); | |
4175 | m_jit.move(TrustedImm32(0), result.gpr()); | |
4176 | ||
4177 | done.link(&m_jit); | |
4178 | booleanResult(result.gpr(), node); | |
4179 | break; | |
4180 | } | |
4181 | ||
4182 | case IsObject: { | |
4183 | JSValueOperand value(this, node->child1()); | |
4184 | GPRTemporary result(this, Reuse, value, TagWord); | |
4185 | ||
4186 | JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); | |
4187 | ||
4188 | m_jit.compare8(JITCompiler::AboveOrEqual, | |
4189 | JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()), | |
4190 | TrustedImm32(ObjectType), | |
4191 | result.gpr()); | |
4192 | JITCompiler::Jump done = m_jit.jump(); | |
4193 | ||
4194 | isNotCell.link(&m_jit); | |
4195 | m_jit.move(TrustedImm32(0), result.gpr()); | |
4196 | ||
4197 | done.link(&m_jit); | |
4198 | booleanResult(result.gpr(), node); | |
4199 | break; | |
4200 | } | |
4201 | ||
4202 | case IsObjectOrNull: { | |
4203 | compileIsObjectOrNull(node); | |
4204 | break; | |
4205 | } | |
4206 | ||
4207 | case IsFunction: { | |
4208 | compileIsFunction(node); | |
4209 | break; | |
4210 | } | |
4211 | case TypeOf: { | |
4212 | compileTypeOf(node); | |
4213 | break; | |
4214 | } | |
4215 | ||
4216 | case Flush: | |
4217 | break; | |
4218 | ||
4219 | case Call: | |
4220 | case Construct: | |
4221 | case CallVarargs: | |
4222 | case CallForwardVarargs: | |
4223 | case ConstructVarargs: | |
4224 | case ConstructForwardVarargs: | |
4225 | emitCall(node); | |
4226 | break; | |
4227 | ||
4228 | case LoadVarargs: { | |
4229 | LoadVarargsData* data = node->loadVarargsData(); | |
4230 | ||
4231 | GPRReg argumentsTagGPR; | |
4232 | GPRReg argumentsPayloadGPR; | |
4233 | { | |
4234 | JSValueOperand arguments(this, node->child1()); | |
4235 | argumentsTagGPR = arguments.tagGPR(); | |
4236 | argumentsPayloadGPR = arguments.payloadGPR(); | |
4237 | flushRegisters(); | |
4238 | } | |
4239 | ||
4240 | callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, data->offset); | |
4241 | ||
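// The call above clobbered the argument registers, so re-fill the arguments operand. returnValueGPR
// still holds the varargs length, so it is locked to keep the re-fill from allocating it.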
4242 | lock(GPRInfo::returnValueGPR); | |
4243 | { | |
4244 | JSValueOperand arguments(this, node->child1()); | |
4245 | argumentsTagGPR = arguments.tagGPR(); | |
4246 | argumentsPayloadGPR = arguments.payloadGPR(); | |
4247 | flushRegisters(); | |
4248 | } | |
4249 | unlock(GPRInfo::returnValueGPR); | |
4250 | ||
4251 | // FIXME: There is a chance that we will call an effectful length property twice. This is safe | |
4252 | // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance | |
4253 | // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right | |
4254 | // past the sizing. | |
4255 | // https://bugs.webkit.org/show_bug.cgi?id=141448 | |
4256 | ||
4257 | GPRReg argCountIncludingThisGPR = | |
4258 | JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR); | |
4259 | ||
4260 | m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR); | |
4261 | speculationCheck( | |
4262 | VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32( | |
4263 | MacroAssembler::Above, | |
4264 | argCountIncludingThisGPR, | |
4265 | TrustedImm32(data->limit))); | |
4266 | ||
4267 | m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount)); | |
4268 | ||
4269 | callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsTagGPR, argumentsPayloadGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum); | |
4270 | ||
4271 | noResult(node); | |
4272 | break; | |
4273 | } | |
4274 | ||
4275 | case ForwardVarargs: { | |
4276 | compileForwardVarargs(node); | |
4277 | break; | |
4278 | } | |
4279 | ||
4280 | case CreateActivation: { | |
4281 | compileCreateActivation(node); | |
4282 | break; | |
4283 | } | |
4284 | ||
4285 | case CreateDirectArguments: { | |
4286 | compileCreateDirectArguments(node); | |
4287 | break; | |
4288 | } | |
4289 | ||
4290 | case GetFromArguments: { | |
4291 | compileGetFromArguments(node); | |
4292 | break; | |
4293 | } | |
4294 | ||
4295 | case PutToArguments: { | |
4296 | compilePutToArguments(node); | |
4297 | break; | |
4298 | } | |
4299 | ||
4300 | case CreateScopedArguments: { | |
4301 | compileCreateScopedArguments(node); | |
4302 | break; | |
4303 | } | |
4304 | ||
4305 | case CreateClonedArguments: { | |
4306 | compileCreateClonedArguments(node); | |
4307 | break; | |
4308 | } | |
4309 | ||
4310 | case NewFunction: | |
4311 | compileNewFunction(node); | |
4312 | break; | |
4313 | ||
4314 | case In: | |
4315 | compileIn(node); | |
4316 | break; | |
4317 | ||
4318 | case StoreBarrier: { | |
4319 | compileStoreBarrier(node); | |
4320 | break; | |
4321 | } | |
4322 | ||
4323 | case GetEnumerableLength: { | |
4324 | SpeculateCellOperand enumerator(this, node->child1()); | |
4325 | GPRFlushedCallResult result(this); | |
4326 | GPRReg resultGPR = result.gpr(); | |
4327 | ||
4328 | m_jit.load32(MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR); | |
4329 | int32Result(resultGPR, node); | |
4330 | break; | |
4331 | } | |
4332 | case HasGenericProperty: { | |
4333 | JSValueOperand base(this, node->child1()); | |
4334 | SpeculateCellOperand property(this, node->child2()); | |
4335 | GPRFlushedCallResult resultPayload(this); | |
4336 | GPRFlushedCallResult2 resultTag(this); | |
4337 | GPRReg basePayloadGPR = base.payloadGPR(); | |
4338 | GPRReg baseTagGPR = base.tagGPR(); | |
4339 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
4340 | GPRReg resultTagGPR = resultTag.gpr(); | |
4341 | ||
4342 | flushRegisters(); | |
4343 | callOperation(operationHasGenericProperty, resultTagGPR, resultPayloadGPR, baseTagGPR, basePayloadGPR, property.gpr()); | |
4344 | booleanResult(resultPayloadGPR, node); | |
4345 | break; | |
4346 | } | |
4347 | case HasStructureProperty: { | |
4348 | JSValueOperand base(this, node->child1()); | |
4349 | SpeculateCellOperand property(this, node->child2()); | |
4350 | SpeculateCellOperand enumerator(this, node->child3()); | |
4351 | GPRTemporary resultPayload(this); | |
4352 | GPRTemporary resultTag(this); | |
4353 | ||
4354 | GPRReg baseTagGPR = base.tagGPR(); | |
4355 | GPRReg basePayloadGPR = base.payloadGPR(); | |
4356 | GPRReg propertyGPR = property.gpr(); | |
4357 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
4358 | GPRReg resultTagGPR = resultTag.gpr(); | |
4359 | ||
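// Fast path: if the base's structure is still the one cached by the enumerator, the structure
// property is known to be present, so the result is true. Otherwise fall back to
// operationHasGenericProperty on the slow path.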
4360 | m_jit.load32(MacroAssembler::Address(basePayloadGPR, JSCell::structureIDOffset()), resultTagGPR); | |
4361 | MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual, | |
4362 | resultTagGPR, | |
4363 | MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset())); | |
4364 | ||
4365 | moveTrueTo(resultPayloadGPR); | |
4366 | MacroAssembler::Jump done = m_jit.jump(); | |
4367 | ||
4368 | done.link(&m_jit); | |
4369 | ||
4370 | addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasGenericProperty, resultTagGPR, resultPayloadGPR, baseTagGPR, basePayloadGPR, propertyGPR)); | |
4371 | booleanResult(resultPayloadGPR, node); | |
4372 | break; | |
4373 | } | |
4374 | case HasIndexedProperty: { | |
4375 | SpeculateCellOperand base(this, node->child1()); | |
4376 | SpeculateInt32Operand index(this, node->child2()); | |
4377 | GPRTemporary resultPayload(this); | |
4378 | GPRTemporary resultTag(this); | |
4379 | ||
4380 | GPRReg baseGPR = base.gpr(); | |
4381 | GPRReg indexGPR = index.gpr(); | |
4382 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
4383 | GPRReg resultTagGPR = resultTag.gpr(); | |
4384 | ||
4385 | MacroAssembler::JumpList slowCases; | |
4386 | ArrayMode mode = node->arrayMode(); | |
4387 | switch (mode.type()) { | |
4388 | case Array::Int32: | |
4389 | case Array::Contiguous: { | |
4390 | ASSERT(!!node->child3()); | |
4391 | StorageOperand storage(this, node->child3()); | |
4392 | GPRTemporary scratch(this); | |
4393 | ||
4394 | GPRReg storageGPR = storage.gpr(); | |
4395 | GPRReg scratchGPR = scratch.gpr(); | |
4396 | ||
4397 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()))); | |
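// A hole in Int32/Contiguous storage has EmptyValueTag in its tag word; treat holes as slow cases.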
4398 | m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR); | |
4399 | slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag))); | |
4400 | break; | |
4401 | } | |
4402 | case Array::Double: { | |
4403 | ASSERT(!!node->child3()); | |
4404 | StorageOperand storage(this, node->child3()); | |
4405 | FPRTemporary scratch(this); | |
4406 | FPRReg scratchFPR = scratch.fpr(); | |
4407 | GPRReg storageGPR = storage.gpr(); | |
4408 | ||
4409 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()))); | |
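// Double storage marks holes with NaN, so an unordered self-comparison sends holes to the slow path.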
4410 | m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR); | |
4411 | slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR)); | |
4412 | break; | |
4413 | } | |
4414 | case Array::ArrayStorage: { | |
4415 | ASSERT(!!node->child3()); | |
4416 | StorageOperand storage(this, node->child3()); | |
4417 | GPRTemporary scratch(this); | |
4418 | ||
4419 | GPRReg storageGPR = storage.gpr(); | |
4420 | GPRReg scratchGPR = scratch.gpr(); | |
4421 | ||
4422 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()))); | |
4423 | m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR); | |
4424 | slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag))); | |
4425 | break; | |
4426 | } | |
4427 | default: { | |
4428 | slowCases.append(m_jit.jump()); | |
4429 | break; | |
4430 | } | |
4431 | } | |
4432 | ||
4433 | moveTrueTo(resultPayloadGPR); | |
4434 | MacroAssembler::Jump done = m_jit.jump(); | |
4435 | ||
4436 | addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedProperty, resultTagGPR, resultPayloadGPR, baseGPR, indexGPR)); | |
4437 | ||
4438 | done.link(&m_jit); | |
4439 | booleanResult(resultPayloadGPR, node); | |
4440 | break; | |
4441 | } | |
4442 | case GetDirectPname: { | |
4443 | Edge& baseEdge = m_jit.graph().varArgChild(node, 0); | |
4444 | Edge& propertyEdge = m_jit.graph().varArgChild(node, 1); | |
4445 | ||
4446 | SpeculateCellOperand base(this, baseEdge); | |
4447 | SpeculateCellOperand property(this, propertyEdge); | |
4448 | GPRReg baseGPR = base.gpr(); | |
4449 | GPRReg propertyGPR = property.gpr(); | |
4450 | ||
4451 | #if CPU(X86) | |
4452 | GPRFlushedCallResult resultPayload(this); | |
4453 | GPRFlushedCallResult2 resultTag(this); | |
4454 | GPRTemporary scratch(this); | |
4455 | ||
4456 | GPRReg resultTagGPR = resultTag.gpr(); | |
4457 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
4458 | GPRReg scratchGPR = scratch.gpr(); | |
4459 | ||
4460 | // Not enough registers on X86 for this code, so always use the slow path. | |
4461 | flushRegisters(); | |
4462 | m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), scratchGPR); | |
4463 | callOperation(operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, scratchGPR, propertyGPR); | |
4464 | #else | |
4465 | GPRTemporary resultPayload(this); | |
4466 | GPRTemporary resultTag(this); | |
4467 | GPRTemporary scratch(this); | |
4468 | ||
4469 | GPRReg resultTagGPR = resultTag.gpr(); | |
4470 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
4471 | GPRReg scratchGPR = scratch.gpr(); | |
4472 | ||
4473 | Edge& indexEdge = m_jit.graph().varArgChild(node, 2); | |
4474 | Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3); | |
4475 | ||
4476 | SpeculateInt32Operand index(this, indexEdge); | |
4477 | SpeculateCellOperand enumerator(this, enumeratorEdge); | |
4478 | ||
4479 | GPRReg indexGPR = index.gpr(); | |
4480 | GPRReg enumeratorGPR = enumerator.gpr(); | |
4481 | ||
4482 | // Check the structure | |
4483 | m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR); | |
4484 | MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual, | |
4485 | scratchGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset())); | |
4486 | ||
4487 | // Compute the offset | |
4488 | // If index is less than the enumerator's cached inline capacity, then it's an inline access | |
4489 | MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual, | |
4490 | indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset())); | |
4491 | ||
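// Inline access: properties below the cached inline capacity live directly in the object starting
// at offsetOfInlineStorage(), one EncodedJSValue (tag word + payload word) per slot.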
4492 | m_jit.move(indexGPR, scratchGPR); | |
4493 | m_jit.signExtend32ToPtr(scratchGPR, scratchGPR); | |
4494 | m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR); | |
4495 | m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR); | |
4496 | ||
4497 | MacroAssembler::Jump done = m_jit.jump(); | |
4498 | ||
4499 | // Otherwise it's out of line | |
4500 | outOfLineAccess.link(&m_jit); | |
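// Out-of-line properties are stored at negative indices from the butterfly pointer, so compute
// -(index - inlineCapacity) and walk backwards from the first out-of-line slot.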
4501 | m_jit.move(indexGPR, scratchGPR); | |
4502 | m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR); | |
4503 | m_jit.neg32(scratchGPR); | |
4504 | m_jit.signExtend32ToPtr(scratchGPR, scratchGPR); | |
4505 | // We use resultPayloadGPR as a temporary here. We must not clobber it until after we have read the | |
4506 | // values out of indexGPR and enumeratorGPR, because resultPayloadGPR could reuse either of those registers. | |
4507 | m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultPayloadGPR); | |
4508 | int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); | |
4509 | m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR); | |
4510 | m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR); | |
4511 | ||
4512 | done.link(&m_jit); | |
4513 | ||
4514 | addSlowPathGenerator(slowPathCall(wrongStructure, this, operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, propertyGPR)); | |
4515 | #endif | |
4516 | ||
4517 | jsValueResult(resultTagGPR, resultPayloadGPR, node); | |
4518 | break; | |
4519 | } | |
4520 | case GetPropertyEnumerator: { | |
4521 | SpeculateCellOperand base(this, node->child1()); | |
4522 | GPRFlushedCallResult result(this); | |
4523 | GPRReg resultGPR = result.gpr(); | |
4524 | ||
4525 | flushRegisters(); | |
4526 | callOperation(operationGetPropertyEnumerator, resultGPR, base.gpr()); | |
4527 | cellResult(resultGPR, node); | |
4528 | break; | |
4529 | } | |
4530 | case GetEnumeratorStructurePname: | |
4531 | case GetEnumeratorGenericPname: { | |
4532 | SpeculateCellOperand enumerator(this, node->child1()); | |
4533 | SpeculateInt32Operand index(this, node->child2()); | |
4534 | GPRTemporary scratch(this); | |
4535 | GPRTemporary resultPayload(this); | |
4536 | GPRTemporary resultTag(this); | |
4537 | ||
4538 | GPRReg enumeratorGPR = enumerator.gpr(); | |
4539 | GPRReg indexGPR = index.gpr(); | |
4540 | GPRReg scratchGPR = scratch.gpr(); | |
4541 | GPRReg resultTagGPR = resultTag.gpr(); | |
4542 | GPRReg resultPayloadGPR = resultPayload.gpr(); | |
4543 | ||
4544 | MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR, | |
4545 | MacroAssembler::Address(enumeratorGPR, (op == GetEnumeratorStructurePname) | |
4546 | ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset() | |
4547 | : JSPropertyNameEnumerator::endGenericPropertyIndexOffset())); | |
4548 | ||
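// The index is past the end of this section of the enumerator's property names: return null.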
4549 | m_jit.move(MacroAssembler::TrustedImm32(JSValue::NullTag), resultTagGPR); | |
4550 | m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR); | |
4551 | ||
4552 | MacroAssembler::Jump done = m_jit.jump(); | |
4553 | inBounds.link(&m_jit); | |
4554 | ||
4555 | m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratchGPR); | |
4556 | m_jit.loadPtr(MacroAssembler::BaseIndex(scratchGPR, indexGPR, MacroAssembler::ScalePtr), resultPayloadGPR); | |
4557 | m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), resultTagGPR); | |
4558 | ||
4559 | done.link(&m_jit); | |
4560 | jsValueResult(resultTagGPR, resultPayloadGPR, node); | |
4561 | break; | |
4562 | } | |
4563 | case ToIndexString: { | |
4564 | SpeculateInt32Operand index(this, node->child1()); | |
4565 | GPRFlushedCallResult result(this); | |
4566 | GPRReg resultGPR = result.gpr(); | |
4567 | ||
4568 | flushRegisters(); | |
4569 | callOperation(operationToIndexString, resultGPR, index.gpr()); | |
4570 | cellResult(resultGPR, node); | |
4571 | break; | |
4572 | } | |
4573 | case ProfileType: { | |
4574 | JSValueOperand value(this, node->child1()); | |
4575 | GPRTemporary scratch1(this); | |
4576 | GPRTemporary scratch2(this); | |
4577 | GPRTemporary scratch3(this); | |
4578 | ||
4579 | GPRReg scratch1GPR = scratch1.gpr(); | |
4580 | GPRReg scratch2GPR = scratch2.gpr(); | |
4581 | GPRReg scratch3GPR = scratch3.gpr(); | |
4582 | ||
4583 | // Load the TypeProfilerLog into Scratch2. | |
4584 | TypeProfilerLog* cachedTypeProfilerLog = m_jit.vm()->typeProfilerLog(); | |
4585 | m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR); | |
4586 | ||
4587 | // Load the next LogEntry into Scratch1. | |
4588 | m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR); | |
4589 | ||
4590 | // Store the JSValue onto the log entry. | |
4591 | m_jit.store32(value.tagGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); | |
4592 | m_jit.store32(value.payloadGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); | |
4593 | ||
4594 | // If the value is a cell, store its structureID on the log entry; otherwise store 0. | |
4595 | MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs()); | |
4596 | m_jit.load32(MacroAssembler::Address(value.payloadGPR(), JSCell::structureIDOffset()), scratch3GPR); | |
4597 | m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset())); | |
4598 | MacroAssembler::Jump skipIsCell = m_jit.jump(); | |
4599 | isNotCell.link(&m_jit); | |
4600 | m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset())); | |
4601 | skipIsCell.link(&m_jit); | |
4602 | ||
4603 | // Store the typeLocation on the log entry. | |
4604 | TypeLocation* cachedTypeLocation = node->typeLocation(); | |
4605 | m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR); | |
4606 | m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset())); | |
4607 | ||
4608 | // Increment the current log entry. | |
4609 | m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR); | |
4610 | m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset())); | |
4611 | MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())); | |
4612 | addSlowPathGenerator( | |
4613 | slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult)); | |
4614 | ||
4615 | noResult(node); | |
4616 | break; | |
4617 | } | |
4618 | case ProfileControlFlow: { | |
4619 | BasicBlockLocation* basicBlockLocation = node->basicBlockLocation(); | |
4620 | if (!basicBlockLocation->hasExecuted()) { | |
4621 | GPRTemporary scratch1(this); | |
4622 | basicBlockLocation->emitExecuteCode(m_jit, scratch1.gpr()); | |
4623 | } | |
4624 | noResult(node); | |
4625 | break; | |
4626 | } | |
4627 | ||
4628 | case ForceOSRExit: { | |
4629 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); | |
4630 | break; | |
4631 | } | |
4632 | ||
4633 | case InvalidationPoint: | |
4634 | emitInvalidationPoint(node); | |
4635 | break; | |
4636 | ||
4637 | case CheckWatchdogTimer: | |
4638 | ASSERT(m_jit.vm()->watchdog); | |
4639 | speculationCheck( | |
4640 | WatchdogTimerFired, JSValueRegs(), 0, | |
4641 | m_jit.branchTest8( | |
4642 | JITCompiler::NonZero, | |
4643 | JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog->timerDidFireAddress()))); | |
4644 | break; | |
4645 | ||
4646 | case CountExecution: | |
4647 | m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address())); | |
4648 | break; | |
4649 | ||
4650 | case Phantom: | |
4651 | case Check: | |
4652 | DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate); | |
4653 | noResult(node); | |
4654 | break; | |
4655 | ||
4656 | case Breakpoint: | |
4657 | case ProfileWillCall: | |
4658 | case ProfileDidCall: | |
4659 | case PhantomLocal: | |
4660 | case LoopHint: | |
4661 | // This is a no-op. | |
4662 | noResult(node); | |
4663 | break; | |
4664 | ||
4665 | ||
4666 | case Unreachable: | |
4667 | RELEASE_ASSERT_NOT_REACHED(); | |
4668 | break; | |
4669 | ||
4670 | case LastNodeType: | |
4671 | case Phi: | |
4672 | case Upsilon: | |
4673 | case ExtractOSREntryLocal: | |
4674 | case CheckTierUpInLoop: | |
4675 | case CheckTierUpAtReturn: | |
4676 | case CheckTierUpAndOSREnter: | |
4677 | case CheckTierUpWithNestedTriggerAndOSREnter: | |
4678 | case Int52Rep: | |
4679 | case FiatInt52: | |
4680 | case Int52Constant: | |
4681 | case CheckInBounds: | |
4682 | case ArithIMul: | |
4683 | case MultiGetByOffset: | |
4684 | case MultiPutByOffset: | |
4685 | case NativeCall: | |
4686 | case NativeConstruct: | |
4687 | case CheckBadCell: | |
4688 | case BottomValue: | |
4689 | case PhantomNewObject: | |
4690 | case PhantomNewFunction: | |
4691 | case PhantomCreateActivation: | |
4692 | case PutHint: | |
4693 | case CheckStructureImmediate: | |
4694 | case MaterializeNewObject: | |
4695 | case MaterializeCreateActivation: | |
4696 | case PutStack: | |
4697 | case KillStack: | |
4698 | case GetStack: | |
4699 | case GetMyArgumentByVal: | |
4700 | DFG_CRASH(m_jit.graph(), node, "unexpected node in DFG backend"); | |
4701 | break; | |
4702 | } | |
4703 | ||
4704 | if (!m_compileOkay) | |
4705 | return; | |
4706 | ||
4707 | if (node->hasResult() && node->mustGenerate()) | |
4708 | use(node); | |
4709 | } | |
4710 | ||
4711 | #if ENABLE(GGC) | |
4712 | void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueTagGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2) | |
4713 | { | |
4714 | JITCompiler::Jump isNotCell; | |
4715 | if (!isKnownCell(valueUse.node())) | |
4716 | isNotCell = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::CellTag)); | |
4717 | ||
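// The barrier can be skipped if the owner is already in the remembered set or still in Eden;
// otherwise record the owner in the write-barrier buffer.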
4718 | JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR); | |
4719 | storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2); | |
4720 | ownerIsRememberedOrInEden.link(&m_jit); | |
4721 | ||
4722 | if (!isKnownCell(valueUse.node())) | |
4723 | isNotCell.link(&m_jit); | |
4724 | } | |
4725 | #endif // ENABLE(GGC) | |
4726 | ||
4727 | void SpeculativeJIT::moveTrueTo(GPRReg gpr) | |
4728 | { | |
4729 | m_jit.move(TrustedImm32(1), gpr); | |
4730 | } | |
4731 | ||
4732 | void SpeculativeJIT::moveFalseTo(GPRReg gpr) | |
4733 | { | |
4734 | m_jit.move(TrustedImm32(0), gpr); | |
4735 | } | |
4736 | ||
4737 | void SpeculativeJIT::blessBoolean(GPRReg) | |
4738 | { | |
4739 | } | |
4740 | ||
4741 | #endif | |
4742 | ||
4743 | } } // namespace JSC::DFG | |
4744 | ||
4745 | #endif |