/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

namespace JSC { namespace DFG {

#if USE(JSVALUE64)

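// Fill (i.e. load into a register) the value of the given node as an integer.
// If the value is not already live in a register it is materialized from its
// constant or its spill slot in the RegisterFile, and the chosen GPR is recorded
// in the node's GenerationInfo. returnFormat reports whether the result is a raw
// int32 (DataFormatInteger) or a boxed one (DataFormatJSInteger).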
GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat)
{
    Node& node = at(nodeIndex);
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    if (info.registerFormat() == DataFormatNone) {
        GPRReg gpr = allocate();

        if (node.hasConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            if (isInt32Constant(nodeIndex)) {
                m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
                info.fillInteger(gpr);
                returnFormat = DataFormatInteger;
                return gpr;
            }
            if (isNumberConstant(nodeIndex)) {
                JSValue jsValue = jsNumber(valueOfNumberConstant(nodeIndex));
                m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
            } else {
                ASSERT(isJSConstant(nodeIndex));
                JSValue jsValue = valueOfJSConstant(nodeIndex);
                m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
            }
        } else if (info.spillFormat() == DataFormatInteger) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
            // Tag it, since fillInteger() is used when we want a boxed integer.
            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
        } else {
            ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger);
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
        }

        // Since we statically know that we're filling an integer, and values
        // in the RegisterFile are boxed, this must be DataFormatJSInteger.
        // We will check this with a jitAssert below.
        info.fillJSValue(gpr, DataFormatJSInteger);
        unlock(gpr);
    }

    switch (info.registerFormat()) {
    case DataFormatNone:
        // Should have filled, above.
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJS:
    case DataFormatCell:
    case DataFormatJSCell:
    case DataFormatBoolean:
    case DataFormatJSBoolean:
    case DataFormatStorage:
        // Should only be calling this function if we know this operand to be integer.
        ASSERT_NOT_REACHED();

    case DataFormatJSInteger: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.jitAssertIsJSInt32(gpr);
        returnFormat = DataFormatJSInteger;
        return gpr;
    }

    case DataFormatInteger: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.jitAssertIsInt32(gpr);
        returnFormat = DataFormatInteger;
        return gpr;
    }
    }

    ASSERT_NOT_REACHED();
    return InvalidGPRReg;
}

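// Fill the value of the given node into an FPR as an unboxed double. Integer and
// boxed-double sources are converted or unboxed as needed; a boxed JSValue that
// might be either is resolved with a dynamic tag check.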
FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
{
    Node& node = at(nodeIndex);
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    if (info.registerFormat() == DataFormatNone) {
        if (node.hasConstant()) {
            GPRReg gpr = allocate();

            if (isInt32Constant(nodeIndex)) {
                // FIXME: should not be reachable?
                m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                info.fillInteger(gpr);
                unlock(gpr);
            } else if (isNumberConstant(nodeIndex)) {
                FPRReg fpr = fprAllocate();
                m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr);
                m_jit.movePtrToDouble(gpr, fpr);
                unlock(gpr);

                m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
                info.fillDouble(fpr);
                return fpr;
            } else {
                // FIXME: should not be reachable?
                ASSERT(isJSConstant(nodeIndex));
                JSValue jsValue = valueOfJSConstant(nodeIndex);
                m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
                info.fillJSValue(gpr, DataFormatJS);
                unlock(gpr);
            }
        } else {
            DataFormat spillFormat = info.spillFormat();
            switch (spillFormat) {
            case DataFormatDouble: {
                FPRReg fpr = fprAllocate();
                m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
                m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
                info.fillDouble(fpr);
                return fpr;
            }

            case DataFormatInteger: {
                GPRReg gpr = allocate();

                m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillInteger(gpr);
                unlock(gpr);
                break;
            }

            default:
                GPRReg gpr = allocate();

                ASSERT(spillFormat & DataFormatJS);
                m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
                m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillJSValue(gpr, spillFormat);
                unlock(gpr);
                break;
            }
        }
    }

    switch (info.registerFormat()) {
    case DataFormatNone:
        // Should have filled, above.
    case DataFormatCell:
    case DataFormatJSCell:
    case DataFormatBoolean:
    case DataFormatJSBoolean:
    case DataFormatStorage:
        // Should only be calling this function if we know this operand to be numeric.
        ASSERT_NOT_REACHED();

    case DataFormatJS: {
        GPRReg jsValueGpr = info.gpr();
        m_gprs.lock(jsValueGpr);
        FPRReg fpr = fprAllocate();
        GPRReg tempGpr = allocate(); // FIXME: can we skip this allocation on the last use of the virtual register?

        JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);

        m_jit.jitAssertIsJSDouble(jsValueGpr);

        // First, if we get here we have a double encoded as a JSValue
        m_jit.move(jsValueGpr, tempGpr);
        unboxDouble(tempGpr, fpr);
        JITCompiler::Jump hasUnboxedDouble = m_jit.jump();

        // Finally, handle integers.
        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(jsValueGpr, fpr);
        hasUnboxedDouble.link(&m_jit);

        m_gprs.release(jsValueGpr);
        m_gprs.unlock(jsValueGpr);
        m_gprs.unlock(tempGpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
        info.fillDouble(fpr);
        info.killSpilled();
        return fpr;
    }

    case DataFormatJSInteger:
    case DataFormatInteger: {
        FPRReg fpr = fprAllocate();
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.convertInt32ToDouble(gpr, fpr);
        m_gprs.unlock(gpr);
        return fpr;
    }

    // Unbox the double
    case DataFormatJSDouble: {
        GPRReg gpr = info.gpr();
        FPRReg fpr = fprAllocate();
        if (m_gprs.isLocked(gpr)) {
            // Make sure we don't trample gpr if it is in use.
            GPRReg temp = allocate();
            m_jit.move(gpr, temp);
            unboxDouble(temp, fpr);
            unlock(temp);
        } else
            unboxDouble(gpr, fpr);

        m_gprs.release(gpr);
        m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);

        info.fillDouble(fpr);
        return fpr;
    }

    case DataFormatDouble: {
        FPRReg fpr = info.fpr();
        m_fprs.lock(fpr);
        return fpr;
    }
    }

    ASSERT_NOT_REACHED();
    return InvalidFPRReg;
}

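// Fill the value of the given node into a GPR as a boxed JSValue. Raw integers are
// tagged (in place, or into a copy if the source register is locked) and raw
// doubles are boxed; everything else is already in JSValue form.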
GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex)
{
    Node& node = at(nodeIndex);
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        GPRReg gpr = allocate();

        if (node.hasConstant()) {
            if (isInt32Constant(nodeIndex)) {
                info.fillJSValue(gpr, DataFormatJSInteger);
                JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
                m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
            } else if (isNumberConstant(nodeIndex)) {
                info.fillJSValue(gpr, DataFormatJSDouble);
                JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(nodeIndex));
                m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
            } else {
                ASSERT(isJSConstant(nodeIndex));
                JSValue jsValue = valueOfJSConstant(nodeIndex);
                m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr);
                info.fillJSValue(gpr, DataFormatJS);
            }

            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
        } else {
            DataFormat spillFormat = info.spillFormat();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            if (spillFormat == DataFormatInteger) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
                spillFormat = DataFormatJSInteger;
            } else {
                m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
                if (spillFormat == DataFormatDouble) {
                    // Need to box the double, since we want a JSValue.
                    m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr);
                    spillFormat = DataFormatJSDouble;
                } else
                    ASSERT(spillFormat & DataFormatJS);
            }
            info.fillJSValue(gpr, spillFormat);
        }
        return gpr;
    }

    case DataFormatInteger: {
        GPRReg gpr = info.gpr();
        // If the register has already been locked we need to take a copy.
        // If not, we'll tag it in place, so mark on the info that this is now type DataFormatJSInteger, not DataFormatInteger.
        if (m_gprs.isLocked(gpr)) {
            GPRReg result = allocate();
            m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr, result);
            return result;
        }
        m_gprs.lock(gpr);
        m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
        info.fillJSValue(gpr, DataFormatJSInteger);
        return gpr;
    }

    case DataFormatDouble: {
        FPRReg fpr = info.fpr();
        GPRReg gpr = boxDouble(fpr);

        // Update all info
        info.fillJSValue(gpr, DataFormatJSDouble);
        m_fprs.release(fpr);
        m_gprs.retain(gpr, virtualRegister, SpillOrderJS);

        return gpr;
    }

    case DataFormatCell:
        // No retag required on JSVALUE64!
    case DataFormatJS:
    case DataFormatJSInteger:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatBoolean:
    case DataFormatStorage:
        // this type currently never occurs
        ASSERT_NOT_REACHED();
    }

    ASSERT_NOT_REACHED();
    return InvalidGPRReg;
}

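// Slow-path (non-speculative) implementation of ValueToNumber. Operands already
// known to be numeric are passed through; otherwise boxed integers and doubles are
// copied to the result and any other value is converted by calling
// dfgConvertJSValueToNumber.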
void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
{
    if (isKnownNumeric(node.child1().index())) {
        JSValueOperand op1(this, node.child1());
        GPRTemporary result(this, op1);
        m_jit.move(op1.gpr(), result.gpr());
        jsValueResult(result.gpr(), m_compileIndex);
        return;
    }

    JSValueOperand op1(this, node.child1());
    GPRTemporary result(this);

    ASSERT(!isInt32Constant(node.child1().index()));
    ASSERT(!isNumberConstant(node.child1().index()));

    GPRReg jsValueGpr = op1.gpr();
    GPRReg gpr = result.gpr();
    op1.use();

    JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);
    JITCompiler::Jump nonNumeric = m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister);

    // First, if we get here we have a double encoded as a JSValue
    m_jit.move(jsValueGpr, gpr);
    JITCompiler::Jump hasUnboxedDouble = m_jit.jump();

    // Next handle cells (& other JS immediates)
    nonNumeric.link(&m_jit);
    silentSpillAllRegisters(gpr);
    callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, jsValueGpr);
    boxDouble(FPRInfo::returnValueFPR, gpr);
    silentFillAllRegisters(gpr);
    JITCompiler::Jump hasCalledToNumber = m_jit.jump();

    // Finally, handle integers.
    isInteger.link(&m_jit);
    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr);
    hasUnboxedDouble.link(&m_jit);
    hasCalledToNumber.link(&m_jit);

    jsValueResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
}

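// Slow-path implementation of ValueToInt32. Known integers are passed through,
// doubles are truncated inline (falling back to toInt32 on failure), and arbitrary
// JSValues are converted via dfgConvertJSValueToInt32.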
void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
{
    ASSERT(!isInt32Constant(node.child1().index()));

    if (isKnownInteger(node.child1().index())) {
        IntegerOperand op1(this, node.child1());
        GPRTemporary result(this, op1);
        m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
        integerResult(result.gpr(), m_compileIndex);
        return;
    }

    GenerationInfo& childInfo = m_generationInfo[at(node.child1()).virtualRegister()];
    if (childInfo.isJSDouble()) {
        DoubleOperand op1(this, node.child1());
        GPRTemporary result(this);
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
        op1.use();
        JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);

        silentSpillAllRegisters(gpr);
        callOperation(toInt32, gpr, fpr);
        silentFillAllRegisters(gpr);

        truncatedToInteger.link(&m_jit);
        integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly);
        return;
    }

    JSValueOperand op1(this, node.child1());
    GPRTemporary result(this, op1);
    GPRReg jsValueGpr = op1.gpr();
    GPRReg resultGPR = result.gpr();
    op1.use();

    JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister);

    // First handle non-integers
    silentSpillAllRegisters(resultGPR);
    callOperation(dfgConvertJSValueToInt32, resultGPR, jsValueGpr);
    silentFillAllRegisters(resultGPR);
    JITCompiler::Jump hasCalledToInt32 = m_jit.jump();

    // Then handle integers.
    isInteger.link(&m_jit);
    m_jit.zeroExtend32ToPtr(jsValueGpr, resultGPR);
    hasCalledToInt32.link(&m_jit);
    integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
}

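// Convert an int32 that should be read as unsigned into a number. Inputs with the
// top bit set (negative as signed) are re-expressed as doubles by adding 2^32;
// non-negative inputs are simply tagged as integers.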
void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
{
    IntegerOperand op1(this, node.child1());
    FPRTemporary boxer(this);
    GPRTemporary result(this, op1);

    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));

    m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr());
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), boxer.fpr());

    boxDouble(boxer.fpr(), result.gpr());

    JITCompiler::Jump done = m_jit.jump();

    positive.link(&m_jit);

    m_jit.orPtr(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr());

    done.link(&m_jit);

    jsValueResult(result.gpr(), m_compileIndex);
}

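// Emit an inline property-access cache for GetById: a patchable structure check
// guarding a patchable load, with a slow path that calls operationGetByIdOptimize.
// The emitted labels are recorded in a PropertyAccessRecord so the cache can be
// repatched later.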
JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));

    m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
    JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);

    JITCompiler::Jump done = m_jit.jump();

    structureCheck.m_jump.link(&m_jit);

    if (slowPathTarget.isSet())
        slowPathTarget.link(&m_jit);

    JITCompiler::Label slowCase = m_jit.label();

    if (spillMode == NeedToSpill)
        silentSpillAllRegisters(resultGPR);
    JITCompiler::Call functionCall = callOperation(operationGetByIdOptimize, resultGPR, baseGPR, identifier(identifierNumber));
    if (spillMode == NeedToSpill)
        silentFillAllRegisters(resultGPR);

    done.link(&m_jit);

    JITCompiler::Label doneLabel = m_jit.label();

    m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, loadWithPatch, slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), safeCast<int8_t>(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));

    if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg && spillMode == NeedToSpill)
        unlock(scratchGPR);

    return functionCall;
}

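// Emit an inline property-access cache for PutById, mirroring cachedGetById: a
// patchable structure check and a patchable store (preceded by a write barrier),
// with a slow path that selects the appropriate optimize-on-repatch operation
// based on strict mode and the put kind.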
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));

    writeBarrier(baseGPR, valueGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);

    m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
    JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));

    JITCompiler::Jump done = m_jit.jump();

    structureCheck.m_jump.link(&m_jit);

    if (slowPathTarget.isSet())
        slowPathTarget.link(&m_jit);

    JITCompiler::Label slowCase = m_jit.label();

    silentSpillAllRegisters(InvalidGPRReg);
    V_DFGOperation_EJCI optimizedCall;
    if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectStrictOptimize;
        else
            optimizedCall = operationPutByIdStrictOptimize;
    } else {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectNonStrictOptimize;
        else
            optimizedCall = operationPutByIdNonStrictOptimize;
    }
    JITCompiler::Call functionCall = callOperation(optimizedCall, valueGPR, baseGPR, identifier(identifierNumber));
    silentFillAllRegisters(InvalidGPRReg);

    done.link(&m_jit);
    JITCompiler::Label doneLabel = m_jit.label();

    m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
}

void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
{
    JSValueOperand arg(this, operand);
    GPRReg argGPR = arg.gpr();

    GPRTemporary result(this, arg);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell;

    if (!isKnownCell(operand.index()))
        notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);

    m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
    m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultGPR);

    if (!isKnownCell(operand.index())) {
        JITCompiler::Jump done = m_jit.jump();

        notCell.link(&m_jit);

        m_jit.move(argGPR, resultGPR);
        m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
        m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);

        done.link(&m_jit);
    }

    m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
    jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
}

void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert)
{
    Node& branchNode = at(branchNodeIndex);
    BlockIndex taken = branchNode.takenBlockIndex();
    BlockIndex notTaken = branchNode.notTakenBlockIndex();

    if (taken == (m_block + 1)) {
        invert = !invert;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg(this, operand);
    GPRReg argGPR = arg.gpr();

    GPRTemporary result(this, arg);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell;

    if (!isKnownCell(operand.index()))
        notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);

    m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
    branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), taken);

    if (!isKnownCell(operand.index())) {
        jump(notTaken, ForceJump);

        notCell.link(&m_jit);

        m_jit.move(argGPR, resultGPR);
        m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
        branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)), taken);
    }

    jump(notTaken);
}

bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, Edge operand, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        ASSERT(node.adjustedRefCount() == 1);

        nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert);

        use(node.child1());
        use(node.child2());
        m_indexInBlock = branchIndexInBlock;
        m_compileIndex = branchNodeIndex;

        return true;
    }

    nonSpeculativeNonPeepholeCompareNull(operand, invert);

    return false;
}

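// Fused compare-and-branch for the non-speculative path. If both operands might be
// integers, an inline integer comparison is emitted with a slow path that calls the
// helper function; if either operand is known not to be an integer, the helper is
// always called.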
void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    Node& branchNode = at(branchNodeIndex);
    BlockIndex taken = branchNode.takenBlockIndex();
    BlockIndex notTaken = branchNode.notTakenBlockIndex();

    JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == (m_block + 1)) {
        cond = JITCompiler::invert(cond);
        callResultCondition = JITCompiler::Zero;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node.child1());
    JSValueOperand arg2(this, node.child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node.child1().index()) || isKnownNotInteger(node.child2().index())) {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);

        branchTest32(callResultCondition, resultGPR, taken);
    } else {
        GPRTemporary result(this, arg2);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        if (!isKnownInteger(node.child1().index()))
            slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
        if (!isKnownInteger(node.child2().index()))
            slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));

        branch32(cond, arg1GPR, arg2GPR, taken);

        if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
            jump(notTaken, ForceJump);

            slowPath.link(&m_jit);

            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
            silentFillAllRegisters(resultGPR);

            branchTest32(callResultCondition, resultGPR, taken);
        }
    }

    jump(notTaken);

    m_indexInBlock = m_jit.graph().m_blocks[m_block]->size() - 1;
    m_compileIndex = branchNodeIndex;
}

void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    JSValueOperand arg1(this, node.child1());
    JSValueOperand arg2(this, node.child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node.child1().index()) || isKnownNotInteger(node.child2().index())) {
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);

        m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
        jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
    } else {
        GPRTemporary result(this, arg2);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        if (!isKnownInteger(node.child1().index()))
            slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
        if (!isKnownInteger(node.child2().index()))
            slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));

        m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);

        if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
            JITCompiler::Jump haveResult = m_jit.jump();

            slowPath.link(&m_jit);

            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
            silentFillAllRegisters(resultGPR);

            m_jit.andPtr(TrustedImm32(1), resultGPR);

            haveResult.link(&m_jit);
        }

        m_jit.or32(TrustedImm32(ValueFalse), resultGPR);

        jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
    }
}

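// Fused strict-equality-and-branch for the non-speculative path. When both operands
// are known cells, pointer equality decides the equal case and operationCompareStrictEqCell
// handles the rest; otherwise non-double immediates are compared inline and doubles
// (or mixed cases) fall back to operationCompareStrictEq.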
void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert)
{
    Node& branchNode = at(branchNodeIndex);
    BlockIndex taken = branchNode.takenBlockIndex();
    BlockIndex notTaken = branchNode.notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == (m_block + 1)) {
        invert = !invert;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node.child1());
    JSValueOperand arg2(this, node.child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    arg1.use();
    arg2.use();

    if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);

        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
    } else {
        m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);

        JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);

        JITCompiler::Jump leftOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump leftDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
        leftOK.link(&m_jit);
        JITCompiler::Jump rightOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump rightDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
        rightOK.link(&m_jit);

        branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
        jump(notTaken, ForceJump);

        twoCellsCase.link(&m_jit);
        branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);

        leftDouble.link(&m_jit);
        rightDouble.link(&m_jit);

        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);

        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
    }

    jump(notTaken);
}

void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
{
    JSValueOperand arg1(this, node.child1());
    JSValueOperand arg2(this, node.child2());
    GPRReg arg1GPR = arg1.gpr();
    GPRReg arg2GPR = arg2.gpr();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    arg1.use();
    arg2.use();

    if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);

        m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);

        JITCompiler::Jump done = m_jit.jump();

        notEqualCase.link(&m_jit);

        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);

        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR);
        m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);

        done.link(&m_jit);
    } else {
        m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);

        JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);

        JITCompiler::Jump leftOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump leftDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
        leftOK.link(&m_jit);
        JITCompiler::Jump rightOK = m_jit.branchPtr(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
        JITCompiler::Jump rightDouble = m_jit.branchTestPtr(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
        rightOK.link(&m_jit);

        m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);

        JITCompiler::Jump done1 = m_jit.jump();

        twoCellsCase.link(&m_jit);
        JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR);

        m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR);

        JITCompiler::Jump done2 = m_jit.jump();

        leftDouble.link(&m_jit);
        rightDouble.link(&m_jit);
        notEqualCase.link(&m_jit);

        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
        silentFillAllRegisters(resultGPR);

        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR);

        done1.link(&m_jit);

        m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);

        done2.link(&m_jit);
    }

    jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}

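// Emit a Call or Construct node: the callee and arguments are stored into the
// outgoing call frame, an inline fast path near-calls a patchable target when the
// callee matches, and a slow path links the call through operationLinkCall /
// operationLinkConstruct.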
void SpeculativeJIT::emitCall(Node& node)
{
    P_DFGOperation_E slowCallFunction;

    if (node.op() == Call)
        slowCallFunction = operationLinkCall;
    else {
        ASSERT(node.op() == Construct);
        slowCallFunction = operationLinkConstruct;
    }

    // For constructors, the this argument is not passed but we have to make space
    // for it.
    int dummyThisArgument = node.op() == Call ? 0 : 1;

    CallLinkInfo::CallType callType = node.op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;

    Edge calleeEdge = m_jit.graph().m_varArgChildren[node.firstChild()];
    JSValueOperand callee(this, calleeEdge);
    GPRReg calleeGPR = callee.gpr();
    use(calleeEdge);

    // The call instruction's first child is either the function (normal call) or the
    // receiver (method call). Subsequent children are the arguments.
    int numPassedArgs = node.numChildren() - 1;

    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount));
    m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(RegisterFile::CallerFrame));
    m_jit.storePtr(calleeGPR, callFrameSlot(RegisterFile::Callee));

    for (int i = 0; i < numPassedArgs; i++) {
        Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
        JSValueOperand arg(this, argEdge);
        GPRReg argGPR = arg.gpr();
        use(argEdge);

        m_jit.storePtr(argGPR, argumentSlot(i + dummyThisArgument));
    }

    flushRegisters();

    GPRResult result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::DataLabelPtr targetToCheck;
    JITCompiler::Jump slowPath;

    slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue())));
    m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), resultGPR);
    m_jit.storePtr(resultGPR, callFrameSlot(RegisterFile::ScopeChain));

    m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);

    CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
    CallBeginToken token = m_jit.beginCall();
    JITCompiler::Call fastCall = m_jit.nearCall();
    m_jit.notifyCall(fastCall, codeOrigin, token);

    JITCompiler::Jump done = m_jit.jump();

    slowPath.link(&m_jit);

    m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
    token = m_jit.beginCall();
    JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
    m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
    m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
    token = m_jit.beginCall();
    JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
    m_jit.notifyCall(theCall, codeOrigin, token);

    done.link(&m_jit);

    m_jit.move(GPRInfo::returnValueGPR, resultGPR);

    jsValueResult(resultGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly);

    m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, at(m_compileIndex).codeOrigin);
}

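// Speculative integer fill: like fillInteger, but emits speculation checks instead
// of assertions and terminates speculative execution when the value is statically
// known not to be an int32. The strict template parameter selects whether the
// caller requires an untagged int32 (DataFormatInteger) or will also accept a
// boxed one (DataFormatJSInteger).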
template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("SpecInt@%d ", nodeIndex);
#endif
    Node& node = at(nodeIndex);
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if ((node.hasConstant() && !isInt32Constant(nodeIndex)) || info.spillFormat() == DataFormatDouble) {
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
            returnFormat = DataFormatInteger;
            return allocate();
        }

        GPRReg gpr = allocate();

        if (node.hasConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            ASSERT(isInt32Constant(nodeIndex));
            m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
            info.fillInteger(gpr);
            returnFormat = DataFormatInteger;
            return gpr;
        }

        DataFormat spillFormat = info.spillFormat();

        ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);

        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);

        if (spillFormat == DataFormatJSInteger || spillFormat == DataFormatInteger) {
            // If we know this was spilled as an integer we can fill without checking.
            if (strict) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillInteger(gpr);
                returnFormat = DataFormatInteger;
                return gpr;
            }
            if (spillFormat == DataFormatInteger) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr);
            } else
                m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillJSValue(gpr, DataFormatJSInteger);
            returnFormat = DataFormatJSInteger;
            return gpr;
        }
        m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);

        // Fill as JSValue, and fall through.
        info.fillJSValue(gpr, DataFormatJSInteger);
        m_gprs.unlock(gpr);
    }

    case DataFormatJS: {
        // Check the value is an integer.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
        info.fillJSValue(gpr, DataFormatJSInteger);
        // If !strict we're done, return.
        if (!strict) {
            returnFormat = DataFormatJSInteger;
            return gpr;
        }
        // else fall through & handle as DataFormatJSInteger.
        m_gprs.unlock(gpr);
    }

    case DataFormatJSInteger: {
        // In a strict fill we need to strip off the value tag.
        if (strict) {
            GPRReg gpr = info.gpr();
            GPRReg result;
            // If the register has already been locked we need to take a copy.
            // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
            if (m_gprs.isLocked(gpr))
                result = allocate();
            else {
                m_gprs.lock(gpr);
                info.fillInteger(gpr);
                result = gpr;
            }
            m_jit.zeroExtend32ToPtr(gpr, result);
            returnFormat = DataFormatInteger;
            return result;
        }

        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatJSInteger;
        return gpr;
    }

    case DataFormatInteger: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatInteger;
        return gpr;
    }

    case DataFormatDouble:
    case DataFormatJSDouble: {
        if (node.hasConstant() && isInt32Constant(nodeIndex)) {
            GPRReg gpr = allocate();
            ASSERT(isInt32Constant(nodeIndex));
            m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
            returnFormat = DataFormatInteger;
            return gpr;
        }
    }
    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
        returnFormat = DataFormatInteger;
        return allocate();
    }

    case DataFormatStorage:
        ASSERT_NOT_REACHED();
    }

    ASSERT_NOT_REACHED();
    return InvalidGPRReg;
}

GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat)
{
    return fillSpeculateIntInternal<false>(nodeIndex, returnFormat);
}

GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
{
    DataFormat mustBeDataFormatInteger;
    GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger);
    ASSERT(mustBeDataFormatInteger == DataFormatInteger);
    return result;
}

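// Speculative fills for double, cell and boolean operands. Like fillSpeculateIntInternal,
// these emit speculationCheck() guards, or terminate speculative execution outright,
// when the operand cannot have the required type.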
1142 | FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) | |
1143 | { | |
1144 | #if DFG_ENABLE(DEBUG_VERBOSE) | |
1145 | dataLog("SpecDouble@%d ", nodeIndex); | |
1146 | #endif | |
1147 | Node& node = at(nodeIndex); | |
1148 | VirtualRegister virtualRegister = node.virtualRegister(); | |
1149 | GenerationInfo& info = m_generationInfo[virtualRegister]; | |
1150 | ||
1151 | if (info.registerFormat() == DataFormatNone) { | |
1152 | if (node.hasConstant()) { | |
1153 | GPRReg gpr = allocate(); | |
1154 | ||
1155 | if (isInt32Constant(nodeIndex)) { | |
1156 | FPRReg fpr = fprAllocate(); | |
1157 | m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(static_cast<double>(valueOfInt32Constant(nodeIndex))))), gpr); | |
1158 | m_jit.movePtrToDouble(gpr, fpr); | |
1159 | unlock(gpr); | |
1160 | ||
1161 | m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); | |
1162 | info.fillDouble(fpr); | |
1163 | return fpr; | |
1164 | } | |
1165 | if (isNumberConstant(nodeIndex)) { | |
1166 | FPRReg fpr = fprAllocate(); | |
1167 | m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr); | |
1168 | m_jit.movePtrToDouble(gpr, fpr); | |
1169 | unlock(gpr); | |
1170 | ||
1171 | m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); | |
1172 | info.fillDouble(fpr); | |
1173 | return fpr; | |
1174 | } | |
1175 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1176 | return fprAllocate(); | |
1177 | } | |
1178 | ||
1179 | DataFormat spillFormat = info.spillFormat(); | |
1180 | switch (spillFormat) { | |
1181 | case DataFormatDouble: { | |
1182 | FPRReg fpr = fprAllocate(); | |
1183 | m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); | |
1184 | m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); | |
1185 | info.fillDouble(fpr); | |
1186 | return fpr; | |
1187 | } | |
1188 | ||
1189 | case DataFormatInteger: { | |
1190 | GPRReg gpr = allocate(); | |
1191 | ||
1192 | m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); | |
1193 | m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); | |
1194 | info.fillInteger(gpr); | |
1195 | unlock(gpr); | |
1196 | break; | |
1197 | } | |
1198 | ||
1199 | default: | |
1200 | GPRReg gpr = allocate(); | |
1201 | ||
1202 | ASSERT(spillFormat & DataFormatJS); | |
1203 | m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); | |
1204 | m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); | |
1205 | info.fillJSValue(gpr, spillFormat); | |
1206 | unlock(gpr); | |
1207 | break; | |
1208 | } | |
1209 | } | |
1210 | ||
1211 | switch (info.registerFormat()) { | |
1212 | case DataFormatNone: // Should have filled, above. | |
1213 | case DataFormatBoolean: // This type never occurs. | |
1214 | case DataFormatStorage: | |
1215 | ASSERT_NOT_REACHED(); | |
1216 | ||
1217 | case DataFormatCell: | |
1218 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1219 | return fprAllocate(); | |
1220 | ||
1221 | case DataFormatJSCell: | |
1222 | case DataFormatJS: | |
1223 | case DataFormatJSBoolean: { | |
1224 | GPRReg jsValueGpr = info.gpr(); | |
1225 | m_gprs.lock(jsValueGpr); | |
1226 | FPRReg fpr = fprAllocate(); | |
1227 | GPRReg tempGpr = allocate(); | |
1228 | ||
1229 | JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister); | |
1230 | ||
1231 | speculationCheck(BadType, JSValueRegs(jsValueGpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister)); | |
1232 | ||
1233 | // First, if we get here we have a double encoded as a JSValue | |
1234 | m_jit.move(jsValueGpr, tempGpr); | |
1235 | unboxDouble(tempGpr, fpr); | |
1236 | JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); | |
1237 | ||
1238 | // Finally, handle integers. | |
1239 | isInteger.link(&m_jit); | |
1240 | m_jit.convertInt32ToDouble(jsValueGpr, fpr); | |
1241 | hasUnboxedDouble.link(&m_jit); | |
1242 | ||
1243 | m_gprs.release(jsValueGpr); | |
1244 | m_gprs.unlock(jsValueGpr); | |
1245 | m_gprs.unlock(tempGpr); | |
1246 | m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); | |
1247 | info.fillDouble(fpr); | |
1248 | info.killSpilled(); | |
1249 | return fpr; | |
1250 | } | |
1251 | ||
1252 | case DataFormatJSInteger: | |
1253 | case DataFormatInteger: { | |
1254 | FPRReg fpr = fprAllocate(); | |
1255 | GPRReg gpr = info.gpr(); | |
1256 | m_gprs.lock(gpr); | |
1257 | m_jit.convertInt32ToDouble(gpr, fpr); | |
1258 | m_gprs.unlock(gpr); | |
1259 | return fpr; | |
1260 | } | |
1261 | ||
1262 | // Unbox the double | |
1263 | case DataFormatJSDouble: { | |
1264 | GPRReg gpr = info.gpr(); | |
1265 | FPRReg fpr = fprAllocate(); | |
1266 | if (m_gprs.isLocked(gpr)) { | |
1267 | // Make sure we don't trample gpr if it is in use. | |
1268 | GPRReg temp = allocate(); | |
1269 | m_jit.move(gpr, temp); | |
1270 | unboxDouble(temp, fpr); | |
1271 | unlock(temp); | |
1272 | } else | |
1273 | unboxDouble(gpr, fpr); | |
1274 | ||
1275 | m_gprs.release(gpr); | |
1276 | m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); | |
1277 | ||
1278 | info.fillDouble(fpr); | |
1279 | return fpr; | |
1280 | } | |
1281 | ||
1282 | case DataFormatDouble: { | |
1283 | FPRReg fpr = info.fpr(); | |
1284 | m_fprs.lock(fpr); | |
1285 | return fpr; | |
1286 | } | |
1287 | } | |
1288 | ||
1289 | ASSERT_NOT_REACHED(); | |
1290 | return InvalidFPRReg; | |
1291 | } | |
1292 | ||
1293 | GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) | |
1294 | { | |
1295 | #if DFG_ENABLE(DEBUG_VERBOSE) | |
1296 | dataLog("SpecCell@%d ", nodeIndex); | |
1297 | #endif | |
1298 | Node& node = at(nodeIndex); | |
1299 | VirtualRegister virtualRegister = node.virtualRegister(); | |
1300 | GenerationInfo& info = m_generationInfo[virtualRegister]; | |
1301 | ||
1302 | switch (info.registerFormat()) { | |
1303 | case DataFormatNone: { | |
1304 | if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) { | |
1305 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1306 | return allocate(); | |
1307 | } | |
1308 | ||
1309 | GPRReg gpr = allocate(); | |
1310 | ||
1311 | if (node.hasConstant()) { | |
1312 | JSValue jsValue = valueOfJSConstant(nodeIndex); | |
1313 | if (jsValue.isCell()) { | |
1314 | m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); | |
1315 | m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr); | |
1316 | info.fillJSValue(gpr, DataFormatJSCell); | |
1317 | return gpr; | |
1318 | } | |
1319 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1320 | return gpr; | |
1321 | } | |
1322 | ASSERT(info.spillFormat() & DataFormatJS); | |
1323 | m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); | |
1324 | m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); | |
1325 | ||
1326 | info.fillJSValue(gpr, DataFormatJS); | |
1327 | if (info.spillFormat() != DataFormatJSCell) | |
1328 | speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister)); | |
1329 | info.fillJSValue(gpr, DataFormatJSCell); | |
1330 | return gpr; | |
1331 | } | |
1332 | ||
1333 | case DataFormatCell: | |
1334 | case DataFormatJSCell: { | |
1335 | GPRReg gpr = info.gpr(); | |
1336 | m_gprs.lock(gpr); | |
1337 | return gpr; | |
1338 | } | |
1339 | ||
1340 | case DataFormatJS: { | |
1341 | GPRReg gpr = info.gpr(); | |
1342 | m_gprs.lock(gpr); | |
1343 | speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister)); | |
1344 | info.fillJSValue(gpr, DataFormatJSCell); | |
1345 | return gpr; | |
1346 | } | |
1347 | ||
1348 | case DataFormatJSInteger: | |
1349 | case DataFormatInteger: | |
1350 | case DataFormatJSDouble: | |
1351 | case DataFormatDouble: | |
1352 | case DataFormatJSBoolean: | |
1353 | case DataFormatBoolean: { | |
1354 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1355 | return allocate(); | |
1356 | } | |
1357 | ||
1358 | case DataFormatStorage: | |
1359 | ASSERT_NOT_REACHED(); | |
1360 | } | |
1361 | ||
1362 | ASSERT_NOT_REACHED(); | |
1363 | return InvalidGPRReg; | |
1364 | } | |
1365 | ||
1366 | GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) | |
1367 | { | |
1368 | #if DFG_ENABLE(DEBUG_VERBOSE) | |
1369 | dataLog("SpecBool@%d ", nodeIndex); | |
1370 | #endif | |
1371 | Node& node = at(nodeIndex); | |
1372 | VirtualRegister virtualRegister = node.virtualRegister(); | |
1373 | GenerationInfo& info = m_generationInfo[virtualRegister]; | |
1374 | ||
1375 | switch (info.registerFormat()) { | |
1376 | case DataFormatNone: { | |
1377 | if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) { | |
1378 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1379 | return allocate(); | |
1380 | } | |
1381 | ||
1382 | GPRReg gpr = allocate(); | |
1383 | ||
1384 | if (node.hasConstant()) { | |
1385 | JSValue jsValue = valueOfJSConstant(nodeIndex); | |
1386 | if (jsValue.isBoolean()) { | |
1387 | m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); | |
1388 | m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr); | |
1389 | info.fillJSValue(gpr, DataFormatJSBoolean); | |
1390 | return gpr; | |
1391 | } | |
1392 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1393 | return gpr; | |
1394 | } | |
1395 | ASSERT(info.spillFormat() & DataFormatJS); | |
1396 | m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); | |
1397 | m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); | |
1398 | ||
1399 | info.fillJSValue(gpr, DataFormatJS); | |
1400 | if (info.spillFormat() != DataFormatJSBoolean) { | |
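 | // ValueFalse and ValueTrue differ only in the low bit, so XOR'ing with ValueFalse maps a boxed | |
 | // boolean to 0 or 1; any other bit being set means the value was not a boolean. The second XOR | |
 | // restores the boxed value, and the attached SpeculationRecovery does the same on the exit path. | |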
1401 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr); | |
1402 | speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg)); | |
1403 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr); | |
1404 | } | |
1405 | info.fillJSValue(gpr, DataFormatJSBoolean); | |
1406 | return gpr; | |
1407 | } | |
1408 | ||
1409 | case DataFormatBoolean: | |
1410 | case DataFormatJSBoolean: { | |
1411 | GPRReg gpr = info.gpr(); | |
1412 | m_gprs.lock(gpr); | |
1413 | return gpr; | |
1414 | } | |
1415 | ||
1416 | case DataFormatJS: { | |
1417 | GPRReg gpr = info.gpr(); | |
1418 | m_gprs.lock(gpr); | |
1419 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr); | |
1420 | speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg)); | |
1421 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr); | |
1422 | info.fillJSValue(gpr, DataFormatJSBoolean); | |
1423 | return gpr; | |
1424 | } | |
1425 | ||
1426 | case DataFormatJSInteger: | |
1427 | case DataFormatInteger: | |
1428 | case DataFormatJSDouble: | |
1429 | case DataFormatDouble: | |
1430 | case DataFormatJSCell: | |
1431 | case DataFormatCell: { | |
1432 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
1433 | return allocate(); | |
1434 | } | |
1435 | ||
1436 | case DataFormatStorage: | |
1437 | ASSERT_NOT_REACHED(); | |
1438 | } | |
1439 | ||
1440 | ASSERT_NOT_REACHED(); | |
1441 | return InvalidGPRReg; | |
1442 | } | |
1443 | ||
1444 | JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp) | |
1445 | { | |
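 | // Converts the boxed JSValue in 'value' into a double in 'result', clobbering 'tmp' for the | |
 | // boxed-double case. The returned jump is taken when 'value' is not a number at all and must be | |
 | // linked by the caller. | |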
1446 | JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, value, GPRInfo::tagTypeNumberRegister); | |
1447 | ||
1448 | JITCompiler::Jump notNumber = m_jit.branchTestPtr(MacroAssembler::Zero, value, GPRInfo::tagTypeNumberRegister); | |
1449 | ||
1450 | m_jit.move(value, tmp); | |
1451 | unboxDouble(tmp, result); | |
1452 | ||
1453 | JITCompiler::Jump done = m_jit.jump(); | |
1454 | ||
1455 | isInteger.link(&m_jit); | |
1456 | ||
1457 | m_jit.convertInt32ToDouble(value, result); | |
1458 | ||
1459 | done.link(&m_jit); | |
1460 | ||
1461 | return notNumber; | |
1462 | } | |
1463 | ||
1464 | void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInfo, PredictionChecker predictionCheck) | |
1465 | { | |
1466 | SpeculateCellOperand op1(this, node.child1()); | |
1467 | SpeculateCellOperand op2(this, node.child2()); | |
1468 | GPRTemporary result(this, op1); | |
1469 | ||
1470 | GPRReg op1GPR = op1.gpr(); | |
1471 | GPRReg op2GPR = op2.gpr(); | |
1472 | GPRReg resultGPR = result.gpr(); | |
1473 | ||
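 | // If the abstract interpreter has not already proved that an operand is an object of the expected | |
 | // class, emit a class-info check for it; the equality itself is then a plain pointer comparison. | |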
1474 | if (!predictionCheck(m_state.forNode(node.child1()).m_type)) | |
1475 | speculationCheck(BadType, JSValueRegs(op1GPR), node.child1().index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); | |
1476 | if (!predictionCheck(m_state.forNode(node.child2()).m_type)) | |
1477 | speculationCheck(BadType, JSValueRegs(op2GPR), node.child2().index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); | |
1478 | ||
1479 | MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR); | |
1480 | m_jit.move(TrustedImm32(ValueTrue), resultGPR); | |
1481 | MacroAssembler::Jump done = m_jit.jump(); | |
1482 | falseCase.link(&m_jit); | |
1483 | m_jit.move(TrustedImm32(ValueFalse), resultGPR); | |
1484 | done.link(&m_jit); | |
1485 | ||
1486 | jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean); | |
1487 | } | |
1488 | ||
1489 | void SpeculativeJIT::compileObjectToObjectOrOtherEquality( | |
1490 | Edge leftChild, Edge rightChild, | |
1491 | const ClassInfo* classInfo, PredictionChecker predictionCheck) | |
1492 | { | |
1493 | SpeculateCellOperand op1(this, leftChild); | |
1494 | JSValueOperand op2(this, rightChild); | |
1495 | GPRTemporary result(this); | |
1496 | ||
1497 | GPRReg op1GPR = op1.gpr(); | |
1498 | GPRReg op2GPR = op2.gpr(); | |
1499 | GPRReg resultGPR = result.gpr(); | |
1500 | ||
1501 | if (!predictionCheck(m_state.forNode(leftChild).m_type)) { | |
1502 | speculationCheck( | |
1503 | BadType, JSValueRegs(op1GPR), leftChild.index(), | |
1504 | m_jit.branchPtr( | |
1505 | MacroAssembler::NotEqual, | |
1506 | MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), | |
1507 | MacroAssembler::TrustedImmPtr(classInfo))); | |
1508 | } | |
1509 | ||
1510 | // It seems that when programs do a == b where b may be either null/undefined or an object, | |
1511 | // b is usually an object. Balance the branches to make that case fast. | |
1512 | MacroAssembler::Jump rightNotCell = | |
1513 | m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister); | |
1514 | ||
1515 | // We know that within this branch, rightChild must be a cell. If the CFA's proof, once | |
1516 | // filtered on cell, already shows that we have an object of the desired type | |
1517 | // (predictionCheck() currently tests for FinalObject or Array), then we can skip the | |
1518 | // speculation. | |
1519 | if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) { | |
1520 | speculationCheck( | |
1521 | BadType, JSValueRegs(op2GPR), rightChild.index(), | |
1522 | m_jit.branchPtr( | |
1523 | MacroAssembler::NotEqual, | |
1524 | MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), | |
1525 | MacroAssembler::TrustedImmPtr(classInfo))); | |
1526 | } | |
1527 | ||
1528 | // At this point we know that we can perform a straight-forward equality comparison on pointer | |
1529 | // values because both left and right are pointers to objects that have no special equality | |
1530 | // protocols. | |
1531 | MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR); | |
1532 | MacroAssembler::Jump trueCase = m_jit.jump(); | |
1533 | ||
1534 | rightNotCell.link(&m_jit); | |
1535 | ||
1536 | // We know that within this branch, rightChild must not be a cell. Check if that is enough to | |
1537 | // prove that it is either null or undefined. | |
1538 | if (!isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell)) { | |
1539 | m_jit.move(op2GPR, resultGPR); | |
1540 | m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR); | |
1541 | ||
1542 | speculationCheck( | |
1543 | BadType, JSValueRegs(op2GPR), rightChild.index(), | |
1544 | m_jit.branchPtr( | |
1545 | MacroAssembler::NotEqual, resultGPR, | |
1546 | MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)))); | |
1547 | } | |
1548 | ||
1549 | falseCase.link(&m_jit); | |
1550 | m_jit.move(TrustedImm32(ValueFalse), resultGPR); | |
1551 | MacroAssembler::Jump done = m_jit.jump(); | |
1552 | trueCase.link(&m_jit); | |
1553 | m_jit.move(TrustedImm32(ValueTrue), resultGPR); | |
1554 | done.link(&m_jit); | |
1555 | ||
1556 | jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean); | |
1557 | } | |
1558 | ||
1559 | void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality( | |
1560 | Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex, | |
1561 | const ClassInfo* classInfo, PredictionChecker predictionCheck) | |
1562 | { | |
1563 | Node& branchNode = at(branchNodeIndex); | |
1564 | BlockIndex taken = branchNode.takenBlockIndex(); | |
1565 | BlockIndex notTaken = branchNode.notTakenBlockIndex(); | |
1566 | ||
1567 | SpeculateCellOperand op1(this, leftChild); | |
1568 | JSValueOperand op2(this, rightChild); | |
1569 | GPRTemporary result(this); | |
1570 | ||
1571 | GPRReg op1GPR = op1.gpr(); | |
1572 | GPRReg op2GPR = op2.gpr(); | |
1573 | GPRReg resultGPR = result.gpr(); | |
1574 | ||
1575 | if (!predictionCheck(m_state.forNode(leftChild).m_type)) { | |
1576 | speculationCheck( | |
1577 | BadType, JSValueRegs(op1GPR), leftChild.index(), | |
1578 | m_jit.branchPtr( | |
1579 | MacroAssembler::NotEqual, | |
1580 | MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), | |
1581 | MacroAssembler::TrustedImmPtr(classInfo))); | |
1582 | } | |
1583 | ||
1584 | // It seems that when programs do a == b where b may be either null/undefined or an object, | |
1585 | // b is usually an object. Balance the branches to make that case fast. | |
1586 | MacroAssembler::Jump rightNotCell = | |
1587 | m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister); | |
1588 | ||
1589 | // We know that within this branch, rightChild must be a cell. If the CFA's proof, once | |
1590 | // filtered on cell, already shows that we have an object of the desired type | |
1591 | // (predictionCheck() currently tests for FinalObject or Array), then we can skip the | |
1592 | // speculation. | |
1593 | if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) { | |
1594 | speculationCheck( | |
1595 | BadType, JSValueRegs(op2GPR), rightChild.index(), | |
1596 | m_jit.branchPtr( | |
1597 | MacroAssembler::NotEqual, | |
1598 | MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), | |
1599 | MacroAssembler::TrustedImmPtr(classInfo))); | |
1600 | } | |
1601 | ||
1602 | // At this point we know that we can perform a straight-forward equality comparison on pointer | |
1603 | // values because both left and right are pointers to objects that have no special equality | |
1604 | // protocols. | |
1605 | branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken); | |
1606 | ||
1607 | // We know that within this branch, rightChild must not be a cell. Check if that is enough to | |
1608 | // prove that it is either null or undefined. | |
1609 | if (isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell)) | |
1610 | rightNotCell.link(&m_jit); | |
1611 | else { | |
1612 | jump(notTaken, ForceJump); | |
1613 | ||
1614 | rightNotCell.link(&m_jit); | |
1615 | m_jit.move(op2GPR, resultGPR); | |
1616 | m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR); | |
1617 | ||
1618 | speculationCheck( | |
1619 | BadType, JSValueRegs(op2GPR), rightChild.index(), | |
1620 | m_jit.branchPtr( | |
1621 | MacroAssembler::NotEqual, resultGPR, | |
1622 | MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)))); | |
1623 | } | |
1624 | ||
1625 | jump(notTaken); | |
1626 | } | |
1627 | ||
1628 | void SpeculativeJIT::compileIntegerCompare(Node& node, MacroAssembler::RelationalCondition condition) | |
1629 | { | |
1630 | SpeculateIntegerOperand op1(this, node.child1()); | |
1631 | SpeculateIntegerOperand op2(this, node.child2()); | |
1632 | GPRTemporary result(this, op1, op2); | |
1633 | ||
1634 | m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr()); | |
1635 | ||
1636 | // If we add a DataFormatBool, we should use it here. | |
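 | // compare32() leaves 0 or 1 in the result register; OR'ing in ValueFalse turns that directly into | |
 | // the boxed false/true encoding, since ValueTrue is ValueFalse with the low bit set. | |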
1637 | m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); | |
1638 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
1639 | } | |
1640 | ||
1641 | void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCondition condition) | |
1642 | { | |
1643 | SpeculateDoubleOperand op1(this, node.child1()); | |
1644 | SpeculateDoubleOperand op2(this, node.child2()); | |
1645 | GPRTemporary result(this); | |
1646 | ||
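 | // Assume the result is true, then branch on the double condition; if the branch is not taken, | |
 | // flip the low bit to turn the boxed true into boxed false. | |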
1647 | m_jit.move(TrustedImm32(ValueTrue), result.gpr()); | |
1648 | MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr()); | |
1649 | m_jit.xorPtr(TrustedImm32(true), result.gpr()); | |
1650 | trueCase.link(&m_jit); | |
1651 | ||
1652 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
1653 | } | |
1654 | ||
1655 | void SpeculativeJIT::compileValueAdd(Node& node) | |
1656 | { | |
1657 | JSValueOperand op1(this, node.child1()); | |
1658 | JSValueOperand op2(this, node.child2()); | |
1659 | ||
1660 | GPRReg op1GPR = op1.gpr(); | |
1661 | GPRReg op2GPR = op2.gpr(); | |
1662 | ||
1663 | flushRegisters(); | |
1664 | ||
1665 | GPRResult result(this); | |
1666 | if (isKnownNotNumber(node.child1().index()) || isKnownNotNumber(node.child2().index())) | |
1667 | callOperation(operationValueAddNotNumber, result.gpr(), op1GPR, op2GPR); | |
1668 | else | |
1669 | callOperation(operationValueAdd, result.gpr(), op1GPR, op2GPR); | |
1670 | ||
1671 | jsValueResult(result.gpr(), m_compileIndex); | |
1672 | } | |
1673 | ||
1674 | void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse, const ClassInfo* classInfo, bool needSpeculationCheck) | |
1675 | { | |
1676 | JSValueOperand value(this, nodeUse); | |
1677 | GPRTemporary result(this); | |
1678 | GPRReg valueGPR = value.gpr(); | |
1679 | GPRReg resultGPR = result.gpr(); | |
1680 | ||
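 | // A cell of the expected class is truthy, so its logical-not is false; null and undefined are | |
 | // falsy, so their logical-not is true. Anything else fails the speculation checks below (when | |
 | // needSpeculationCheck is set). | |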
1681 | MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister); | |
1682 | if (needSpeculationCheck) | |
1683 | speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valueGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); | |
1684 | m_jit.move(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR); | |
1685 | MacroAssembler::Jump done = m_jit.jump(); | |
1686 | ||
1687 | notCell.link(&m_jit); | |
1688 | ||
1689 | if (needSpeculationCheck) { | |
1690 | m_jit.move(valueGPR, resultGPR); | |
1691 | m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR); | |
1692 | speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse, m_jit.branchPtr(MacroAssembler::NotEqual, resultGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)))); | |
1693 | } | |
1694 | m_jit.move(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR); | |
1695 | ||
1696 | done.link(&m_jit); | |
1697 | ||
1698 | jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean); | |
1699 | } | |
1700 | ||
1701 | void SpeculativeJIT::compileLogicalNot(Node& node) | |
1702 | { | |
1703 | if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) { | |
1704 | compileObjectOrOtherLogicalNot(node.child1(), &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type)); | |
1705 | return; | |
1706 | } | |
1707 | if (at(node.child1()).shouldSpeculateArrayOrOther()) { | |
1708 | compileObjectOrOtherLogicalNot(node.child1(), &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type)); | |
1709 | return; | |
1710 | } | |
1711 | if (at(node.child1()).shouldSpeculateInteger()) { | |
1712 | SpeculateIntegerOperand value(this, node.child1()); | |
1713 | GPRTemporary result(this, value); | |
1714 | m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr()); | |
1715 | m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); | |
1716 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
1717 | return; | |
1718 | } | |
1719 | if (at(node.child1()).shouldSpeculateNumber()) { | |
1720 | SpeculateDoubleOperand value(this, node.child1()); | |
1721 | FPRTemporary scratch(this); | |
1722 | GPRTemporary result(this); | |
1723 | m_jit.move(TrustedImm32(ValueFalse), result.gpr()); | |
1724 | MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()); | |
1725 | m_jit.xor32(TrustedImm32(true), result.gpr()); | |
1726 | nonZero.link(&m_jit); | |
1727 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
1728 | return; | |
1729 | } | |
1730 | ||
1731 | PredictedType prediction = m_jit.getPrediction(node.child1()); | |
1732 | if (isBooleanPrediction(prediction)) { | |
1733 | if (isBooleanPrediction(m_state.forNode(node.child1()).m_type)) { | |
1734 | SpeculateBooleanOperand value(this, node.child1()); | |
1735 | GPRTemporary result(this, value); | |
1736 | ||
1737 | m_jit.move(value.gpr(), result.gpr()); | |
1738 | m_jit.xorPtr(TrustedImm32(true), result.gpr()); | |
1739 | ||
1740 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
1741 | return; | |
1742 | } | |
1743 | ||
1744 | JSValueOperand value(this, node.child1()); | |
1745 | GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add). | |
1746 | ||
1747 | m_jit.move(value.gpr(), result.gpr()); | |
1748 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr()); | |
1749 | speculationCheck(BadType, JSValueRegs(value.gpr()), node.child1(), m_jit.branchTestPtr(JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1)))); | |
1750 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr()); | |
1751 | ||
1752 | // If we add a DataFormatBool, we should use it here. | |
1753 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
1754 | return; | |
1755 | } | |
1756 | ||
1757 | JSValueOperand arg1(this, node.child1()); | |
1758 | GPRTemporary result(this); | |
1759 | ||
1760 | GPRReg arg1GPR = arg1.gpr(); | |
1761 | GPRReg resultGPR = result.gpr(); | |
1762 | ||
1763 | arg1.use(); | |
1764 | ||
1765 | m_jit.move(arg1GPR, resultGPR); | |
1766 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), resultGPR); | |
1767 | JITCompiler::Jump fastCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, TrustedImm32(static_cast<int32_t>(~1))); | |
1768 | ||
1769 | silentSpillAllRegisters(resultGPR); | |
1770 | callOperation(dfgConvertJSValueToBoolean, resultGPR, arg1GPR); | |
1771 | silentFillAllRegisters(resultGPR); | |
1772 | ||
1773 | fastCase.link(&m_jit); | |
1774 | ||
1775 | m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), resultGPR); | |
1776 | jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly); | |
1777 | } | |
1778 | ||
1779 | void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck) | |
1780 | { | |
1781 | JSValueOperand value(this, nodeUse); | |
1782 | GPRTemporary scratch(this); | |
1783 | GPRReg valueGPR = value.gpr(); | |
1784 | GPRReg scratchGPR = scratch.gpr(); | |
1785 | ||
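 | // Objects of the expected class branch to 'taken'; null and undefined branch to 'notTaken'; | |
 | // anything else fails the speculation checks (when needSpeculationCheck is set). | |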
1786 | MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister); | |
1787 | if (needSpeculationCheck) | |
1788 | speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valueGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); | |
1789 | jump(taken, ForceJump); | |
1790 | ||
1791 | notCell.link(&m_jit); | |
1792 | ||
1793 | if (needSpeculationCheck) { | |
1794 | m_jit.move(valueGPR, scratchGPR); | |
1795 | m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR); | |
1796 | speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)))); | |
1797 | } | |
1798 | jump(notTaken); | |
1799 | ||
1800 | noResult(m_compileIndex); | |
1801 | } | |
1802 | ||
1803 | void SpeculativeJIT::emitBranch(Node& node) | |
1804 | { | |
1805 | BlockIndex taken = node.takenBlockIndex(); | |
1806 | BlockIndex notTaken = node.notTakenBlockIndex(); | |
1807 | ||
1808 | if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) { | |
1809 | emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type)); | |
1810 | } else if (at(node.child1()).shouldSpeculateArrayOrOther()) { | |
1811 | emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type)); | |
1812 | } else if (at(node.child1()).shouldSpeculateNumber()) { | |
1813 | if (at(node.child1()).shouldSpeculateInteger()) { | |
1814 | bool invert = false; | |
1815 | ||
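 | // If the taken block is the next block in emission order, swap the targets and invert the | |
 | // condition so the conditional branch only fires for the non-fall-through successor. | |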
1816 | if (taken == (m_block + 1)) { | |
1817 | invert = true; | |
1818 | BlockIndex tmp = taken; | |
1819 | taken = notTaken; | |
1820 | notTaken = tmp; | |
1821 | } | |
1822 | ||
1823 | SpeculateIntegerOperand value(this, node.child1()); | |
1824 | branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken); | |
1825 | } else { | |
1826 | SpeculateDoubleOperand value(this, node.child1()); | |
1827 | FPRTemporary scratch(this); | |
1828 | branchDoubleNonZero(value.fpr(), scratch.fpr(), taken); | |
1829 | } | |
1830 | ||
1831 | jump(notTaken); | |
1832 | ||
1833 | noResult(m_compileIndex); | |
1834 | } else { | |
1835 | JSValueOperand value(this, node.child1()); | |
1836 | GPRReg valueGPR = value.gpr(); | |
1837 | ||
1838 | bool predictBoolean = isBooleanPrediction(m_jit.getPrediction(node.child1())); | |
1839 | ||
1840 | if (predictBoolean) { | |
1841 | if (isBooleanPrediction(m_state.forNode(node.child1()).m_type)) { | |
1842 | MacroAssembler::ResultCondition condition = MacroAssembler::NonZero; | |
1843 | ||
1844 | if (taken == (m_block + 1)) { | |
1845 | condition = MacroAssembler::Zero; | |
1846 | BlockIndex tmp = taken; | |
1847 | taken = notTaken; | |
1848 | notTaken = tmp; | |
1849 | } | |
1850 | ||
1851 | branchTest32(condition, valueGPR, TrustedImm32(true), taken); | |
1852 | jump(notTaken); | |
1853 | } else { | |
1854 | branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken); | |
1855 | branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken); | |
1856 | ||
1857 | speculationCheck(BadType, JSValueRegs(valueGPR), node.child1(), m_jit.jump()); | |
1858 | } | |
1859 | value.use(); | |
1860 | } else { | |
1861 | GPRTemporary result(this); | |
1862 | GPRReg resultGPR = result.gpr(); | |
1863 | ||
1864 | branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsNumber(0))), notTaken); | |
1865 | branchPtr(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken); | |
1866 | ||
1867 | if (!predictBoolean) { | |
1868 | branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken); | |
1869 | branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken); | |
1870 | } | |
1871 | ||
1872 | value.use(); | |
1873 | ||
1874 | silentSpillAllRegisters(resultGPR); | |
1875 | callOperation(dfgConvertJSValueToBoolean, resultGPR, valueGPR); | |
1876 | silentFillAllRegisters(resultGPR); | |
1877 | ||
1878 | branchTest32(MacroAssembler::NonZero, resultGPR, taken); | |
1879 | jump(notTaken); | |
1880 | } | |
1881 | ||
1882 | noResult(m_compileIndex, UseChildrenCalledExplicitly); | |
1883 | } | |
1884 | } | |
1885 | ||
1886 | void SpeculativeJIT::compile(Node& node) | |
1887 | { | |
1888 | NodeType op = node.op(); | |
1889 | ||
1890 | switch (op) { | |
1891 | case JSConstant: | |
1892 | initConstantInfo(m_compileIndex); | |
1893 | break; | |
1894 | ||
1895 | case WeakJSConstant: | |
1896 | m_jit.addWeakReference(node.weakConstant()); | |
1897 | initConstantInfo(m_compileIndex); | |
1898 | break; | |
1899 | ||
1900 | case GetLocal: { | |
1901 | PredictedType prediction = node.variableAccessData()->prediction(); | |
1902 | AbstractValue& value = block()->valuesAtHead.operand(node.local()); | |
1903 | ||
1904 | // If we have no prediction for this local, then don't attempt to compile. | |
1905 | if (prediction == PredictNone || value.isClear()) { | |
1906 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); | |
1907 | break; | |
1908 | } | |
1909 | ||
1910 | if (!m_jit.graph().isCaptured(node.local())) { | |
1911 | if (node.variableAccessData()->shouldUseDoubleFormat()) { | |
1912 | FPRTemporary result(this); | |
1913 | m_jit.loadDouble(JITCompiler::addressFor(node.local()), result.fpr()); | |
1914 | VirtualRegister virtualRegister = node.virtualRegister(); | |
1915 | m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble); | |
1916 | m_generationInfo[virtualRegister].initDouble(m_compileIndex, node.refCount(), result.fpr()); | |
1917 | break; | |
1918 | } | |
1919 | ||
1920 | if (isInt32Prediction(value.m_type)) { | |
1921 | GPRTemporary result(this); | |
1922 | m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr()); | |
1923 | ||
1924 | // Like integerResult, but don't useChildren - our children are phi nodes, | |
1925 | // and don't represent values within this dataflow with virtual registers. | |
1926 | VirtualRegister virtualRegister = node.virtualRegister(); | |
1927 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger); | |
1928 | m_generationInfo[virtualRegister].initInteger(m_compileIndex, node.refCount(), result.gpr()); | |
1929 | break; | |
1930 | } | |
1931 | } | |
1932 | ||
1933 | GPRTemporary result(this); | |
1934 | m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.gpr()); | |
1935 | ||
1936 | // Like jsValueResult, but don't useChildren - our children are phi nodes, | |
1937 | // and don't represent values within this dataflow with virtual registers. | |
1938 | VirtualRegister virtualRegister = node.virtualRegister(); | |
1939 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS); | |
1940 | ||
1941 | DataFormat format; | |
1942 | if (m_jit.graph().isCaptured(node.local())) | |
1943 | format = DataFormatJS; | |
1944 | else if (isCellPrediction(value.m_type)) | |
1945 | format = DataFormatJSCell; | |
1946 | else if (isBooleanPrediction(value.m_type)) | |
1947 | format = DataFormatJSBoolean; | |
1948 | else | |
1949 | format = DataFormatJS; | |
1950 | ||
1951 | m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), result.gpr(), format); | |
1952 | break; | |
1953 | } | |
1954 | ||
1955 | case SetLocal: { | |
1956 | // SetLocal doubles as a hint as to where a node will be stored and | |
1957 | // as a speculation point. So before we speculate make sure that we | |
1958 | // know where the child of this node needs to go in the virtual | |
1959 | // register file. | |
1960 | compileMovHint(node); | |
1961 | ||
1962 | // As far as OSR is concerned, we're on the bytecode index corresponding | |
1963 | // to the *next* instruction, since we've already "executed" the | |
1964 | // SetLocal and whatever other DFG Nodes are associated with the same | |
1965 | // bytecode index as the SetLocal. | |
1966 | ASSERT(m_codeOriginForOSR == node.codeOrigin); | |
1967 | Node* nextNode = &at(block()->at(m_indexInBlock + 1)); | |
1968 | ||
1969 | // But even more oddly, we need to be super careful about the following | |
1970 | // sequence: | |
1971 | // | |
1972 | // a: Foo() | |
1973 | // b: SetLocal(@a) | |
1974 | // c: Flush(@b) | |
1975 | // | |
1976 | // The check below takes care of this case. | |
1977 | if (nextNode->op() == Flush && nextNode->child1() == m_compileIndex) | |
1978 | nextNode = &at(block()->at(m_indexInBlock + 2)); | |
1979 | ||
1980 | // Oddly, it's possible for the bytecode index for the next node to be | |
1981 | // equal to ours. This will happen for op_post_inc. And, even more oddly, | |
1982 | // this is just fine. Ordinarily, this wouldn't be fine, since if the | |
1983 | // next node failed OSR then we'd be OSR-ing with this SetLocal's local | |
1984 | // variable already set even though from the standpoint of the old JIT, | |
1985 | // this SetLocal should not have executed. But for op_post_inc, it's just | |
1986 | // fine, because this SetLocal's local (i.e. the LHS in a x = y++ | |
1987 | // statement) would be dead anyway - so the fact that DFG would have | |
1988 | // already made the assignment, and baked it into the register file during | |
1989 | // OSR exit, would not be visible to the old JIT in any way. | |
1990 | m_codeOriginForOSR = nextNode->codeOrigin; | |
1991 | ||
1992 | if (!m_jit.graph().isCaptured(node.local())) { | |
1993 | if (node.variableAccessData()->shouldUseDoubleFormat()) { | |
1994 | SpeculateDoubleOperand value(this, node.child1()); | |
1995 | m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local())); | |
1996 | noResult(m_compileIndex); | |
1997 | // Indicate that it's no longer necessary to retrieve the value of | |
1998 | // this bytecode variable from registers or other locations in the register file, | |
1999 | // but that it is stored as a double. | |
2000 | valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile); | |
2001 | break; | |
2002 | } | |
2003 | ||
2004 | PredictedType predictedType = node.variableAccessData()->argumentAwarePrediction(); | |
2005 | if (isInt32Prediction(predictedType)) { | |
2006 | SpeculateIntegerOperand value(this, node.child1()); | |
2007 | m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local())); | |
2008 | noResult(m_compileIndex); | |
2009 | valueSourceReferenceForOperand(node.local()) = ValueSource(Int32InRegisterFile); | |
2010 | break; | |
2011 | } | |
2012 | if (isArrayPrediction(predictedType)) { | |
2013 | SpeculateCellOperand cell(this, node.child1()); | |
2014 | GPRReg cellGPR = cell.gpr(); | |
2015 | if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) | |
2016 | speculationCheck(BadType, JSValueRegs(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); | |
2017 | m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local())); | |
2018 | noResult(m_compileIndex); | |
2019 | valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile); | |
2020 | break; | |
2021 | } | |
2022 | if (isBooleanPrediction(predictedType)) { | |
2023 | SpeculateBooleanOperand boolean(this, node.child1()); | |
2024 | m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local())); | |
2025 | noResult(m_compileIndex); | |
2026 | valueSourceReferenceForOperand(node.local()) = ValueSource(BooleanInRegisterFile); | |
2027 | break; | |
2028 | } | |
2029 | } | |
2030 | ||
2031 | JSValueOperand value(this, node.child1()); | |
2032 | m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local())); | |
2033 | noResult(m_compileIndex); | |
2034 | ||
2035 | valueSourceReferenceForOperand(node.local()) = ValueSource(ValueInRegisterFile); | |
2036 | break; | |
2037 | } | |
2038 | ||
2039 | case SetArgument: | |
2040 | // This is a no-op; it just marks the fact that the argument is being used. | |
2041 | // But it may be profitable to use this as a hook to run speculation checks | |
2042 | // on arguments, thereby allowing us to trivially eliminate such checks if | |
2043 | // the argument is not used. | |
2044 | break; | |
2045 | ||
2046 | case BitAnd: | |
2047 | case BitOr: | |
2048 | case BitXor: | |
2049 | if (isInt32Constant(node.child1().index())) { | |
2050 | SpeculateIntegerOperand op2(this, node.child2()); | |
2051 | GPRTemporary result(this, op2); | |
2052 | ||
2053 | bitOp(op, valueOfInt32Constant(node.child1().index()), op2.gpr(), result.gpr()); | |
2054 | ||
2055 | integerResult(result.gpr(), m_compileIndex); | |
2056 | } else if (isInt32Constant(node.child2().index())) { | |
2057 | SpeculateIntegerOperand op1(this, node.child1()); | |
2058 | GPRTemporary result(this, op1); | |
2059 | ||
2060 | bitOp(op, valueOfInt32Constant(node.child2().index()), op1.gpr(), result.gpr()); | |
2061 | ||
2062 | integerResult(result.gpr(), m_compileIndex); | |
2063 | } else { | |
2064 | SpeculateIntegerOperand op1(this, node.child1()); | |
2065 | SpeculateIntegerOperand op2(this, node.child2()); | |
2066 | GPRTemporary result(this, op1, op2); | |
2067 | ||
2068 | GPRReg reg1 = op1.gpr(); | |
2069 | GPRReg reg2 = op2.gpr(); | |
2070 | bitOp(op, reg1, reg2, result.gpr()); | |
2071 | ||
2072 | integerResult(result.gpr(), m_compileIndex); | |
2073 | } | |
2074 | break; | |
2075 | ||
2076 | case BitRShift: | |
2077 | case BitLShift: | |
2078 | case BitURShift: | |
2079 | if (isInt32Constant(node.child2().index())) { | |
2080 | SpeculateIntegerOperand op1(this, node.child1()); | |
2081 | GPRTemporary result(this, op1); | |
2082 | ||
2083 | shiftOp(op, op1.gpr(), valueOfInt32Constant(node.child2().index()) & 0x1f, result.gpr()); | |
2084 | ||
2085 | integerResult(result.gpr(), m_compileIndex); | |
2086 | } else { | |
2087 | // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this. | |
2088 | SpeculateIntegerOperand op1(this, node.child1()); | |
2089 | SpeculateIntegerOperand op2(this, node.child2()); | |
2090 | GPRTemporary result(this, op1); | |
2091 | ||
2092 | GPRReg reg1 = op1.gpr(); | |
2093 | GPRReg reg2 = op2.gpr(); | |
2094 | shiftOp(op, reg1, reg2, result.gpr()); | |
2095 | ||
2096 | integerResult(result.gpr(), m_compileIndex); | |
2097 | } | |
2098 | break; | |
2099 | ||
2100 | case UInt32ToNumber: { | |
2101 | compileUInt32ToNumber(node); | |
2102 | break; | |
2103 | } | |
2104 | ||
2105 | case DoubleAsInt32: { | |
2106 | compileDoubleAsInt32(node); | |
2107 | break; | |
2108 | } | |
2109 | ||
2110 | case ValueToInt32: { | |
2111 | compileValueToInt32(node); | |
2112 | break; | |
2113 | } | |
2114 | ||
2115 | case Int32ToDouble: { | |
2116 | compileInt32ToDouble(node); | |
2117 | break; | |
2118 | } | |
2119 | ||
2120 | case CheckNumber: { | |
2121 | if (!isNumberPrediction(m_state.forNode(node.child1()).m_type)) { | |
2122 | JSValueOperand op1(this, node.child1()); | |
2123 | JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, op1.gpr(), GPRInfo::tagTypeNumberRegister); | |
2124 | speculationCheck( | |
2125 | BadType, JSValueRegs(op1.gpr()), node.child1().index(), | |
2126 | m_jit.branchTestPtr(MacroAssembler::Zero, op1.gpr(), GPRInfo::tagTypeNumberRegister)); | |
2127 | isInteger.link(&m_jit); | |
2128 | } | |
2129 | noResult(m_compileIndex); | |
2130 | break; | |
2131 | } | |
2132 | ||
2133 | case ValueAdd: | |
2134 | case ArithAdd: | |
2135 | compileAdd(node); | |
2136 | break; | |
2137 | ||
2138 | case ArithSub: | |
2139 | compileArithSub(node); | |
2140 | break; | |
2141 | ||
2142 | case ArithNegate: | |
2143 | compileArithNegate(node); | |
2144 | break; | |
2145 | ||
2146 | case ArithMul: | |
2147 | compileArithMul(node); | |
2148 | break; | |
2149 | ||
2150 | case ArithDiv: { | |
2151 | if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) { | |
2152 | compileIntegerArithDivForX86(node); | |
2153 | break; | |
2154 | } | |
2155 | ||
2156 | SpeculateDoubleOperand op1(this, node.child1()); | |
2157 | SpeculateDoubleOperand op2(this, node.child2()); | |
2158 | FPRTemporary result(this, op1); | |
2159 | ||
2160 | FPRReg reg1 = op1.fpr(); | |
2161 | FPRReg reg2 = op2.fpr(); | |
2162 | m_jit.divDouble(reg1, reg2, result.fpr()); | |
2163 | ||
2164 | doubleResult(result.fpr(), m_compileIndex); | |
2165 | break; | |
2166 | } | |
2167 | ||
2168 | case ArithMod: { | |
2169 | compileArithMod(node); | |
2170 | break; | |
2171 | } | |
2172 | ||
2173 | case ArithAbs: { | |
2174 | if (at(node.child1()).shouldSpeculateInteger() && node.canSpeculateInteger()) { | |
2175 | SpeculateIntegerOperand op1(this, node.child1()); | |
2176 | GPRTemporary result(this); | |
2177 | GPRTemporary scratch(this); | |
2178 | ||
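 | // Branch-free abs(): scratch = x >> 31 (all zeros or all ones); (x + scratch) ^ scratch is |x| | |
 | // for every input except INT_MIN, which still comes out as 1 << 31 and is caught by the overflow | |
 | // check below. | |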
2179 | m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr()); | |
2180 | m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr()); | |
2181 | m_jit.add32(scratch.gpr(), result.gpr()); | |
2182 | m_jit.xor32(scratch.gpr(), result.gpr()); | |
2183 | speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31))); | |
2184 | integerResult(result.gpr(), m_compileIndex); | |
2185 | break; | |
2186 | } | |
2187 | ||
2188 | SpeculateDoubleOperand op1(this, node.child1()); | |
2189 | FPRTemporary result(this); | |
2190 | ||
2191 | m_jit.absDouble(op1.fpr(), result.fpr()); | |
2192 | doubleResult(result.fpr(), m_compileIndex); | |
2193 | break; | |
2194 | } | |
2195 | ||
2196 | case ArithMin: | |
2197 | case ArithMax: { | |
2198 | if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) { | |
2199 | SpeculateStrictInt32Operand op1(this, node.child1()); | |
2200 | SpeculateStrictInt32Operand op2(this, node.child2()); | |
2201 | GPRTemporary result(this, op1); | |
2202 | ||
2203 | MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr()); | |
2204 | m_jit.move(op2.gpr(), result.gpr()); | |
2205 | if (op1.gpr() != result.gpr()) { | |
2206 | MacroAssembler::Jump done = m_jit.jump(); | |
2207 | op1Less.link(&m_jit); | |
2208 | m_jit.move(op1.gpr(), result.gpr()); | |
2209 | done.link(&m_jit); | |
2210 | } else | |
2211 | op1Less.link(&m_jit); | |
2212 | ||
2213 | integerResult(result.gpr(), m_compileIndex); | |
2214 | break; | |
2215 | } | |
2216 | ||
2217 | SpeculateDoubleOperand op1(this, node.child1()); | |
2218 | SpeculateDoubleOperand op2(this, node.child2()); | |
2219 | FPRTemporary result(this, op1); | |
2220 | ||
2221 | MacroAssembler::JumpList done; | |
2222 | ||
2223 | MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1.fpr(), op2.fpr()); | |
2224 | ||
2225 | // op2 is either the result (the lesser one for ArithMin, the greater for ArithMax) or one of them is NaN | |
2226 | MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1.fpr(), op2.fpr()); | |
2227 | ||
2228 | // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding | |
2229 | // op1 + op2 and putting it into result. | |
2230 | m_jit.addDouble(op1.fpr(), op2.fpr(), result.fpr()); | |
2231 | done.append(m_jit.jump()); | |
2232 | ||
2233 | op2Less.link(&m_jit); | |
2234 | m_jit.moveDouble(op2.fpr(), result.fpr()); | |
2235 | ||
2236 | if (op1.fpr() != result.fpr()) { | |
2237 | done.append(m_jit.jump()); | |
2238 | ||
2239 | op1Less.link(&m_jit); | |
2240 | m_jit.moveDouble(op1.fpr(), result.fpr()); | |
2241 | } else | |
2242 | op1Less.link(&m_jit); | |
2243 | ||
2244 | done.link(&m_jit); | |
2245 | ||
2246 | doubleResult(result.fpr(), m_compileIndex); | |
2247 | break; | |
2248 | } | |
2249 | ||
2250 | case ArithSqrt: { | |
2251 | SpeculateDoubleOperand op1(this, node.child1()); | |
2252 | FPRTemporary result(this, op1); | |
2253 | ||
2254 | m_jit.sqrtDouble(op1.fpr(), result.fpr()); | |
2255 | ||
2256 | doubleResult(result.fpr(), m_compileIndex); | |
2257 | break; | |
2258 | } | |
2259 | ||
2260 | case LogicalNot: | |
2261 | compileLogicalNot(node); | |
2262 | break; | |
2263 | ||
2264 | case CompareLess: | |
2265 | if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess)) | |
2266 | return; | |
2267 | break; | |
2268 | ||
2269 | case CompareLessEq: | |
2270 | if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq)) | |
2271 | return; | |
2272 | break; | |
2273 | ||
2274 | case CompareGreater: | |
2275 | if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater)) | |
2276 | return; | |
2277 | break; | |
2278 | ||
2279 | case CompareGreaterEq: | |
2280 | if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq)) | |
2281 | return; | |
2282 | break; | |
2283 | ||
2284 | case CompareEq: | |
2285 | if (isNullConstant(node.child1().index())) { | |
2286 | if (nonSpeculativeCompareNull(node, node.child2())) | |
2287 | return; | |
2288 | break; | |
2289 | } | |
2290 | if (isNullConstant(node.child2().index())) { | |
2291 | if (nonSpeculativeCompareNull(node, node.child1())) | |
2292 | return; | |
2293 | break; | |
2294 | } | |
2295 | if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq)) | |
2296 | return; | |
2297 | break; | |
2298 | ||
2299 | case CompareStrictEq: | |
2300 | if (compileStrictEq(node)) | |
2301 | return; | |
2302 | break; | |
2303 | ||
2304 | case StringCharCodeAt: { | |
2305 | compileGetCharCodeAt(node); | |
2306 | break; | |
2307 | } | |
2308 | ||
2309 | case StringCharAt: { | |
2310 | // Relies on the StringCharAt node having the same basic layout as GetByVal. | |
2311 | compileGetByValOnString(node); | |
2312 | break; | |
2313 | } | |
2314 | ||
2315 | case GetByVal: { | |
2316 | if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) { | |
2317 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); | |
2318 | break; | |
2319 | } | |
2320 | ||
2321 | if (!at(node.child2()).shouldSpeculateInteger() || !isActionableArrayPrediction(at(node.child1()).prediction())) { | |
2322 | JSValueOperand base(this, node.child1()); | |
2323 | JSValueOperand property(this, node.child2()); | |
2324 | GPRReg baseGPR = base.gpr(); | |
2325 | GPRReg propertyGPR = property.gpr(); | |
2326 | ||
2327 | flushRegisters(); | |
2328 | GPRResult result(this); | |
2329 | callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR); | |
2330 | ||
2331 | jsValueResult(result.gpr(), m_compileIndex); | |
2332 | break; | |
2333 | } | |
2334 | ||
2335 | if (at(node.child1()).prediction() == PredictString) { | |
2336 | compileGetByValOnString(node); | |
2337 | if (!m_compileOkay) | |
2338 | return; | |
2339 | break; | |
2340 | } | |
2341 | ||
2342 | if (at(node.child1()).shouldSpeculateInt8Array()) { | |
2343 | compileGetByValOnIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); | |
2344 | if (!m_compileOkay) | |
2345 | return; | |
2346 | break; | |
2347 | } | |
2348 | ||
2349 | if (at(node.child1()).shouldSpeculateInt16Array()) { | |
2350 | compileGetByValOnIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); | |
2351 | if (!m_compileOkay) | |
2352 | return; | |
2353 | break; | |
2354 | } | |
2355 | ||
2356 | if (at(node.child1()).shouldSpeculateInt32Array()) { | |
2357 | compileGetByValOnIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); | |
2358 | if (!m_compileOkay) | |
2359 | return; | |
2360 | break; | |
2361 | } | |
2362 | ||
2363 | if (at(node.child1()).shouldSpeculateUint8Array()) { | |
2364 | compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); | |
2365 | if (!m_compileOkay) | |
2366 | return; | |
2367 | break; | |
2368 | } | |
2369 | ||
2370 | if (at(node.child1()).shouldSpeculateUint8ClampedArray()) { | |
2371 | compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); | |
2372 | if (!m_compileOkay) | |
2373 | return; | |
2374 | break; | |
2375 | } | |
2376 | ||
2377 | if (at(node.child1()).shouldSpeculateUint16Array()) { | |
2378 | compileGetByValOnIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); | |
2379 | if (!m_compileOkay) | |
2380 | return; | |
2381 | break; | |
2382 | } | |
2383 | ||
2384 | if (at(node.child1()).shouldSpeculateUint32Array()) { | |
2385 | compileGetByValOnIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); | |
2386 | if (!m_compileOkay) | |
2387 | return; | |
2388 | break; | |
2389 | } | |
2390 | ||
2391 | if (at(node.child1()).shouldSpeculateFloat32Array()) { | |
2392 | compileGetByValOnFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); | |
2393 | if (!m_compileOkay) | |
2394 | return; | |
2395 | break; | |
2396 | } | |
2397 | ||
2398 | if (at(node.child1()).shouldSpeculateFloat64Array()) { | |
2399 | compileGetByValOnFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); | |
2400 | if (!m_compileOkay) | |
2401 | return; | |
2402 | break; | |
2403 | } | |
2404 | ||
2405 | ASSERT(at(node.child1()).shouldSpeculateArray()); | |
2406 | ||
2407 | SpeculateCellOperand base(this, node.child1()); | |
2408 | SpeculateStrictInt32Operand property(this, node.child2()); | |
2409 | StorageOperand storage(this, node.child3()); | |
2410 | ||
2411 | GPRReg baseReg = base.gpr(); | |
2412 | GPRReg propertyReg = property.gpr(); | |
2413 | GPRReg storageReg = storage.gpr(); | |
2414 | ||
2415 | if (!m_compileOkay) | |
2416 | return; | |
2417 | ||
2418 | if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) | |
2419 | speculationCheck(BadType, JSValueRegs(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); | |
2420 | speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()))); | |
2421 | ||
2422 | // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache | |
2423 | // the storage pointer - especially if there happens to be another register free right now. If we do so, | |
2424 | // then we'll need to allocate a new temporary for result. | |
2425 | GPRTemporary result(this); | |
2426 | m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr()); | |
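 | // An all-zero entry in m_vector is a hole; bail out via an OSR exit rather than handling holes | |
 | // on this fast path. | |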
2427 | speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr())); | |
2428 | ||
2429 | jsValueResult(result.gpr(), m_compileIndex); | |
2430 | break; | |
2431 | } | |
2432 | ||
2433 | case PutByVal: { | |
2434 | if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) { | |
2435 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); | |
2436 | break; | |
2437 | } | |
2438 | ||
2439 | if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArrayPrediction(at(node.child1()).prediction())) { | |
2440 | JSValueOperand arg1(this, node.child1()); | |
2441 | JSValueOperand arg2(this, node.child2()); | |
2442 | JSValueOperand arg3(this, node.child3()); | |
2443 | GPRReg arg1GPR = arg1.gpr(); | |
2444 | GPRReg arg2GPR = arg2.gpr(); | |
2445 | GPRReg arg3GPR = arg3.gpr(); | |
2446 | flushRegisters(); | |
2447 | ||
2448 | callOperation(m_jit.strictModeFor(node.codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR); | |
2449 | ||
2450 | noResult(m_compileIndex); | |
2451 | break; | |
2452 | } | |
2453 | ||
2454 | SpeculateCellOperand base(this, node.child1()); | |
2455 | SpeculateStrictInt32Operand property(this, node.child2()); | |
2456 | if (at(node.child1()).shouldSpeculateInt8Array()) { | |
2457 | compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); | |
2458 | if (!m_compileOkay) | |
2459 | return; | |
2460 | break; | |
2461 | } | |
2462 | ||
2463 | if (at(node.child1()).shouldSpeculateInt16Array()) { | |
2464 | compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); | |
2465 | if (!m_compileOkay) | |
2466 | return; | |
2467 | break; | |
2468 | } | |
2469 | ||
2470 | if (at(node.child1()).shouldSpeculateInt32Array()) { | |
2471 | compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); | |
2472 | if (!m_compileOkay) | |
2473 | return; | |
2474 | break; | |
2475 | } | |
2476 | ||
2477 | if (at(node.child1()).shouldSpeculateUint8Array()) { | |
2478 | compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); | |
2479 | if (!m_compileOkay) | |
2480 | return; | |
2481 | break; | |
2482 | } | |
2483 | ||
2484 | if (at(node.child1()).shouldSpeculateUint8ClampedArray()) { | |
2485 | compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding); | |
2486 | break; | |
2487 | } | |
2488 | ||
2489 | if (at(node.child1()).shouldSpeculateUint16Array()) { | |
2490 | compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); | |
2491 | if (!m_compileOkay) | |
2492 | return; | |
2493 | break; | |
2494 | } | |
2495 | ||
2496 | if (at(node.child1()).shouldSpeculateUint32Array()) { | |
2497 | compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); | |
2498 | if (!m_compileOkay) | |
2499 | return; | |
2500 | break; | |
2501 | } | |
2502 | ||
2503 | if (at(node.child1()).shouldSpeculateFloat32Array()) { | |
2504 | compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); | |
2505 | if (!m_compileOkay) | |
2506 | return; | |
2507 | break; | |
2508 | } | |
2509 | ||
2510 | if (at(node.child1()).shouldSpeculateFloat64Array()) { | |
2511 | compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); | |
2512 | if (!m_compileOkay) | |
2513 | return; | |
2514 | break; | |
2515 | } | |
2516 | ||
2517 | ASSERT(at(node.child1()).shouldSpeculateArray()); | |
2518 | ||
2519 | JSValueOperand value(this, node.child3()); | |
2520 | GPRTemporary scratch(this); | |
2521 | ||
2522 | // Map base, property & value into registers, allocate a scratch register. | |
2523 | GPRReg baseReg = base.gpr(); | |
2524 | GPRReg propertyReg = property.gpr(); | |
2525 | GPRReg valueReg = value.gpr(); | |
2526 | GPRReg scratchReg = scratch.gpr(); | |
2527 | ||
2528 | if (!m_compileOkay) | |
2529 | return; | |
2530 | ||
2531 | writeBarrier(baseReg, value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg); | |
2532 | ||
2533 | // Check that base is an array, and that property is contained within m_vector (< m_vectorLength). | |
2534 | // If we have predicted the base to be type array, we can skip the check. | |
2535 | if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) | |
2536 | speculationCheck(BadType, JSValueRegs(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); | |
2537 | ||
2538 | base.use(); | |
2539 | property.use(); | |
2540 | value.use(); | |
2541 | ||
2542 | MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())); | |
2543 | ||
2544 | // Code to handle put beyond array bounds. | |
2545 | silentSpillAllRegisters(scratchReg); | |
2546 | callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, baseReg, propertyReg, valueReg); | |
2547 | silentFillAllRegisters(scratchReg); | |
2548 | JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump(); | |
2549 | ||
2550 | withinArrayBounds.link(&m_jit); | |
2551 | ||
2552 | // Get the array storage. | |
2553 | GPRReg storageReg = scratchReg; | |
2554 | m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg); | |
2555 | ||
2556 | // Check if we're writing to a hole; if so increment m_numValuesInVector. | |
2557 | MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); | |
2558 | m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); | |
2559 | ||
2560 | // If we're writing to a hole we might be growing the array; if the index is at or beyond m_length, bump m_length to index + 1. | |
2561 | MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length))); | |
2562 | m_jit.add32(TrustedImm32(1), propertyReg); | |
2563 | m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length))); | |
2564 | m_jit.sub32(TrustedImm32(1), propertyReg); | |
2565 | ||
2566 | lengthDoesNotNeedUpdate.link(&m_jit); | |
2567 | notHoleValue.link(&m_jit); | |
2568 | ||
2569 | // Store the value to the array. | |
2570 | m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); | |
2571 | ||
2572 | wasBeyondArrayBounds.link(&m_jit); | |
2573 | ||
2574 | noResult(m_compileIndex, UseChildrenCalledExplicitly); | |
2575 | break; | |
2576 | } | |
2577 | ||
2578 | case PutByValAlias: { | |
2579 | if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) { | |
2580 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); | |
2581 | break; | |
2582 | } | |
2583 | ||
2584 | ASSERT(isActionableMutableArrayPrediction(at(node.child1()).prediction())); | |
2585 | ASSERT(at(node.child2()).shouldSpeculateInteger()); | |
2586 | ||
2587 | SpeculateCellOperand base(this, node.child1()); | |
2588 | SpeculateStrictInt32Operand property(this, node.child2()); | |
2589 | if (at(node.child1()).shouldSpeculateInt8Array()) { | |
2590 | compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray); | |
2591 | if (!m_compileOkay) | |
2592 | return; | |
2593 | break; | |
2594 | } | |
2595 | ||
2596 | if (at(node.child1()).shouldSpeculateInt16Array()) { | |
2597 | compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray); | |
2598 | if (!m_compileOkay) | |
2599 | return; | |
2600 | break; | |
2601 | } | |
2602 | ||
2603 | if (at(node.child1()).shouldSpeculateInt32Array()) { | |
2604 | compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray); | |
2605 | if (!m_compileOkay) | |
2606 | return; | |
2607 | break; | |
2608 | } | |
2609 | ||
2610 | if (at(node.child1()).shouldSpeculateUint8Array()) { | |
2611 | compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray); | |
2612 | if (!m_compileOkay) | |
2613 | return; | |
2614 | break; | |
2615 | } | |
2616 | ||
2617 | if (at(node.child1()).shouldSpeculateUint8ClampedArray()) { | |
2618 | compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding); | |
2619 | if (!m_compileOkay) | |
2620 | return; | |
2621 | break; | |
2622 | } | |
2623 | ||
2624 | if (at(node.child1()).shouldSpeculateUint16Array()) { | |
2625 | compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray); | |
2626 | if (!m_compileOkay) | |
2627 | return; | |
2628 | break; | |
2629 | } | |
2630 | ||
2631 | if (at(node.child1()).shouldSpeculateUint32Array()) { | |
2632 | compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray); | |
2633 | if (!m_compileOkay) | |
2634 | return; | |
2635 | break; | |
2636 | } | |
2637 | ||
2638 | if (at(node.child1()).shouldSpeculateFloat32Array()) { | |
2639 | compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck); | |
2640 | if (!m_compileOkay) | |
2641 | return; | |
2642 | break; | |
2643 | } | |
2644 | ||
2645 | if (at(node.child1()).shouldSpeculateFloat64Array()) { | |
2646 | compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck); | |
2647 | if (!m_compileOkay) | |
2648 | return; | |
2649 | break; | |
2650 | } | |
2651 | ||
2652 | ASSERT(at(node.child1()).shouldSpeculateArray()); | |
2653 | ||
2654 | JSValueOperand value(this, node.child3()); | |
2655 | GPRTemporary scratch(this); | |
2656 | ||
2657 | GPRReg baseReg = base.gpr(); | |
2658 | GPRReg scratchReg = scratch.gpr(); | |
2659 | ||
2660 | writeBarrier(base.gpr(), value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg); | |
2661 | ||
2662 | // Get the array storage. | |
2663 | GPRReg storageReg = scratchReg; | |
2664 | m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg); | |
2665 | ||
2666 | // Store the value to the array. | |
2667 | GPRReg propertyReg = property.gpr(); | |
2668 | GPRReg valueReg = value.gpr(); | |
2669 | m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); | |
2670 | ||
2671 | noResult(m_compileIndex); | |
2672 | break; | |
2673 | } | |
2674 | ||
2675 | case RegExpExec: { | |
2676 | if (compileRegExpExec(node)) | |
2677 | return; | |
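| // If the result of this RegExpExec is never used, only the match/no-match outcome | |
| // matters, so fall back to the cheaper operationRegExpTest call instead of | |
| // materialising the full match result. | |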
2678 | if (!node.adjustedRefCount()) { | |
2679 | SpeculateCellOperand base(this, node.child1()); | |
2680 | SpeculateCellOperand argument(this, node.child2()); | |
2681 | GPRReg baseGPR = base.gpr(); | |
2682 | GPRReg argumentGPR = argument.gpr(); | |
2683 | ||
2684 | flushRegisters(); | |
2685 | GPRResult result(this); | |
2686 | callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); | |
2687 | ||
2688 | // Must use jsValueResult here; otherwise we confuse the register | |
2689 | // allocator, which believes that this node produces a result. | |
2690 | jsValueResult(result.gpr(), m_compileIndex); | |
2691 | break; | |
2692 | } | |
2693 | ||
2694 | SpeculateCellOperand base(this, node.child1()); | |
2695 | SpeculateCellOperand argument(this, node.child2()); | |
2696 | GPRReg baseGPR = base.gpr(); | |
2697 | GPRReg argumentGPR = argument.gpr(); | |
2698 | ||
2699 | flushRegisters(); | |
2700 | GPRResult result(this); | |
2701 | callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR); | |
2702 | ||
2703 | jsValueResult(result.gpr(), m_compileIndex); | |
2704 | break; | |
2705 | } | |
2706 | ||
2707 | case RegExpTest: { | |
2708 | SpeculateCellOperand base(this, node.child1()); | |
2709 | SpeculateCellOperand argument(this, node.child2()); | |
2710 | GPRReg baseGPR = base.gpr(); | |
2711 | GPRReg argumentGPR = argument.gpr(); | |
2712 | ||
2713 | flushRegisters(); | |
2714 | GPRResult result(this); | |
2715 | callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); | |
2716 | ||
2717 | // If we add a DataFormatBool, we should use it here. | |
2718 | m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); | |
2719 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
2720 | break; | |
2721 | } | |
2722 | ||
2723 | case ArrayPush: { | |
2724 | SpeculateCellOperand base(this, node.child1()); | |
2725 | JSValueOperand value(this, node.child2()); | |
2726 | GPRTemporary storage(this); | |
2727 | GPRTemporary storageLength(this); | |
2728 | ||
2729 | GPRReg baseGPR = base.gpr(); | |
2730 | GPRReg valueGPR = value.gpr(); | |
2731 | GPRReg storageGPR = storage.gpr(); | |
2732 | GPRReg storageLengthGPR = storageLength.gpr(); | |
2733 | ||
2734 | writeBarrier(baseGPR, valueGPR, node.child2(), WriteBarrierForPropertyAccess, storageGPR, storageLengthGPR); | |
2735 | ||
2736 | if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) | |
2737 | speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); | |
2738 | ||
2739 | m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR); | |
2740 | m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR); | |
2741 | ||
2742 | // Refuse to handle bizarre lengths. | |
2743 | speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe))); | |
2744 | ||
2745 | MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset())); | |
2746 | ||
2747 | m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); | |
2748 | ||
2749 | m_jit.add32(TrustedImm32(1), storageLengthGPR); | |
2750 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length))); | |
2751 | m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); | |
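| // Box the new length: under the JSVALUE64 encoding an int32 becomes a JSValue by | |
| // OR-ing in the tagTypeNumber bits, so storageLengthGPR ends up holding the boxed | |
| // value that ArrayPush returns. | |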
2752 | m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR); | |
2753 | ||
2754 | MacroAssembler::Jump done = m_jit.jump(); | |
2755 | ||
2756 | slowPath.link(&m_jit); | |
2757 | ||
2758 | silentSpillAllRegisters(storageLengthGPR); | |
2759 | callOperation(operationArrayPush, storageLengthGPR, valueGPR, baseGPR); | |
2760 | silentFillAllRegisters(storageLengthGPR); | |
2761 | ||
2762 | done.link(&m_jit); | |
2763 | ||
2764 | jsValueResult(storageLengthGPR, m_compileIndex); | |
2765 | break; | |
2766 | } | |
2767 | ||
2768 | case ArrayPop: { | |
2769 | SpeculateCellOperand base(this, node.child1()); | |
2770 | GPRTemporary value(this); | |
2771 | GPRTemporary storage(this); | |
2772 | GPRTemporary storageLength(this); | |
2773 | ||
2774 | GPRReg baseGPR = base.gpr(); | |
2775 | GPRReg valueGPR = value.gpr(); | |
2776 | GPRReg storageGPR = storage.gpr(); | |
2777 | GPRReg storageLengthGPR = storageLength.gpr(); | |
2778 | ||
2779 | if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) | |
2780 | speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); | |
2781 | ||
2782 | m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR); | |
2783 | m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR); | |
2784 | ||
2785 | MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR); | |
2786 | ||
2787 | m_jit.sub32(TrustedImm32(1), storageLengthGPR); | |
2788 | ||
2789 | MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset())); | |
2790 | ||
2791 | m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR); | |
2792 | ||
2793 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length))); | |
2794 | ||
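| // Under the JSVALUE64 encoding the empty JSValue is all-zero bits, so a zero slot | |
| // means we popped a hole; holes (and pops from an empty array) yield undefined below. | |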
2795 | MacroAssembler::Jump holeCase = m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR); | |
2796 | ||
2797 | m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); | |
2798 | m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); | |
2799 | ||
2800 | MacroAssembler::JumpList done; | |
2801 | ||
2802 | done.append(m_jit.jump()); | |
2803 | ||
2804 | holeCase.link(&m_jit); | |
2805 | emptyArrayCase.link(&m_jit); | |
2806 | m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR); | |
2807 | done.append(m_jit.jump()); | |
2808 | ||
2809 | slowCase.link(&m_jit); | |
2810 | ||
2811 | silentSpillAllRegisters(valueGPR); | |
2812 | callOperation(operationArrayPop, valueGPR, baseGPR); | |
2813 | silentFillAllRegisters(valueGPR); | |
2814 | ||
2815 | done.link(&m_jit); | |
2816 | ||
2817 | jsValueResult(valueGPR, m_compileIndex); | |
2818 | break; | |
2819 | } | |
2820 | ||
2821 | case DFG::Jump: { | |
2822 | BlockIndex taken = node.takenBlockIndex(); | |
2823 | jump(taken); | |
2824 | noResult(m_compileIndex); | |
2825 | break; | |
2826 | } | |
2827 | ||
2828 | case Branch: | |
2829 | if (isStrictInt32(node.child1().index()) || at(node.child1()).shouldSpeculateInteger()) { | |
2830 | SpeculateIntegerOperand op(this, node.child1()); | |
2831 | ||
2832 | BlockIndex taken = node.takenBlockIndex(); | |
2833 | BlockIndex notTaken = node.notTakenBlockIndex(); | |
2834 | ||
2835 | MacroAssembler::ResultCondition condition = MacroAssembler::NonZero; | |
2836 | ||
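| // If the taken block is the one that immediately follows this block, invert the | |
| // condition and swap the targets so that the common case falls through instead of | |
| // jumping. | |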
2837 | if (taken == (m_block + 1)) { | |
2838 | condition = MacroAssembler::Zero; | |
2839 | BlockIndex tmp = taken; | |
2840 | taken = notTaken; | |
2841 | notTaken = tmp; | |
2842 | } | |
2843 | ||
2844 | branchTest32(condition, op.gpr(), taken); | |
2845 | jump(notTaken); | |
2846 | ||
2847 | noResult(m_compileIndex); | |
2848 | break; | |
2849 | } | |
2850 | emitBranch(node); | |
2851 | break; | |
2852 | ||
2853 | case Return: { | |
2854 | ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1); | |
2855 | ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR); | |
2856 | ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister); | |
2857 | ||
2858 | #if DFG_ENABLE(SUCCESS_STATS) | |
2859 | static SamplingCounter counter("SpeculativeJIT"); | |
2860 | m_jit.emitCount(counter); | |
2861 | #endif | |
2862 | ||
2863 | // Return the result in returnValueGPR. | |
2864 | JSValueOperand op1(this, node.child1()); | |
2865 | m_jit.move(op1.gpr(), GPRInfo::returnValueGPR); | |
2866 | ||
2867 | // Grab the return address. | |
2868 | m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT1); | |
2869 | // Restore our caller's "r". | |
2870 | m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister); | |
2871 | // Return. | |
2872 | m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1); | |
2873 | m_jit.ret(); | |
2874 | ||
2875 | noResult(m_compileIndex); | |
2876 | break; | |
2877 | } | |
2878 | ||
2879 | case Throw: | |
2880 | case ThrowReferenceError: { | |
2881 | // We expect that throw statements are rare and are intended to exit the code block | |
2882 | // anyway, so we just OSR back to the old JIT for now. | |
2883 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); | |
2884 | break; | |
2885 | } | |
2886 | ||
2887 | case ToPrimitive: { | |
2888 | if (at(node.child1()).shouldSpeculateInteger()) { | |
2889 | // Speculating integer here is very profitable: it is cheap, requires almost | |
2890 | // no real work, and emits a lot less code. | |
2891 | ||
2892 | SpeculateIntegerOperand op1(this, node.child1()); | |
2893 | GPRTemporary result(this, op1); | |
2894 | ||
2895 | m_jit.move(op1.gpr(), result.gpr()); | |
2896 | if (op1.format() == DataFormatInteger) | |
2897 | m_jit.orPtr(GPRInfo::tagTypeNumberRegister, result.gpr()); | |
2898 | ||
2899 | jsValueResult(result.gpr(), m_compileIndex); | |
2900 | break; | |
2901 | } | |
2902 | ||
2903 | // FIXME: Add string speculation here. | |
2904 | ||
2905 | JSValueOperand op1(this, node.child1()); | |
2906 | GPRTemporary result(this, op1); | |
2907 | ||
2908 | GPRReg op1GPR = op1.gpr(); | |
2909 | GPRReg resultGPR = result.gpr(); | |
2910 | ||
2911 | op1.use(); | |
2912 | ||
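| // If the abstract state proves the value is already a number or boolean, it is | |
| // primitive and can be passed through as-is. Otherwise, non-cells and JSString | |
| // cells are already primitive too, so only other cells take the | |
| // operationToPrimitive slow path. | |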
2913 | if (!(m_state.forNode(node.child1()).m_type & ~(PredictNumber | PredictBoolean))) | |
2914 | m_jit.move(op1GPR, resultGPR); | |
2915 | else { | |
2916 | MacroAssembler::JumpList alreadyPrimitive; | |
2917 | ||
2918 | alreadyPrimitive.append(m_jit.branchTestPtr(MacroAssembler::NonZero, op1GPR, GPRInfo::tagMaskRegister)); | |
2919 | alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info))); | |
2920 | ||
2921 | silentSpillAllRegisters(resultGPR); | |
2922 | callOperation(operationToPrimitive, resultGPR, op1GPR); | |
2923 | silentFillAllRegisters(resultGPR); | |
2924 | ||
2925 | MacroAssembler::Jump done = m_jit.jump(); | |
2926 | ||
2927 | alreadyPrimitive.link(&m_jit); | |
2928 | m_jit.move(op1GPR, resultGPR); | |
2929 | ||
2930 | done.link(&m_jit); | |
2931 | } | |
2932 | ||
2933 | jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); | |
2934 | break; | |
2935 | } | |
2936 | ||
2937 | case StrCat: | |
2938 | case NewArray: { | |
2939 | // We really don't want to grow the register file just to do a StrCat or NewArray. | |
2940 | // Say we have 50 functions on the stack, each containing a StrCat with | |
2941 | // upwards of 10 operands. In the DFG this would mean that each operand gets | |
2942 | // some random virtual register, and then to do the StrCat we'd need a second | |
2943 | // span of 10 registers just to have somewhere to copy the 10 operands to, where | |
2944 | // they'd be contiguous so we could easily tell the C code how to find them. | |
2945 | // Ugly! So instead we use the scratchBuffer infrastructure in JSGlobalData. That | |
2946 | // way, those 50 functions will share the same scratchBuffer for offloading their | |
2947 | // StrCat operands. That is about as good as we can do, unless we start doing | |
2948 | // virtual register coalescing to ensure that operands to StrCat get spilled | |
2949 | // in exactly the place where StrCat wants them, or else have the StrCat | |
2950 | // refer to those operands' SetLocal instructions to force them to spill in | |
2951 | // the right place. Basically, any way you cut it, the current approach | |
2952 | // probably has the best balance of performance and sensibility, in that | |
2953 | // it does not increase the complexity of the DFG JIT just to make StrCat | |
2954 | // fast and pretty. | |
2955 | ||
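| // The operands are copied into a shared, per-JSGlobalData scratch buffer so the C | |
| // helper can find them contiguously. The buffer's active length is published before | |
| // the call so the GC can mark the stored values, and reset to zero afterwards. | |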
2956 | size_t scratchSize = sizeof(EncodedJSValue) * node.numChildren(); | |
2957 | ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(scratchSize); | |
2958 | EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0; | |
2959 | ||
2960 | for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) { | |
2961 | JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]); | |
2962 | GPRReg opGPR = operand.gpr(); | |
2963 | operand.use(); | |
2964 | ||
2965 | m_jit.storePtr(opGPR, buffer + operandIdx); | |
2966 | } | |
2967 | ||
2968 | flushRegisters(); | |
2969 | ||
2970 | if (scratchSize) { | |
2971 | GPRTemporary scratch(this); | |
2972 | ||
2973 | // Tell the GC mark phase how much of the scratch buffer is active during the call. | |
2974 | m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr()); | |
2975 | m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr()); | |
2976 | } | |
2977 | ||
2978 | GPRResult result(this); | |
2979 | ||
2980 | callOperation(op == StrCat ? operationStrCat : operationNewArray, result.gpr(), static_cast<void*>(buffer), node.numChildren()); | |
2981 | ||
2982 | if (scratchSize) { | |
2983 | GPRTemporary scratch(this); | |
2984 | ||
2985 | m_jit.move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratch.gpr()); | |
2986 | m_jit.storePtr(TrustedImmPtr(0), scratch.gpr()); | |
2987 | } | |
2988 | ||
2989 | cellResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly); | |
2990 | break; | |
2991 | } | |
2992 | ||
2993 | case NewArrayBuffer: { | |
2994 | flushRegisters(); | |
2995 | GPRResult result(this); | |
2996 | ||
2997 | callOperation(operationNewArrayBuffer, result.gpr(), node.startConstant(), node.numConstants()); | |
2998 | ||
2999 | cellResult(result.gpr(), m_compileIndex); | |
3000 | break; | |
3001 | } | |
3002 | ||
3003 | case NewRegexp: { | |
3004 | flushRegisters(); | |
3005 | GPRResult result(this); | |
3006 | ||
3007 | callOperation(operationNewRegexp, result.gpr(), m_jit.codeBlock()->regexp(node.regexpIndex())); | |
3008 | ||
3009 | cellResult(result.gpr(), m_compileIndex); | |
3010 | break; | |
3011 | } | |
3012 | ||
3013 | case ConvertThis: { | |
3014 | if (isObjectPrediction(m_state.forNode(node.child1()).m_type)) { | |
3015 | SpeculateCellOperand thisValue(this, node.child1()); | |
3016 | GPRTemporary result(this, thisValue); | |
3017 | m_jit.move(thisValue.gpr(), result.gpr()); | |
3018 | cellResult(result.gpr(), m_compileIndex); | |
3019 | break; | |
3020 | } | |
3021 | ||
3022 | if (isOtherPrediction(at(node.child1()).prediction())) { | |
3023 | JSValueOperand thisValue(this, node.child1()); | |
3024 | GPRTemporary scratch(this, thisValue); | |
3025 | GPRReg thisValueGPR = thisValue.gpr(); | |
3026 | GPRReg scratchGPR = scratch.gpr(); | |
3027 | ||
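| // Under the JSVALUE64 encoding, undefined and null differ only in the | |
| // TagBitUndefined bit, so masking that bit off lets a single comparison against | |
| // ValueNull speculate that the value is either of them. | |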
3028 | if (!isOtherPrediction(m_state.forNode(node.child1()).m_type)) { | |
3029 | m_jit.move(thisValueGPR, scratchGPR); | |
3030 | m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR); | |
3031 | speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)))); | |
3032 | } | |
3033 | ||
3034 | m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalThisObjectFor(node.codeOrigin)), scratchGPR); | |
3035 | cellResult(scratchGPR, m_compileIndex); | |
3036 | break; | |
3037 | } | |
3038 | ||
3039 | if (isObjectPrediction(at(node.child1()).prediction())) { | |
3040 | SpeculateCellOperand thisValue(this, node.child1()); | |
3041 | GPRTemporary result(this, thisValue); | |
3042 | GPRReg thisValueGPR = thisValue.gpr(); | |
3043 | GPRReg resultGPR = result.gpr(); | |
3044 | ||
3045 | if (!isObjectPrediction(m_state.forNode(node.child1()).m_type)) | |
3046 | speculationCheck(BadType, JSValueRegs(thisValueGPR), node.child1(), m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(thisValueGPR, JSCell::classInfoOffset()), JITCompiler::TrustedImmPtr(&JSString::s_info))); | |
3047 | ||
3048 | m_jit.move(thisValueGPR, resultGPR); | |
3049 | ||
3050 | cellResult(resultGPR, m_compileIndex); | |
3051 | break; | |
3052 | } | |
3053 | ||
3054 | JSValueOperand thisValue(this, node.child1()); | |
3055 | GPRReg thisValueGPR = thisValue.gpr(); | |
3056 | ||
3057 | flushRegisters(); | |
3058 | ||
3059 | GPRResult result(this); | |
3060 | callOperation(operationConvertThis, result.gpr(), thisValueGPR); | |
3061 | ||
3062 | cellResult(result.gpr(), m_compileIndex); | |
3063 | break; | |
3064 | } | |
3065 | ||
3066 | case CreateThis: { | |
3067 | // Note that there is not much profit in speculating here. The only things we | |
3068 | // speculate on are (1) that the prototype is a cell, since that eliminates cell | |
3069 | // checks later if the proto is reused, and (2) a FinalObject prediction, because | |
3070 | // we want to get recompiled if the prototype turns out not to be one (since | |
3071 | // otherwise we'd start taking the slow path a lot). | |
3072 | ||
3073 | SpeculateCellOperand proto(this, node.child1()); | |
3074 | GPRTemporary result(this); | |
3075 | GPRTemporary scratch(this); | |
3076 | ||
3077 | GPRReg protoGPR = proto.gpr(); | |
3078 | GPRReg resultGPR = result.gpr(); | |
3079 | GPRReg scratchGPR = scratch.gpr(); | |
3080 | ||
3081 | proto.use(); | |
3082 | ||
3083 | MacroAssembler::JumpList slowPath; | |
3084 | ||
3085 | // Need to verify that the prototype is an object. If we have reason to believe | |
3086 | // that it's a FinalObject then we speculate on that directly. Otherwise we | |
3087 | // do the slow (structure-based) check. | |
3088 | if (at(node.child1()).shouldSpeculateFinalObject()) { | |
3089 | if (!isFinalObjectPrediction(m_state.forNode(node.child1()).m_type)) | |
3090 | speculationCheck(BadType, JSValueRegs(protoGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(protoGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSFinalObject::s_info))); | |
3091 | } else { | |
3092 | m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSCell::structureOffset()), scratchGPR); | |
3093 | slowPath.append(m_jit.branch8(MacroAssembler::Below, MacroAssembler::Address(scratchGPR, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType))); | |
3094 | } | |
3095 | ||
3096 | // Load the inheritorID (the Structure used for objects whose prototype is the | |
3097 | // object in protoGPR). If the inheritorID is not set, take the slow path. | |
3098 | m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSObject::offsetOfInheritorID()), scratchGPR); | |
3099 | slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR)); | |
3100 | ||
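| // Fast path: try to allocate a JSFinalObject inline using the inheritorID | |
| // Structure; if the inline allocation fails, or any of the checks above failed, | |
| // the slow path calls into the runtime to create the object. | |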
3101 | emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath); | |
3102 | ||
3103 | MacroAssembler::Jump done = m_jit.jump(); | |
3104 | ||
3105 | slowPath.link(&m_jit); | |
3106 | ||
3107 | silentSpillAllRegisters(resultGPR); | |
3108 | if (node.codeOrigin.inlineCallFrame) | |
3109 | callOperation(operationCreateThisInlined, resultGPR, protoGPR, node.codeOrigin.inlineCallFrame->callee.get()); | |
3110 | else | |
3111 | callOperation(operationCreateThis, resultGPR, protoGPR); | |
3112 | silentFillAllRegisters(resultGPR); | |
3113 | ||
3114 | done.link(&m_jit); | |
3115 | ||
3116 | cellResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); | |
3117 | break; | |
3118 | } | |
3119 | ||
3120 | case NewObject: { | |
3121 | GPRTemporary result(this); | |
3122 | GPRTemporary scratch(this); | |
3123 | ||
3124 | GPRReg resultGPR = result.gpr(); | |
3125 | GPRReg scratchGPR = scratch.gpr(); | |
3126 | ||
3127 | MacroAssembler::JumpList slowPath; | |
3128 | ||
3129 | emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)->emptyObjectStructure()), resultGPR, scratchGPR, slowPath); | |
3130 | ||
3131 | MacroAssembler::Jump done = m_jit.jump(); | |
3132 | ||
3133 | slowPath.link(&m_jit); | |
3134 | ||
3135 | silentSpillAllRegisters(resultGPR); | |
3136 | callOperation(operationNewObject, resultGPR); | |
3137 | silentFillAllRegisters(resultGPR); | |
3138 | ||
3139 | done.link(&m_jit); | |
3140 | ||
3141 | cellResult(resultGPR, m_compileIndex); | |
3142 | break; | |
3143 | } | |
3144 | ||
3145 | case GetCallee: { | |
3146 | GPRTemporary result(this); | |
3147 | m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr()); | |
3148 | cellResult(result.gpr(), m_compileIndex); | |
3149 | break; | |
3150 | } | |
3151 | ||
3152 | case GetScopeChain: { | |
3153 | GPRTemporary result(this); | |
3154 | GPRReg resultGPR = result.gpr(); | |
3155 | ||
3156 | m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR); | |
3157 | bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain(); | |
3158 | int skip = node.scopeChainDepth(); | |
3159 | ASSERT(skip || !checkTopLevel); | |
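| // Walk 'skip' links up the scope chain. If this code block needs a full scope | |
| // chain, the top-level activation may not have been created yet; in that case the | |
| // first link is not followed (the branch below jumps over the load when the | |
| // activation register is still empty). | |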
3160 | if (checkTopLevel && skip--) { | |
3161 | JITCompiler::Jump activationNotCreated; | |
3162 | if (checkTopLevel) | |
3163 | activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister()))); | |
3164 | m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR); | |
3165 | activationNotCreated.link(&m_jit); | |
3166 | } | |
3167 | while (skip--) | |
3168 | m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR); | |
3169 | ||
3170 | m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, object)), resultGPR); | |
3171 | ||
3172 | cellResult(resultGPR, m_compileIndex); | |
3173 | break; | |
3174 | } | |
3175 | case GetScopedVar: { | |
3176 | SpeculateCellOperand scopeChain(this, node.child1()); | |
3177 | GPRTemporary result(this); | |
3178 | GPRReg resultGPR = result.gpr(); | |
3179 | m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), resultGPR); | |
3180 | m_jit.loadPtr(JITCompiler::Address(resultGPR, node.varNumber() * sizeof(Register)), resultGPR); | |
3181 | jsValueResult(resultGPR, m_compileIndex); | |
3182 | break; | |
3183 | } | |
3184 | case PutScopedVar: { | |
3185 | SpeculateCellOperand scopeChain(this, node.child1()); | |
3186 | GPRTemporary scratchRegister(this); | |
3187 | GPRReg scratchGPR = scratchRegister.gpr(); | |
3188 | m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR); | |
3189 | JSValueOperand value(this, node.child2()); | |
3190 | m_jit.storePtr(value.gpr(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register))); | |
3191 | writeBarrier(scopeChain.gpr(), value.gpr(), node.child2(), WriteBarrierForVariableAccess, scratchGPR); | |
3192 | noResult(m_compileIndex); | |
3193 | break; | |
3194 | } | |
3195 | case GetById: { | |
3196 | if (!node.prediction()) { | |
3197 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); | |
3198 | break; | |
3199 | } | |
3200 | ||
3201 | if (isCellPrediction(at(node.child1()).prediction())) { | |
3202 | SpeculateCellOperand base(this, node.child1()); | |
3203 | GPRTemporary result(this, base); | |
3204 | ||
3205 | GPRReg baseGPR = base.gpr(); | |
3206 | GPRReg resultGPR = result.gpr(); | |
3207 | GPRReg scratchGPR; | |
3208 | ||
3209 | if (resultGPR == baseGPR) | |
3210 | scratchGPR = tryAllocate(); | |
3211 | else | |
3212 | scratchGPR = resultGPR; | |
3213 | ||
3214 | base.use(); | |
3215 | ||
3216 | cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber()); | |
3217 | ||
3218 | jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); | |
3219 | break; | |
3220 | } | |
3221 | ||
3222 | JSValueOperand base(this, node.child1()); | |
3223 | GPRTemporary result(this, base); | |
3224 | ||
3225 | GPRReg baseGPR = base.gpr(); | |
3226 | GPRReg resultGPR = result.gpr(); | |
3227 | GPRReg scratchGPR; | |
3228 | ||
3229 | if (resultGPR == baseGPR) | |
3230 | scratchGPR = tryAllocate(); | |
3231 | else | |
3232 | scratchGPR = resultGPR; | |
3233 | ||
3234 | base.use(); | |
3235 | ||
3236 | JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister); | |
3237 | ||
3238 | cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell); | |
3239 | ||
3240 | jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); | |
3241 | ||
3242 | break; | |
3243 | } | |
3244 | ||
3245 | case GetByIdFlush: { | |
3246 | if (!node.prediction()) { | |
3247 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); | |
3248 | break; | |
3249 | } | |
3250 | ||
3251 | if (isCellPrediction(at(node.child1()).prediction())) { | |
3252 | SpeculateCellOperand base(this, node.child1()); | |
3253 | GPRReg baseGPR = base.gpr(); | |
3254 | ||
3255 | GPRResult result(this); | |
3256 | ||
3257 | GPRReg resultGPR = result.gpr(); | |
3258 | ||
3259 | GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR); | |
3260 | ||
3261 | base.use(); | |
3262 | ||
3263 | flushRegisters(); | |
3264 | ||
3265 | cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill); | |
3266 | ||
3267 | jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); | |
3268 | break; | |
3269 | } | |
3270 | ||
3271 | JSValueOperand base(this, node.child1()); | |
3272 | GPRReg baseGPR = base.gpr(); | |
3273 | ||
3274 | GPRResult result(this); | |
3275 | GPRReg resultGPR = result.gpr(); | |
3276 | ||
3277 | GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR); | |
3278 | ||
3279 | base.use(); | |
3280 | flushRegisters(); | |
3281 | ||
3282 | JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister); | |
3283 | ||
3284 | cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill); | |
3285 | ||
3286 | jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); | |
3287 | ||
3288 | break; | |
3289 | } | |
3290 | ||
3291 | case GetArrayLength: { | |
3292 | SpeculateCellOperand base(this, node.child1()); | |
3293 | GPRTemporary result(this); | |
3294 | ||
3295 | GPRReg baseGPR = base.gpr(); | |
3296 | GPRReg resultGPR = result.gpr(); | |
3297 | ||
3298 | if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) | |
3299 | speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); | |
3300 | ||
3301 | m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), resultGPR); | |
3302 | m_jit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), resultGPR); | |
3303 | ||
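| // ArrayStorage::m_length is unsigned; a value of 2^31 or more cannot be returned | |
| // as an int32, so in that (rare) case we OSR-exit rather than produce a wrong result. | |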
3304 | speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultGPR, MacroAssembler::TrustedImm32(0))); | |
3305 | ||
3306 | integerResult(resultGPR, m_compileIndex); | |
3307 | break; | |
3308 | } | |
3309 | ||
3310 | case GetStringLength: { | |
3311 | SpeculateCellOperand base(this, node.child1()); | |
3312 | GPRTemporary result(this); | |
3313 | ||
3314 | GPRReg baseGPR = base.gpr(); | |
3315 | GPRReg resultGPR = result.gpr(); | |
3316 | ||
3317 | if (!isStringPrediction(m_state.forNode(node.child1()).m_type)) | |
3318 | speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info))); | |
3319 | ||
3320 | m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR); | |
3321 | ||
3322 | integerResult(resultGPR, m_compileIndex); | |
3323 | break; | |
3324 | } | |
3325 | ||
3326 | case GetInt8ArrayLength: { | |
3327 | compileGetTypedArrayLength(m_jit.globalData()->int8ArrayDescriptor(), node, !isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3328 | break; | |
3329 | } | |
3330 | case GetInt16ArrayLength: { | |
3331 | compileGetTypedArrayLength(m_jit.globalData()->int16ArrayDescriptor(), node, !isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3332 | break; | |
3333 | } | |
3334 | case GetInt32ArrayLength: { | |
3335 | compileGetTypedArrayLength(m_jit.globalData()->int32ArrayDescriptor(), node, !isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3336 | break; | |
3337 | } | |
3338 | case GetUint8ArrayLength: { | |
3339 | compileGetTypedArrayLength(m_jit.globalData()->uint8ArrayDescriptor(), node, !isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3340 | break; | |
3341 | } | |
3342 | case GetUint8ClampedArrayLength: { | |
3343 | compileGetTypedArrayLength(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, !isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3344 | break; | |
3345 | } | |
3346 | case GetUint16ArrayLength: { | |
3347 | compileGetTypedArrayLength(m_jit.globalData()->uint16ArrayDescriptor(), node, !isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3348 | break; | |
3349 | } | |
3350 | case GetUint32ArrayLength: { | |
3351 | compileGetTypedArrayLength(m_jit.globalData()->uint32ArrayDescriptor(), node, !isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3352 | break; | |
3353 | } | |
3354 | case GetFloat32ArrayLength: { | |
3355 | compileGetTypedArrayLength(m_jit.globalData()->float32ArrayDescriptor(), node, !isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3356 | break; | |
3357 | } | |
3358 | case GetFloat64ArrayLength: { | |
3359 | compileGetTypedArrayLength(m_jit.globalData()->float64ArrayDescriptor(), node, !isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type)); | |
3360 | break; | |
3361 | } | |
3362 | case CheckFunction: { | |
3363 | SpeculateCellOperand function(this, node.child1()); | |
3364 | speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node.function())); | |
3365 | noResult(m_compileIndex); | |
3366 | break; | |
3367 | } | |
3368 | case CheckStructure: { | |
3369 | if (m_state.forNode(node.child1()).m_structure.isSubsetOf(node.structureSet())) { | |
3370 | noResult(m_compileIndex); | |
3371 | break; | |
3372 | } | |
3373 | ||
3374 | SpeculateCellOperand base(this, node.child1()); | |
3375 | ||
3376 | ASSERT(node.structureSet().size()); | |
3377 | ||
3378 | if (node.structureSet().size() == 1) | |
3379 | speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(base.gpr(), JSCell::structureOffset()), node.structureSet()[0])); | |
3380 | else { | |
3381 | GPRTemporary structure(this); | |
3382 | ||
3383 | m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr()); | |
3384 | ||
3385 | JITCompiler::JumpList done; | |
3386 | ||
3387 | for (size_t i = 0; i < node.structureSet().size() - 1; ++i) | |
3388 | done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i])); | |
3389 | ||
3390 | speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, structure.gpr(), node.structureSet().last())); | |
3391 | ||
3392 | done.link(&m_jit); | |
3393 | } | |
3394 | ||
3395 | noResult(m_compileIndex); | |
3396 | break; | |
3397 | } | |
3398 | ||
3399 | case PutStructure: { | |
3400 | SpeculateCellOperand base(this, node.child1()); | |
3401 | GPRReg baseGPR = base.gpr(); | |
3402 | ||
3403 | m_jit.addWeakReferenceTransition( | |
3404 | node.codeOrigin.codeOriginOwner(), | |
3405 | node.structureTransitionData().previousStructure, | |
3406 | node.structureTransitionData().newStructure); | |
3407 | ||
3408 | #if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING) | |
3409 | // Must always emit this write barrier, as the structure transition itself requires it. | |
3410 | writeBarrier(baseGPR, node.structureTransitionData().newStructure, WriteBarrierForGenericAccess); | |
3411 | #endif | |
3412 | ||
3413 | m_jit.storePtr(MacroAssembler::TrustedImmPtr(node.structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset())); | |
3414 | ||
3415 | noResult(m_compileIndex); | |
3416 | break; | |
3417 | } | |
3418 | ||
3419 | case GetPropertyStorage: { | |
3420 | SpeculateCellOperand base(this, node.child1()); | |
3421 | GPRTemporary result(this, base); | |
3422 | ||
3423 | GPRReg baseGPR = base.gpr(); | |
3424 | GPRReg resultGPR = result.gpr(); | |
3425 | ||
3426 | m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR); | |
3427 | ||
3428 | storageResult(resultGPR, m_compileIndex); | |
3429 | break; | |
3430 | } | |
3431 | ||
3432 | case GetIndexedPropertyStorage: { | |
3433 | compileGetIndexedPropertyStorage(node); | |
3434 | break; | |
3435 | } | |
3436 | ||
3437 | case GetByOffset: { | |
3438 | StorageOperand storage(this, node.child1()); | |
3439 | GPRTemporary result(this, storage); | |
3440 | ||
3441 | GPRReg storageGPR = storage.gpr(); | |
3442 | GPRReg resultGPR = result.gpr(); | |
3443 | ||
3444 | StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()]; | |
3445 | ||
3446 | m_jit.loadPtr(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)), resultGPR); | |
3447 | ||
3448 | jsValueResult(resultGPR, m_compileIndex); | |
3449 | break; | |
3450 | } | |
3451 | ||
3452 | case PutByOffset: { | |
3453 | #if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING) | |
3454 | SpeculateCellOperand base(this, node.child1()); | |
3455 | #endif | |
3456 | StorageOperand storage(this, node.child2()); | |
3457 | JSValueOperand value(this, node.child3()); | |
3458 | ||
3459 | GPRReg storageGPR = storage.gpr(); | |
3460 | GPRReg valueGPR = value.gpr(); | |
3461 | ||
3462 | #if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING) | |
3463 | writeBarrier(base.gpr(), value.gpr(), node.child3(), WriteBarrierForPropertyAccess); | |
3464 | #endif | |
3465 | ||
3466 | StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()]; | |
3467 | ||
3468 | m_jit.storePtr(valueGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue))); | |
3469 | ||
3470 | noResult(m_compileIndex); | |
3471 | break; | |
3472 | } | |
3473 | ||
3474 | case PutById: { | |
3475 | SpeculateCellOperand base(this, node.child1()); | |
3476 | JSValueOperand value(this, node.child2()); | |
3477 | GPRTemporary scratch(this); | |
3478 | ||
3479 | GPRReg baseGPR = base.gpr(); | |
3480 | GPRReg valueGPR = value.gpr(); | |
3481 | GPRReg scratchGPR = scratch.gpr(); | |
3482 | ||
3483 | base.use(); | |
3484 | value.use(); | |
3485 | ||
3486 | cachedPutById(node.codeOrigin, baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect); | |
3487 | ||
3488 | noResult(m_compileIndex, UseChildrenCalledExplicitly); | |
3489 | break; | |
3490 | } | |
3491 | ||
3492 | case PutByIdDirect: { | |
3493 | SpeculateCellOperand base(this, node.child1()); | |
3494 | JSValueOperand value(this, node.child2()); | |
3495 | GPRTemporary scratch(this); | |
3496 | ||
3497 | GPRReg baseGPR = base.gpr(); | |
3498 | GPRReg valueGPR = value.gpr(); | |
3499 | GPRReg scratchGPR = scratch.gpr(); | |
3500 | ||
3501 | base.use(); | |
3502 | value.use(); | |
3503 | ||
3504 | cachedPutById(node.codeOrigin, baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct); | |
3505 | ||
3506 | noResult(m_compileIndex, UseChildrenCalledExplicitly); | |
3507 | break; | |
3508 | } | |
3509 | ||
3510 | case GetGlobalVar: { | |
3511 | GPRTemporary result(this); | |
3512 | ||
3513 | JSVariableObject* globalObject = m_jit.globalObjectFor(node.codeOrigin); | |
3514 | m_jit.loadPtr(globalObject->addressOfRegisters(), result.gpr()); | |
3515 | m_jit.loadPtr(JITCompiler::addressForGlobalVar(result.gpr(), node.varNumber()), result.gpr()); | |
3516 | ||
3517 | jsValueResult(result.gpr(), m_compileIndex); | |
3518 | break; | |
3519 | } | |
3520 | ||
3521 | case PutGlobalVar: { | |
3522 | JSValueOperand value(this, node.child1()); | |
3523 | GPRTemporary globalObject(this); | |
3524 | GPRTemporary scratch(this); | |
3525 | ||
3526 | GPRReg globalObjectReg = globalObject.gpr(); | |
3527 | GPRReg scratchReg = scratch.gpr(); | |
3528 | ||
3529 | m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectReg); | |
3530 | ||
3531 | writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.gpr(), node.child1(), WriteBarrierForVariableAccess, scratchReg); | |
3532 | ||
3533 | m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg); | |
3534 | m_jit.storePtr(value.gpr(), JITCompiler::addressForGlobalVar(scratchReg, node.varNumber())); | |
3535 | ||
3536 | noResult(m_compileIndex); | |
3537 | break; | |
3538 | } | |
3539 | ||
3540 | case CheckHasInstance: { | |
3541 | SpeculateCellOperand base(this, node.child1()); | |
3542 | GPRTemporary structure(this); | |
3543 | ||
3544 | // Speculate that base has the ImplementsDefaultHasInstance type-info flag set. | |
3545 | m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr()); | |
3546 | speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance))); | |
3547 | ||
3548 | noResult(m_compileIndex); | |
3549 | break; | |
3550 | } | |
3551 | ||
3552 | case InstanceOf: { | |
3553 | compileInstanceOf(node); | |
3554 | break; | |
3555 | } | |
3556 | ||
3557 | case IsUndefined: { | |
3558 | JSValueOperand value(this, node.child1()); | |
3559 | GPRTemporary result(this); | |
3560 | ||
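| // Non-cells can be compared directly against the undefined encoding. Cells report | |
| // undefined only if their Structure carries the MasqueradesAsUndefined flag | |
| // (e.g. document.all-style host objects). | |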
3561 | JITCompiler::Jump isCell = m_jit.branchTestPtr(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister); | |
3562 | ||
3563 | m_jit.comparePtr(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr()); | |
3564 | JITCompiler::Jump done = m_jit.jump(); | |
3565 | ||
3566 | isCell.link(&m_jit); | |
3567 | m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr()); | |
3568 | m_jit.test8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), result.gpr()); | |
3569 | ||
3570 | done.link(&m_jit); | |
3571 | m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); | |
3572 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
3573 | break; | |
3574 | } | |
3575 | ||
3576 | case IsBoolean: { | |
3577 | JSValueOperand value(this, node.child1()); | |
3578 | GPRTemporary result(this, value); | |
3579 | ||
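| // Booleans are encoded as ValueFalse and ValueTrue, which differ only in the low | |
| // bit. XOR-ing with ValueFalse maps them to 0 and 1, so testing the result against | |
| // ~1 yields true exactly when the original value was a boolean. | |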
3580 | m_jit.move(value.gpr(), result.gpr()); | |
3581 | m_jit.xorPtr(JITCompiler::TrustedImm32(ValueFalse), result.gpr()); | |
3582 | m_jit.testPtr(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr()); | |
3583 | m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); | |
3584 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
3585 | break; | |
3586 | } | |
3587 | ||
3588 | case IsNumber: { | |
3589 | JSValueOperand value(this, node.child1()); | |
3590 | GPRTemporary result(this, value); | |
3591 | ||
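| // Under the JSVALUE64 encoding every number (immediate int32 or boxed double) has | |
| // at least one tagTypeNumber bit set, so a single mask test answers IsNumber. | |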
3592 | m_jit.testPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagTypeNumberRegister, result.gpr()); | |
3593 | m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); | |
3594 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
3595 | break; | |
3596 | } | |
3597 | ||
3598 | case IsString: { | |
3599 | JSValueOperand value(this, node.child1()); | |
3600 | GPRTemporary result(this, value); | |
3601 | ||
3602 | JITCompiler::Jump isNotCell = m_jit.branchTestPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister); | |
3603 | ||
3604 | m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr()); | |
3605 | m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr()); | |
3606 | m_jit.or32(TrustedImm32(ValueFalse), result.gpr()); | |
3607 | JITCompiler::Jump done = m_jit.jump(); | |
3608 | ||
3609 | isNotCell.link(&m_jit); | |
3610 | m_jit.move(TrustedImm32(ValueFalse), result.gpr()); | |
3611 | ||
3612 | done.link(&m_jit); | |
3613 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
3614 | break; | |
3615 | } | |
3616 | ||
3617 | case IsObject: { | |
3618 | JSValueOperand value(this, node.child1()); | |
3619 | GPRReg valueGPR = value.gpr(); | |
3620 | GPRResult result(this); | |
3621 | GPRReg resultGPR = result.gpr(); | |
3622 | flushRegisters(); | |
3623 | callOperation(operationIsObject, resultGPR, valueGPR); | |
3624 | m_jit.or32(TrustedImm32(ValueFalse), resultGPR); | |
3625 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
3626 | break; | |
3627 | } | |
3628 | ||
3629 | case IsFunction: { | |
3630 | JSValueOperand value(this, node.child1()); | |
3631 | GPRReg valueGPR = value.gpr(); | |
3632 | GPRResult result(this); | |
3633 | GPRReg resultGPR = result.gpr(); | |
3634 | flushRegisters(); | |
3635 | callOperation(operationIsFunction, resultGPR, valueGPR); | |
3636 | m_jit.or32(TrustedImm32(ValueFalse), resultGPR); | |
3637 | jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean); | |
3638 | break; | |
3639 | } | |
3640 | ||
3641 | case Flush: | |
3642 | case Phi: | |
3643 | break; | |
3644 | ||
3645 | case Breakpoint: | |
3646 | #if ENABLE(DEBUG_WITH_BREAKPOINT) | |
3647 | m_jit.breakpoint(); | |
3648 | #else | |
3649 | ASSERT_NOT_REACHED(); | |
3650 | #endif | |
3651 | break; | |
3652 | ||
3653 | case Call: | |
3654 | case Construct: | |
3655 | emitCall(node); | |
3656 | break; | |
3657 | ||
3658 | case Resolve: { | |
3659 | flushRegisters(); | |
3660 | GPRResult result(this); | |
3661 | callOperation(operationResolve, result.gpr(), identifier(node.identifierNumber())); | |
3662 | jsValueResult(result.gpr(), m_compileIndex); | |
3663 | break; | |
3664 | } | |
3665 | ||
3666 | case ResolveBase: { | |
3667 | flushRegisters(); | |
3668 | GPRResult result(this); | |
3669 | callOperation(operationResolveBase, result.gpr(), identifier(node.identifierNumber())); | |
3670 | jsValueResult(result.gpr(), m_compileIndex); | |
3671 | break; | |
3672 | } | |
3673 | ||
3674 | case ResolveBaseStrictPut: { | |
3675 | flushRegisters(); | |
3676 | GPRResult result(this); | |
3677 | callOperation(operationResolveBaseStrictPut, result.gpr(), identifier(node.identifierNumber())); | |
3678 | jsValueResult(result.gpr(), m_compileIndex); | |
3679 | break; | |
3680 | } | |
3681 | ||
3682 | case ResolveGlobal: { | |
3683 | GPRTemporary globalObject(this); | |
3684 | GPRTemporary resolveInfo(this); | |
3685 | GPRTemporary result(this); | |
3686 | ||
3687 | GPRReg globalObjectGPR = globalObject.gpr(); | |
3688 | GPRReg resolveInfoGPR = resolveInfo.gpr(); | |
3689 | GPRReg resultGPR = result.gpr(); | |
3690 | ||
3691 | ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()]; | |
3692 | GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex)); | |
3693 | ||
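| // ResolveGlobal is an inline cache: GlobalResolveInfo holds the Structure and | |
| // property-storage offset from the last successful lookup. If the global object's | |
| // current Structure still matches, the value is loaded directly at the cached | |
| // offset; otherwise operationResolveGlobal is called with the GlobalResolveInfo so | |
| // it can perform the full lookup and update the cache. | |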
3694 | // Check the Structure of the global object against the cached Structure. | |
3695 | m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR); | |
3696 | m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR); | |
3697 | m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultGPR); | |
3698 | JITCompiler::Jump structuresMatch = m_jit.branchPtr(JITCompiler::Equal, resultGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset())); | |
3699 | ||
3700 | silentSpillAllRegisters(resultGPR); | |
3701 | callOperation(operationResolveGlobal, resultGPR, resolveInfoGPR, &m_jit.codeBlock()->identifier(data.identifierNumber)); | |
3702 | silentFillAllRegisters(resultGPR); | |
3703 | ||
3704 | JITCompiler::Jump wasSlow = m_jit.jump(); | |
3705 | ||
3706 | // Fast case | |
3707 | structuresMatch.link(&m_jit); | |
3708 | m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultGPR); | |
3709 | m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR); | |
3710 | m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr), resultGPR); | |
3711 | ||
3712 | wasSlow.link(&m_jit); | |
3713 | ||
3714 | jsValueResult(resultGPR, m_compileIndex); | |
3715 | break; | |
3716 | } | |
3717 | ||
3718 | case CreateActivation: { | |
3719 | JSValueOperand value(this, node.child1()); | |
3720 | GPRTemporary result(this, value); | |
3721 | ||
3722 | GPRReg valueGPR = value.gpr(); | |
3723 | GPRReg resultGPR = result.gpr(); | |
3724 | ||
3725 | m_jit.move(valueGPR, resultGPR); | |
3726 | ||
3727 | JITCompiler::Jump alreadyCreated = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR); | |
3728 | ||
3729 | silentSpillAllRegisters(resultGPR); | |
3730 | callOperation(operationCreateActivation, resultGPR); | |
3731 | silentFillAllRegisters(resultGPR); | |
3732 | ||
3733 | alreadyCreated.link(&m_jit); | |
3734 | ||
3735 | cellResult(resultGPR, m_compileIndex); | |
3736 | break; | |
3737 | } | |
3738 | ||
3739 | case TearOffActivation: { | |
3740 | JSValueOperand value(this, node.child1()); | |
3741 | GPRReg valueGPR = value.gpr(); | |
3742 | ||
3743 | JITCompiler::Jump notCreated = m_jit.branchTestPtr(JITCompiler::Zero, valueGPR); | |
3744 | ||
3745 | silentSpillAllRegisters(InvalidGPRReg); | |
3746 | callOperation(operationTearOffActivation, valueGPR); | |
3747 | silentFillAllRegisters(InvalidGPRReg); | |
3748 | ||
3749 | notCreated.link(&m_jit); | |
3750 | ||
3751 | noResult(m_compileIndex); | |
3752 | break; | |
3753 | } | |
3754 | ||
3755 | case NewFunctionNoCheck: | |
3756 | compileNewFunctionNoCheck(node); | |
3757 | break; | |
3758 | ||
3759 | case NewFunction: { | |
3760 | JSValueOperand value(this, node.child1()); | |
3761 | GPRTemporary result(this, value); | |
3762 | ||
3763 | GPRReg valueGPR = value.gpr(); | |
3764 | GPRReg resultGPR = result.gpr(); | |
3765 | ||
3766 | m_jit.move(valueGPR, resultGPR); | |
3767 | ||
3768 | JITCompiler::Jump alreadyCreated = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR); | |
3769 | ||
3770 | silentSpillAllRegisters(resultGPR); | |
3771 | callOperation( | |
3772 | operationNewFunction, resultGPR, m_jit.codeBlock()->functionDecl(node.functionDeclIndex())); | |
3773 | silentFillAllRegisters(resultGPR); | |
3774 | ||
3775 | alreadyCreated.link(&m_jit); | |
3776 | ||
3777 | cellResult(resultGPR, m_compileIndex); | |
3778 | break; | |
3779 | } | |
3780 | ||
3781 | case NewFunctionExpression: | |
3782 | compileNewFunctionExpression(node); | |
3783 | break; | |
3784 | ||
3785 | case ForceOSRExit: { | |
3786 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); | |
3787 | break; | |
3788 | } | |
3789 | ||
3790 | case Phantom: | |
3791 | // This is a no-op. | |
3792 | noResult(m_compileIndex); | |
3793 | break; | |
3794 | ||
3795 | case InlineStart: | |
3796 | case Nop: | |
3797 | ASSERT_NOT_REACHED(); | |
3798 | break; | |
3799 | ||
3800 | case LastNodeType: | |
3801 | ASSERT_NOT_REACHED(); | |
3802 | break; | |
3803 | } | |
3804 | ||
3805 | if (!m_compileOkay) | |
3806 | return; | |
3807 | ||
3808 | if (node.hasResult() && node.mustGenerate()) | |
3809 | use(m_compileIndex); | |
3810 | } | |
3811 | ||
3812 | #endif | |
3813 | ||
3814 | } } // namespace JSC::DFG | |
3815 | ||
3816 | #endif |