/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "CodeBlock.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "JSVariableObject.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include <wtf/StringPrintStream.h>

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {
#if USE(JSVALUE64)

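// Shared thunk for get_by_val on a JSString base. Expects the string cell in
// regT0 and an int32 index in regT1. On success it returns the single-character
// string (from the VM's small strings cache) in regT0; on any failure - a rope
// (null value pointer), an out-of-bounds index, or a character outside the
// Latin-1 cache - it returns 0 in regT0 so the caller can fall through to its
// own slow path.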
JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));

    // Load string length to regT2, and start the process of loading the data pointer into regT0.
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large.
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character.
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags.
    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}

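// Fast path for get_by_val. We consult this instruction's ArrayProfile and
// compile a fast path only for the single indexing shape it predicts; all
// other shapes take the patchable badType jump, which the ByValCompilationInfo
// recorded below allows us to repatch with a different shape's stub later.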
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against the m_vectorLength - which will always fail if
    // the number was negative, since m_vectorLength is always less than intmax (the total allocation
    // size is always less than 4GB). As such, zero-extending will have been correct (and extending the
    // value to 64 bits is necessary since it's used in the address calculation). We zero-extend rather
    // than sign-extend since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    emitArrayProfilingSite(regT2, regT3, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

#if !ASSERT_DISABLED
    Jump resultOK = branchTest64(NonZero, regT0);
    breakpoint();
    resultOK.link(this);
#endif

    emitValueProfilingSite();
    emitPutVirtualRegister(dst);

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}

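// DoubleShape fast path. The raw double is re-boxed into a JSValue: on
// JSVALUE64, subtracting TagTypeNumber is equivalent (mod 2^64) to adding the
// 2^48 double-encode offset. A hole reads back as NaN, which fails the
// self-comparison below and is sent to the slow case.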
JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
    moveDoubleTo64(fpRegT0, regT0);
    sub64(tagTypeNumberRegister, regT0);

    return slowCases;
}

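// Fast path shared by Int32Shape and ContiguousShape (expectedShape selects
// which). Both shapes store fully boxed JSValues, so a single 64-bit load
// suffices; a zero (empty) slot is a hole and takes the slow case.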
JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}

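// ArrayStorage fast path. The add-then-unsigned-compare below accepts both
// ArrayStorageShape and SlowPutArrayStorageShape with a single branch.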
JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}

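// Slow path for get_by_val. A string base gets one more chance via the shared
// string-character thunk before falling back to the C++ stub; out-of-bounds
// and hole loads are recorded in the ArrayProfile, presumably so that later
// compiles treat this access more conservatively.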
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTest64(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    Jump skipProfiling = jump();

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    emitArrayProfileOutOfBoundsSpecialCase(profile);

    skipProfiling.link(this);

    Label slowPath = label();

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    Call call = stubCall.call(dst);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite();
}

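// Compile a load from an object's property storage where the offset is only
// known at runtime (in a register). Out-of-line offsets are negated and index
// off the butterfly; inline offsets index off a biased inline-storage pointer,
// letting both paths share the single BaseIndex load at the bottom.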
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
{
    ASSERT(sizeof(JSValue) == 8);

    if (finalObjectMode == MayBeFinal) {
        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
        Jump done = jump();
        isInline.link(this);
        addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
        done.link(this);
    } else {
#if !ASSERT_DISABLED
        Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
        breakpoint();
        isOutOfLine.link(this);
#endif
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
    }
    signExtend32ToPtr(offset, offset);
    load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}

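// get_by_pname: fast path for property reads inside a for-in loop. It is only
// valid while the property register still holds the expected name and the base
// still has the Structure cached by the JSPropertyNameIterator; the loop index
// in i appears to be offset by one, hence the subtract below.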
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure.
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
    add32(TrustedImm32(firstOutOfLineOffset), regT3);
    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
    inlineProperty.link(this);
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val_generic);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

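// put_by_val mirrors get_by_val: profile the base's indexing shape, compile a
// fast path for the predicted shape only, and leave a patchable badType jump
// for everything else. Stores additionally need a write barrier on the base.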
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    emitArrayProfilingSite(regT2, regT3, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));

    emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
}

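// Shared body for the int32, double, and contiguous put_by_val fast paths. An
// in-bounds store is a single (possibly double) store; a store past the public
// length but still within the vector is profiled as a store-to-hole, grows the
// public length to index + 1, and retries the store.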
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitGetVirtualRegister(value, regT3);
    switch (indexingShape) {
    case Int32Shape:
        slowCases.append(emitJumpIfNotImmediateInteger(regT3));
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        break;
    case DoubleShape: {
        Jump notInt = emitJumpIfNotImmediateInteger(regT3);
        convertInt32ToDouble(regT3, fpRegT0);
        Jump ready = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT3);
        move64ToDouble(regT3, fpRegT0);
        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
        ready.link(this);
        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
        break;
    }
    case ContiguousShape:
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();
    outOfBounds.link(this);

    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));

    emitArrayProfileStoreToHoleSpecialCase(profile);

    add32(TrustedImm32(1), regT1, regT3);
    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);

    done.link(this);

    return slowCases;
}

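// ArrayStorage put_by_val. A store to an empty (hole) slot within the vector
// bumps m_numValuesInVector, and also bumps the array's length if the index is
// at or past it, before jumping back to perform the store.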
JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    return slowCases;
}

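// Slow path for put_by_val. Note that the value-type check was only emitted
// for the int32 and double shapes, so it is only linked for those modes;
// out-of-bounds stores are recorded in the ArrayProfile before calling out.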
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
    case JITDouble:
        linkSlowCase(iter); // value type check
        break;
    default:
        break;
    }

    Jump skipProfiling = jump();
    linkSlowCase(iter); // out of bounds
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    skipProfiling.link(this);

    Label slowPath = label();

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(regT0);
    stubPutByValCall.addArgument(property, regT2);
    stubPutByValCall.addArgument(value, regT2);
    Call call = stubPutByValCall.call();

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}

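// The following opcodes have no inline fast path in the baseline JIT; they
// simply call through to their C++ stubs.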
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(baseVReg, ident);
    emitValueProfilingSite();
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int baseVReg, Identifier* ident)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
        loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
        emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
    }

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);

    DataLabelPtr structureToCompare;
    PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);

    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
    DataLabelCompact displacementLabel = load64WithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel, putResult));
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter);
    emitValueProfilingSite();
}

void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    Label coldPathBegin(this);
    JITStubCall stubCall(this, cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    // In order to be able to patch both the Structure and the object offset, we store a single pointer,
    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and
    // we generate code such that the Structure and offset are always at the same distance from it.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);

    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));

    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    DataLabel32 displacementLabel = store64WithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel));
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    move(regT0, nonArgGPR1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
}

// Compile a store into an object's property storage. May overwrite the
// value in base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), base);
    store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), result);
    load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(base->locationForOffset(cachedOffset), result);
        return;
    }

    loadPtr(base->butterflyAddress(), result);
    load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}

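// Generate the out-of-line stub for a put_by_id that transitions the base from
// oldStructure to newStructure. For a non-direct put every Structure on the
// prototype chain must be unchanged (so no setter or shadowing property can
// have appeared); the butterfly is reallocated via a C++ stub call if the
// out-of-line capacity grew; only then are the new Structure and value stored.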
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    move(nonArgGPR1, regT0);

    JumpList failureCases;
    // Check that regT0 is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));

    testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo);

    ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());

    // For a non-direct put, walk the prototype chain, checking that none of its Structures have changed.
    if (!direct) {
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
            testPrototype((*it)->storedPrototype(), failureCases, stubInfo);
        }
    }

    // If we succeed in all of our checks, and the code was optimizable, then make sure we
    // decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
    if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
        sub32(
            TrustedImm32(1),
            AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
    }
#endif

    // Emit a call only if storage realloc is needed.
    bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
        stubCall.addArgument(TrustedImmPtr(newStructure));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Planting the new structure triggers the write barrier so we need
    // an unconditional barrier here.
    emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);

    ASSERT(newStructure->classInfo() == oldStructure->classInfo());
    storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
    compilePutDirectOffset(regT0, regT1, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    stubInfo->stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline put_by_id transition for %s, return point %p",
                toCString(*m_codeBlock).data(), returnAddress.value())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        willNeedStorageRealloc,
        newStructure);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
}

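// The two patch functions below specialize a monomorphic inline cache in
// place: plant the Structure to check for and the property offset, and relink
// the slow-path call so the cache will not be patched a second time.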
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    // Patch the Structure to look for, and the offset into the property storage to load from.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    // Patch the Structure to look for, and the offset into the property storage to store to.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
}

798 | ||
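// Out-of-line stub for get_by_id of 'length' on a JSArray: check the cell is
// an array with a real indexing shape, load the length from the butterfly, and
// box it as an int32 immediate (lengths above INT32_MAX take the slow case).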
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check that regT0 is an array.
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
    Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));

    // Checks out okay! - get the length from the storage.
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
    load32(Address(regT3, ArrayStorage::lengthOffset()), regT2);
    Jump failureCases3 = branch32(LessThan, regT2, TrustedImm32(0));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);
    patchBuffer.link(failureCases3, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        patchBuffer,
        ("Baseline JIT get_by_id array length stub for %s, return point %p",
            toCString(*m_codeBlock).data(),
            stubInfo->hotPathBegin.labelAtOffset(
                stubInfo->patch.baseline.u.get.putResult).executableAddress()));

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

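// Out-of-line stub for a get_by_id that finds the property on the direct
// prototype: check the base's Structure and that the prototype's Structure is
// unchanged, then load straight out of the prototype object - or call the
// getter / custom-getter stub if the cached slot is not a plain value.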
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check that regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check that the prototype object's Structure has not changed.
    Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);

    bool needsStubLink = false;

    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
    patchBuffer.link(failureCases1, slowCaseBegin);
    if (failureCases2.isSet())
        patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline JIT get_by_id proto stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

914 | ||
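// The *List variants below build polymorphic caches. Each newly generated
// stub's failure path falls through to the previously generated stub (or, for
// the first entry, back to the original slow case), so the Structure checks
// are tried in order until one matches.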
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(regT0, regT0, cachedOffset);
    }
    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline JIT get_by_id list stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);

    polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
}

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check that regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check that the prototype object's Structure has not changed.
    Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);

    // Checks out okay!
    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    }

    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
    patchBuffer.link(failureCases1, lastProtoBegin);
    if (failureCases2.isSet())
        patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline JIT get_by_id proto list stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);
    prototypeStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    JumpList bucketsOfFail;

    // Check that regT0 is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail, stubInfo);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    }
    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline JIT get_by_id chain list stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check that regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail, stubInfo);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    // Track the stub we have created so that it will be deleted later.
    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline JIT get_by_id chain stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);
    stubInfo->stubRoutine = stubRoutine;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
1202 | ||
1203 | // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. | |
1204 | repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); | |
1205 | } | |
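
// Once the one-shot chain stub above is installed, any subsequent miss calls
// cti_op_get_by_id_proto_list, which appends per-structure stubs to a list (the
// chain list path above) rather than repatching the inline cache again.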

void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[3].u.operand;

    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);

    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
    loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
    emitValueProfilingSite();
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
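
// The scope walk above is the JIT'd equivalent of roughly this interpreter-style
// pseudocode (a sketch; registersOf() stands in for the offsetOfRegisters() load):
//
//     JSScope* scope = callFrame->scopeChain();
//     if (checkTopLevel && skip--) {
//         if (the function's activation has been created) // otherwise it is not on the chain yet
//             scope = scope->next();
//     }
//     while (skip--)
//         scope = scope->next();
//     dst = registersOf(static_cast<JSVariableObject*>(scope))[index];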

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[2].u.operand;

    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);

    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
        loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);

    emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);

    loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
    storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
}
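
// Note that the write barrier above is emitted before the store: the barrier
// needs only the owner cell (regT1) and the value (regT0), not the final slot
// address, so ordering it first leaves regT1 free to be clobbered by the
// offsetOfRegisters() load.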

void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = m_codeBlock->globalObject();

    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);

    store64(regT0, currentInstruction[1].u.registerPointer);
    if (Heap::isWriteBarrierEnabled())
        emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}

void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);

    addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));

    JSGlobalObject* globalObject = m_codeBlock->globalObject();

    store64(regT0, currentInstruction[1].u.registerPointer);
    if (Heap::isWriteBarrierEnabled())
        emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}

void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_init_global_const_check);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
    stubCall.call();
}

void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
    repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
}
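
// For reference, the inline cache these reset routines rewrite has roughly the
// following shape (a sketch of the emitted fast path; the exact instruction
// layout is architecture-dependent):
//
//     cmp  [base + JSCell::structureOffset()], <structureToCompare>  ; patchable pointer
//     jne  <structureCheck target>                                   ; relinked above
//     mov  result, [storage + <displacementLabel>]                   ; patchable offset
//
// Resetting points the branch back at the cold path and fills the patchable
// slots with inert values (unusedPointer / 0).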

void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    if (isDirectPutById(stubInfo))
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
    else
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
}

#endif // USE(JSVALUE64)

void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
    ASSERT(owner != scratch);
    ASSERT(owner != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);

#if ENABLE(WRITE_BARRIER_PROFILING)
    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
{
    if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
        structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock));
#if !ASSERT_DISABLED
        move(TrustedImmPtr(object), scratch);
        Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
        breakpoint();
        ok.link(this);
#endif
        Jump result; // Returning an unset jump this way because otherwise VC++ would complain.
        return result;
    }

    move(TrustedImmPtr(object), scratch);
    return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
}
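
// When the structure is still watchpointable, the runtime check above is elided
// entirely: instead of emitting a branch, the stub registers a transition
// watchpoint, so the cached code is invalidated if the structure ever
// transitions. The !ASSERT_DISABLED block emits a debug-only structure check
// (breakpoint() fires if it fails); on this path release builds emit no code at
// all and return an unset Jump.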

void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch)
{
    Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch);
    if (!failureCase.isSet())
        return;

    failureCases.append(failureCase);
}

void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo)
{
    if (prototype.isNull())
        return;

    ASSERT(prototype.isCell());
    addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3);
}

bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
{
    switch (stubInfo->accessType) {
    case access_put_by_id_transition_normal:
        return false;
    case access_put_by_id_transition_direct:
        return true;
    case access_put_by_id_replace:
    case access_put_by_id_generic: {
        void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
        if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
            return true;
        ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
        return false;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
}

void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    case JITInt8Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray);
        break;
    case JITInt16Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray);
        break;
    case JITInt32Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray);
        break;
    case JITUint8Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray);
        break;
    case JITUint8ClampedArray:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray);
        break;
    case JITUint16Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray);
        break;
    case JITUint32Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray);
        break;
    case JITFloat32Array:
        slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
        break;
    case JITFloat64Array:
        slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
        break;
    default:
        CRASH();
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        patchBuffer,
        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_val_generic));
}
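
// Patching topology for the by_val stubs (get and put alike), sketched:
//
//     hot path: ... badTypeJump --> [stub: class check / bounds check / access]
//                                       |  badType or slowCases
//                                       v
//                                   slow path re-entry (returnAddressToSlowPath)
//
//     on success, the stub jumps back to badTypeJumpToDone in the hot path.
//
// After one stub has been generated, the caller is relinked to the fully
// generic C function (cti_op_get_by_val_generic / cti_op_put_by_val_generic),
// so the cache is patched at most once.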

void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    case JITInt8Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray, TruncateRounding);
        break;
    case JITInt16Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray, TruncateRounding);
        break;
    case JITInt32Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray, TruncateRounding);
        break;
    case JITUint8Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray, TruncateRounding);
        break;
    case JITUint8ClampedArray:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray, ClampRounding);
        break;
    case JITUint16Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray, TruncateRounding);
        break;
    case JITUint32Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray, TruncateRounding);
        break;
    case JITFloat32Array:
        slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
        break;
    case JITFloat64Array:
        slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        patchBuffer,
        ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_val_generic));
}

JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness)
{
    // The best way to test the array type is to use the classInfo. We need to do so
    // without clobbering the registers that hold the base and property.

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), scratch);
    badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
    loadPtr(Address(base, descriptor.m_storageOffset), base);

    switch (elementSize) {
    case 1:
        if (signedness == SignedTypedArray)
            load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
        else
            load8(BaseIndex(base, property, TimesOne), resultPayload);
        break;
    case 2:
        if (signedness == SignedTypedArray)
            load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
        else
            load16(BaseIndex(base, property, TimesTwo), resultPayload);
        break;
    case 4:
        load32(BaseIndex(base, property, TimesFour), resultPayload);
        break;
    default:
        CRASH();
    }

    Jump done;
    if (elementSize == 4 && signedness == UnsignedTypedArray) {
        // A uint32 element with the sign bit set does not fit in an int32 JSValue,
        // so box it as a double instead: the loaded bits read as a negative int32,
        // and adding 2^32 recovers the unsigned value.
        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));

        convertInt32ToDouble(resultPayload, fpRegT0);
        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
        moveDoubleTo64(fpRegT0, resultPayload);
        sub64(tagTypeNumberRegister, resultPayload);
#else
        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

        done = jump();
        canBeInt.link(this);
    }

#if USE(JSVALUE64)
    or64(tagTypeNumberRegister, resultPayload);
#else
    move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
    if (done.isSet())
        done.link(this);
    return slowCases;
}
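
// A rough C sketch (illustrative only, not part of the JIT) of the uint32
// widening performed above:
//
//     double readUint32Element(const uint32_t* storage, unsigned index)
//     {
//         int32_t raw = static_cast<int32_t>(storage[index]);
//         if (raw >= 0)
//             return raw; // representable as an int32 JSValue
//         return static_cast<double>(raw) + 4294967296.0; // + 2^32, cf. twoToThe32
//     }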

JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), scratch);
    badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
    loadPtr(Address(base, descriptor.m_storageOffset), base);

    switch (elementSize) {
    case 4:
        loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
        convertFloatToDouble(fpRegT0, fpRegT0);
        break;
    case 8: {
        loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
        break;
    }
    default:
        CRASH();
    }

    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
    static const double NaN = QNaN;
    loadDouble(&NaN, fpRegT0);
    notNaN.link(this);

#if USE(JSVALUE64)
    moveDoubleTo64(fpRegT0, resultPayload);
    sub64(tagTypeNumberRegister, resultPayload);
#else
    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
    return slowCases;
}
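
// The branchDouble sequence above "purifies" NaNs: a double compares unequal to
// itself only when it is NaN, so any NaN bit pattern read from the array is
// replaced with the canonical quiet NaN (QNaN) before boxing. This matters
// under NaN-boxing, where an arbitrary NaN payload could be misread as a tagged
// pointer or integer. Equivalent C (illustrative only):
//
//     double v = storage[index];
//     if (v != v) // true only for NaN
//         v = QNaN;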

JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
    unsigned value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
    badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);

    if (rounding == ClampRounding) {
        ASSERT(elementSize == 1);
        ASSERT_UNUSED(signedness, signedness == UnsignedTypedArray);
        // Clamp to [0, 255]. The first branch is an unsigned comparison, so it also
        // sends negative values (which read as large unsigned values) to the
        // out-of-range path; the signed comparison below then separates "too big"
        // from "negative".
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch); // negative: clamp to 0
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch); // too big: clamp to 255
        clamped.link(this);
        inBounds.link(this);
    }

    switch (elementSize) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
    }

    return slowCases;
}
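
// Uint8ClampedArray clamping, as plain C (illustrative only):
//
//     uint8_t clampToUint8(int32_t v)
//     {
//         if (v < 0)
//             return 0;
//         if (v > 0xff)
//             return 0xff;
//         return static_cast<uint8_t>(v);
//     }
//
// For example, u8c[i] = 300 stores 255, and u8c[i] = -5 stores 0.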

JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
{
    unsigned value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
    badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
    add64(tagTypeNumberRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);

    switch (elementSize) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
    }

    return slowCases;
}
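
// The number-unboxing prologue above accepts either an int32 or a double and
// leaves the value in fpRegT0; anything that is not a number bails to the slow
// case. In C-like terms (illustrative only):
//
//     if (!value.isNumber())
//         goto slow;
//     double d = value.isInt32() ? value.asInt32() : value.asDouble();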

} // namespace JSC

#endif // ENABLE(JIT)