2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
32 #include "GCAwareJITStubRoutine.h"
33 #include "GetterSetter.h"
34 #include "Interpreter.h"
35 #include "JITInlines.h"
36 #include "JITStubCall.h"
38 #include "JSFunction.h"
39 #include "JSPropertyNameIterator.h"
40 #include "JSVariableObject.h"
41 #include "LinkBuffer.h"
42 #include "RepatchBuffer.h"
43 #include "ResultType.h"
44 #include "SamplingTool.h"
45 #include <wtf/StringPrintStream.h>
56 JIT::CodeRef
JIT::stringGetByValStubGenerator(VM
* vm
)
60 failures
.append(jit
.branchPtr(NotEqual
, Address(regT0
, JSCell::structureOffset()), TrustedImmPtr(vm
->stringStructure
.get())));
62 // Load string length to regT2, and start the process of loading the data pointer into regT0
63 jit
.load32(Address(regT0
, ThunkHelpers::jsStringLengthOffset()), regT2
);
64 jit
.loadPtr(Address(regT0
, ThunkHelpers::jsStringValueOffset()), regT0
);
65 failures
.append(jit
.branchTest32(Zero
, regT0
));
67 // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
68 failures
.append(jit
.branch32(AboveOrEqual
, regT1
, regT2
));
73 // Load the string flags
74 jit
.loadPtr(Address(regT0
, StringImpl::flagsOffset()), regT2
);
75 jit
.loadPtr(Address(regT0
, StringImpl::dataOffset()), regT0
);
76 is16Bit
.append(jit
.branchTest32(Zero
, regT2
, TrustedImm32(StringImpl::flagIs8Bit())));
77 jit
.load8(BaseIndex(regT0
, regT1
, TimesOne
, 0), regT0
);
78 cont8Bit
.append(jit
.jump());
80 jit
.load16(BaseIndex(regT0
, regT1
, TimesTwo
, 0), regT0
);
83 failures
.append(jit
.branch32(AboveOrEqual
, regT0
, TrustedImm32(0x100)));
84 jit
.move(TrustedImmPtr(vm
->smallStrings
.singleCharacterStrings()), regT1
);
85 jit
.loadPtr(BaseIndex(regT1
, regT0
, ScalePtr
, 0), regT0
);
89 jit
.move(TrustedImm32(0), regT0
);
92 LinkBuffer
patchBuffer(*vm
, &jit
, GLOBAL_THUNK_ID
);
93 return FINALIZE_CODE(patchBuffer
, ("String get_by_val stub"));
96 void JIT::emit_op_get_by_val(Instruction
* currentInstruction
)
98 unsigned dst
= currentInstruction
[1].u
.operand
;
99 unsigned base
= currentInstruction
[2].u
.operand
;
100 unsigned property
= currentInstruction
[3].u
.operand
;
101 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
103 emitGetVirtualRegisters(base
, regT0
, property
, regT1
);
104 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
106 // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
107 // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
108 // number was signed since m_vectorLength is always less than intmax (since the total allocation
109 // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
110 // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
111 // extending since it makes it easier to re-tag the value in the slow case.
112 zeroExtend32ToPtr(regT1
, regT1
);
114 emitJumpSlowCaseIfNotJSCell(regT0
, base
);
115 loadPtr(Address(regT0
, JSCell::structureOffset()), regT2
);
116 emitArrayProfilingSite(regT2
, regT3
, profile
);
117 and32(TrustedImm32(IndexingShapeMask
), regT2
);
119 PatchableJump badType
;
122 JITArrayMode mode
= chooseArrayMode(profile
);
125 slowCases
= emitInt32GetByVal(currentInstruction
, badType
);
128 slowCases
= emitDoubleGetByVal(currentInstruction
, badType
);
131 slowCases
= emitContiguousGetByVal(currentInstruction
, badType
);
133 case JITArrayStorage
:
134 slowCases
= emitArrayStorageGetByVal(currentInstruction
, badType
);
141 addSlowCase(badType
);
142 addSlowCase(slowCases
);
144 Label done
= label();
147 Jump resultOK
= branchTest64(NonZero
, regT0
);
152 emitValueProfilingSite();
153 emitPutVirtualRegister(dst
);
155 m_byValCompilationInfo
.append(ByValCompilationInfo(m_bytecodeOffset
, badType
, mode
, done
));
158 JIT::JumpList
JIT::emitDoubleGetByVal(Instruction
*, PatchableJump
& badType
)
162 badType
= patchableBranch32(NotEqual
, regT2
, TrustedImm32(DoubleShape
));
163 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT2
);
164 slowCases
.append(branch32(AboveOrEqual
, regT1
, Address(regT2
, Butterfly::offsetOfPublicLength())));
165 loadDouble(BaseIndex(regT2
, regT1
, TimesEight
), fpRegT0
);
166 slowCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fpRegT0
, fpRegT0
));
167 moveDoubleTo64(fpRegT0
, regT0
);
168 sub64(tagTypeNumberRegister
, regT0
);
173 JIT::JumpList
JIT::emitContiguousGetByVal(Instruction
*, PatchableJump
& badType
, IndexingType expectedShape
)
177 badType
= patchableBranch32(NotEqual
, regT2
, TrustedImm32(expectedShape
));
178 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT2
);
179 slowCases
.append(branch32(AboveOrEqual
, regT1
, Address(regT2
, Butterfly::offsetOfPublicLength())));
180 load64(BaseIndex(regT2
, regT1
, TimesEight
), regT0
);
181 slowCases
.append(branchTest64(Zero
, regT0
));
186 JIT::JumpList
JIT::emitArrayStorageGetByVal(Instruction
*, PatchableJump
& badType
)
190 add32(TrustedImm32(-ArrayStorageShape
), regT2
, regT3
);
191 badType
= patchableBranch32(Above
, regT3
, TrustedImm32(SlowPutArrayStorageShape
- ArrayStorageShape
));
193 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT2
);
194 slowCases
.append(branch32(AboveOrEqual
, regT1
, Address(regT2
, ArrayStorage::vectorLengthOffset())));
196 load64(BaseIndex(regT2
, regT1
, TimesEight
, ArrayStorage::vectorOffset()), regT0
);
197 slowCases
.append(branchTest64(Zero
, regT0
));
202 void JIT::emitSlow_op_get_by_val(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
204 unsigned dst
= currentInstruction
[1].u
.operand
;
205 unsigned base
= currentInstruction
[2].u
.operand
;
206 unsigned property
= currentInstruction
[3].u
.operand
;
207 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
209 linkSlowCase(iter
); // property int32 check
210 linkSlowCaseIfNotJSCell(iter
, base
); // base cell check
211 Jump nonCell
= jump();
212 linkSlowCase(iter
); // base array check
213 Jump notString
= branchPtr(NotEqual
, Address(regT0
, JSCell::structureOffset()), TrustedImmPtr(m_vm
->stringStructure
.get()));
214 emitNakedCall(CodeLocationLabel(m_vm
->getCTIStub(stringGetByValStubGenerator
).code()));
215 Jump failed
= branchTest64(Zero
, regT0
);
216 emitPutVirtualRegister(dst
, regT0
);
217 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val
));
219 notString
.link(this);
222 Jump skipProfiling
= jump();
224 linkSlowCase(iter
); // vector length check
225 linkSlowCase(iter
); // empty value
227 emitArrayProfileOutOfBoundsSpecialCase(profile
);
229 skipProfiling
.link(this);
231 Label slowPath
= label();
233 JITStubCall
stubCall(this, cti_op_get_by_val
);
234 stubCall
.addArgument(base
, regT2
);
235 stubCall
.addArgument(property
, regT2
);
236 Call call
= stubCall
.call(dst
);
238 m_byValCompilationInfo
[m_byValInstructionIndex
].slowPathTarget
= slowPath
;
239 m_byValCompilationInfo
[m_byValInstructionIndex
].returnAddress
= call
;
240 m_byValInstructionIndex
++;
242 emitValueProfilingSite();
245 void JIT::compileGetDirectOffset(RegisterID base
, RegisterID result
, RegisterID offset
, RegisterID scratch
, FinalObjectMode finalObjectMode
)
247 ASSERT(sizeof(JSValue
) == 8);
249 if (finalObjectMode
== MayBeFinal
) {
250 Jump isInline
= branch32(LessThan
, offset
, TrustedImm32(firstOutOfLineOffset
));
251 loadPtr(Address(base
, JSObject::butterflyOffset()), scratch
);
255 addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset
- 2) * sizeof(EncodedJSValue
)), base
, scratch
);
259 Jump isOutOfLine
= branch32(GreaterThanOrEqual
, offset
, TrustedImm32(firstOutOfLineOffset
));
261 isOutOfLine
.link(this);
263 loadPtr(Address(base
, JSObject::butterflyOffset()), scratch
);
266 signExtend32ToPtr(offset
, offset
);
267 load64(BaseIndex(scratch
, offset
, TimesEight
, (firstOutOfLineOffset
- 2) * sizeof(EncodedJSValue
)), result
);
270 void JIT::emit_op_get_by_pname(Instruction
* currentInstruction
)
272 unsigned dst
= currentInstruction
[1].u
.operand
;
273 unsigned base
= currentInstruction
[2].u
.operand
;
274 unsigned property
= currentInstruction
[3].u
.operand
;
275 unsigned expected
= currentInstruction
[4].u
.operand
;
276 unsigned iter
= currentInstruction
[5].u
.operand
;
277 unsigned i
= currentInstruction
[6].u
.operand
;
279 emitGetVirtualRegister(property
, regT0
);
280 addSlowCase(branch64(NotEqual
, regT0
, addressFor(expected
)));
281 emitGetVirtualRegisters(base
, regT0
, iter
, regT1
);
282 emitJumpSlowCaseIfNotJSCell(regT0
, base
);
284 // Test base's structure
285 loadPtr(Address(regT0
, JSCell::structureOffset()), regT2
);
286 addSlowCase(branchPtr(NotEqual
, regT2
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructure
))));
287 load32(addressFor(i
), regT3
);
288 sub32(TrustedImm32(1), regT3
);
289 addSlowCase(branch32(AboveOrEqual
, regT3
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_numCacheableSlots
))));
290 Jump inlineProperty
= branch32(Below
, regT3
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructureInlineCapacity
)));
291 add32(TrustedImm32(firstOutOfLineOffset
), regT3
);
292 sub32(Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructureInlineCapacity
)), regT3
);
293 inlineProperty
.link(this);
294 compileGetDirectOffset(regT0
, regT0
, regT3
, regT1
);
296 emitPutVirtualRegister(dst
, regT0
);
299 void JIT::emitSlow_op_get_by_pname(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
301 unsigned dst
= currentInstruction
[1].u
.operand
;
302 unsigned base
= currentInstruction
[2].u
.operand
;
303 unsigned property
= currentInstruction
[3].u
.operand
;
306 linkSlowCaseIfNotJSCell(iter
, base
);
310 JITStubCall
stubCall(this, cti_op_get_by_val_generic
);
311 stubCall
.addArgument(base
, regT2
);
312 stubCall
.addArgument(property
, regT2
);
316 void JIT::emit_op_put_by_val(Instruction
* currentInstruction
)
318 unsigned base
= currentInstruction
[1].u
.operand
;
319 unsigned property
= currentInstruction
[2].u
.operand
;
320 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
322 emitGetVirtualRegisters(base
, regT0
, property
, regT1
);
323 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
324 // See comment in op_get_by_val.
325 zeroExtend32ToPtr(regT1
, regT1
);
326 emitJumpSlowCaseIfNotJSCell(regT0
, base
);
327 loadPtr(Address(regT0
, JSCell::structureOffset()), regT2
);
328 emitArrayProfilingSite(regT2
, regT3
, profile
);
329 and32(TrustedImm32(IndexingShapeMask
), regT2
);
331 PatchableJump badType
;
334 JITArrayMode mode
= chooseArrayMode(profile
);
337 slowCases
= emitInt32PutByVal(currentInstruction
, badType
);
340 slowCases
= emitDoublePutByVal(currentInstruction
, badType
);
343 slowCases
= emitContiguousPutByVal(currentInstruction
, badType
);
345 case JITArrayStorage
:
346 slowCases
= emitArrayStoragePutByVal(currentInstruction
, badType
);
353 addSlowCase(badType
);
354 addSlowCase(slowCases
);
356 Label done
= label();
358 m_byValCompilationInfo
.append(ByValCompilationInfo(m_bytecodeOffset
, badType
, mode
, done
));
360 emitWriteBarrier(regT0
, regT3
, regT1
, regT3
, ShouldFilterImmediates
, WriteBarrierForPropertyAccess
);
363 JIT::JumpList
JIT::emitGenericContiguousPutByVal(Instruction
* currentInstruction
, PatchableJump
& badType
, IndexingType indexingShape
)
365 unsigned value
= currentInstruction
[3].u
.operand
;
366 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
370 badType
= patchableBranch32(NotEqual
, regT2
, TrustedImm32(indexingShape
));
372 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT2
);
373 Jump outOfBounds
= branch32(AboveOrEqual
, regT1
, Address(regT2
, Butterfly::offsetOfPublicLength()));
375 Label storeResult
= label();
376 emitGetVirtualRegister(value
, regT3
);
377 switch (indexingShape
) {
379 slowCases
.append(emitJumpIfNotImmediateInteger(regT3
));
380 store64(regT3
, BaseIndex(regT2
, regT1
, TimesEight
));
383 Jump notInt
= emitJumpIfNotImmediateInteger(regT3
);
384 convertInt32ToDouble(regT3
, fpRegT0
);
387 add64(tagTypeNumberRegister
, regT3
);
388 move64ToDouble(regT3
, fpRegT0
);
389 slowCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fpRegT0
, fpRegT0
));
391 storeDouble(fpRegT0
, BaseIndex(regT2
, regT1
, TimesEight
));
394 case ContiguousShape
:
395 store64(regT3
, BaseIndex(regT2
, regT1
, TimesEight
));
403 outOfBounds
.link(this);
405 slowCases
.append(branch32(AboveOrEqual
, regT1
, Address(regT2
, Butterfly::offsetOfVectorLength())));
407 emitArrayProfileStoreToHoleSpecialCase(profile
);
409 add32(TrustedImm32(1), regT1
, regT3
);
410 store32(regT3
, Address(regT2
, Butterfly::offsetOfPublicLength()));
411 jump().linkTo(storeResult
, this);
418 JIT::JumpList
JIT::emitArrayStoragePutByVal(Instruction
* currentInstruction
, PatchableJump
& badType
)
420 unsigned value
= currentInstruction
[3].u
.operand
;
421 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
425 badType
= patchableBranch32(NotEqual
, regT2
, TrustedImm32(ArrayStorageShape
));
426 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT2
);
427 slowCases
.append(branch32(AboveOrEqual
, regT1
, Address(regT2
, ArrayStorage::vectorLengthOffset())));
429 Jump empty
= branchTest64(Zero
, BaseIndex(regT2
, regT1
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
431 Label
storeResult(this);
432 emitGetVirtualRegister(value
, regT3
);
433 store64(regT3
, BaseIndex(regT2
, regT1
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
437 emitArrayProfileStoreToHoleSpecialCase(profile
);
438 add32(TrustedImm32(1), Address(regT2
, ArrayStorage::numValuesInVectorOffset()));
439 branch32(Below
, regT1
, Address(regT2
, ArrayStorage::lengthOffset())).linkTo(storeResult
, this);
441 add32(TrustedImm32(1), regT1
);
442 store32(regT1
, Address(regT2
, ArrayStorage::lengthOffset()));
443 sub32(TrustedImm32(1), regT1
);
444 jump().linkTo(storeResult
, this);
451 void JIT::emitSlow_op_put_by_val(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
453 unsigned base
= currentInstruction
[1].u
.operand
;
454 unsigned property
= currentInstruction
[2].u
.operand
;
455 unsigned value
= currentInstruction
[3].u
.operand
;
456 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
458 linkSlowCase(iter
); // property int32 check
459 linkSlowCaseIfNotJSCell(iter
, base
); // base cell check
460 linkSlowCase(iter
); // base not array check
462 JITArrayMode mode
= chooseArrayMode(profile
);
466 linkSlowCase(iter
); // value type check
472 Jump skipProfiling
= jump();
473 linkSlowCase(iter
); // out of bounds
474 emitArrayProfileOutOfBoundsSpecialCase(profile
);
475 skipProfiling
.link(this);
477 Label slowPath
= label();
479 JITStubCall
stubPutByValCall(this, cti_op_put_by_val
);
480 stubPutByValCall
.addArgument(regT0
);
481 stubPutByValCall
.addArgument(property
, regT2
);
482 stubPutByValCall
.addArgument(value
, regT2
);
483 Call call
= stubPutByValCall
.call();
485 m_byValCompilationInfo
[m_byValInstructionIndex
].slowPathTarget
= slowPath
;
486 m_byValCompilationInfo
[m_byValInstructionIndex
].returnAddress
= call
;
487 m_byValInstructionIndex
++;
490 void JIT::emit_op_put_by_index(Instruction
* currentInstruction
)
492 JITStubCall
stubCall(this, cti_op_put_by_index
);
493 stubCall
.addArgument(currentInstruction
[1].u
.operand
, regT2
);
494 stubCall
.addArgument(TrustedImm32(currentInstruction
[2].u
.operand
));
495 stubCall
.addArgument(currentInstruction
[3].u
.operand
, regT2
);
499 void JIT::emit_op_put_getter_setter(Instruction
* currentInstruction
)
501 JITStubCall
stubCall(this, cti_op_put_getter_setter
);
502 stubCall
.addArgument(currentInstruction
[1].u
.operand
, regT2
);
503 stubCall
.addArgument(TrustedImmPtr(&m_codeBlock
->identifier(currentInstruction
[2].u
.operand
)));
504 stubCall
.addArgument(currentInstruction
[3].u
.operand
, regT2
);
505 stubCall
.addArgument(currentInstruction
[4].u
.operand
, regT2
);
509 void JIT::emit_op_del_by_id(Instruction
* currentInstruction
)
511 JITStubCall
stubCall(this, cti_op_del_by_id
);
512 stubCall
.addArgument(currentInstruction
[2].u
.operand
, regT2
);
513 stubCall
.addArgument(TrustedImmPtr(&m_codeBlock
->identifier(currentInstruction
[3].u
.operand
)));
514 stubCall
.call(currentInstruction
[1].u
.operand
);
517 void JIT::emit_op_get_by_id(Instruction
* currentInstruction
)
519 unsigned resultVReg
= currentInstruction
[1].u
.operand
;
520 unsigned baseVReg
= currentInstruction
[2].u
.operand
;
521 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
523 emitGetVirtualRegister(baseVReg
, regT0
);
524 compileGetByIdHotPath(baseVReg
, ident
);
525 emitValueProfilingSite();
526 emitPutVirtualRegister(resultVReg
);
529 void JIT::compileGetByIdHotPath(int baseVReg
, Identifier
* ident
)
531 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
532 // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
533 // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
534 // to jump back to if one of these trampolies finds a match.
536 emitJumpSlowCaseIfNotJSCell(regT0
, baseVReg
);
538 if (*ident
== m_vm
->propertyNames
->length
&& shouldEmitProfiling()) {
539 loadPtr(Address(regT0
, JSCell::structureOffset()), regT1
);
540 emitArrayProfilingSiteForBytecodeIndex(regT1
, regT2
, m_bytecodeOffset
);
543 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath
);
545 Label
hotPathBegin(this);
547 DataLabelPtr structureToCompare
;
548 PatchableJump structureCheck
= patchableBranchPtrWithPatch(NotEqual
, Address(regT0
, JSCell::structureOffset()), structureToCompare
, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure
)));
549 addSlowCase(structureCheck
);
551 ConvertibleLoadLabel propertyStorageLoad
= convertibleLoadPtr(Address(regT0
, JSObject::butterflyOffset()), regT0
);
552 DataLabelCompact displacementLabel
= load64WithCompactAddressOffsetPatch(Address(regT0
, patchGetByIdDefaultOffset
), regT0
);
554 Label
putResult(this);
556 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath
);
558 m_propertyAccessCompilationInfo
.append(PropertyStubCompilationInfo(PropertyStubGetById
, m_bytecodeOffset
, hotPathBegin
, structureToCompare
, structureCheck
, propertyStorageLoad
, displacementLabel
, putResult
));
561 void JIT::emitSlow_op_get_by_id(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
563 unsigned resultVReg
= currentInstruction
[1].u
.operand
;
564 unsigned baseVReg
= currentInstruction
[2].u
.operand
;
565 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
567 compileGetByIdSlowCase(resultVReg
, baseVReg
, ident
, iter
);
568 emitValueProfilingSite();
571 void JIT::compileGetByIdSlowCase(int resultVReg
, int baseVReg
, Identifier
* ident
, Vector
<SlowCaseEntry
>::iterator
& iter
)
573 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
574 // so that we only need track one pointer into the slow case code - we track a pointer to the location
575 // of the call (which we can use to look up the patch information), but should a array-length or
576 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
577 // the distance from the call to the head of the slow case.
579 linkSlowCaseIfNotJSCell(iter
, baseVReg
);
582 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase
);
584 Label
coldPathBegin(this);
585 JITStubCall
stubCall(this, cti_op_get_by_id
);
586 stubCall
.addArgument(regT0
);
587 stubCall
.addArgument(TrustedImmPtr(ident
));
588 Call call
= stubCall
.call(resultVReg
);
590 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase
);
592 // Track the location of the call; this will be used to recover patch information.
593 m_propertyAccessCompilationInfo
[m_propertyAccessInstructionIndex
++].slowCaseInfo(PropertyStubGetById
, coldPathBegin
, call
);
596 void JIT::emit_op_put_by_id(Instruction
* currentInstruction
)
598 unsigned baseVReg
= currentInstruction
[1].u
.operand
;
599 unsigned valueVReg
= currentInstruction
[3].u
.operand
;
601 // In order to be able to patch both the Structure, and the object offset, we store one pointer,
602 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
603 // such that the Structure & offset are always at the same distance from this.
605 emitGetVirtualRegisters(baseVReg
, regT0
, valueVReg
, regT1
);
607 // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
608 emitJumpSlowCaseIfNotJSCell(regT0
, baseVReg
);
610 BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById
);
612 Label
hotPathBegin(this);
614 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
615 DataLabelPtr structureToCompare
;
616 addSlowCase(branchPtrWithPatch(NotEqual
, Address(regT0
, JSCell::structureOffset()), structureToCompare
, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure
))));
618 ConvertibleLoadLabel propertyStorageLoad
= convertibleLoadPtr(Address(regT0
, JSObject::butterflyOffset()), regT2
);
619 DataLabel32 displacementLabel
= store64WithAddressOffsetPatch(regT1
, Address(regT2
, patchPutByIdDefaultOffset
));
621 END_UNINTERRUPTED_SEQUENCE(sequencePutById
);
623 emitWriteBarrier(regT0
, regT1
, regT2
, regT3
, ShouldFilterImmediates
, WriteBarrierForPropertyAccess
);
625 m_propertyAccessCompilationInfo
.append(PropertyStubCompilationInfo(PropertyStubPutById
, m_bytecodeOffset
, hotPathBegin
, structureToCompare
, propertyStorageLoad
, displacementLabel
));
628 void JIT::emitSlow_op_put_by_id(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
630 unsigned baseVReg
= currentInstruction
[1].u
.operand
;
631 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
632 unsigned direct
= currentInstruction
[8].u
.operand
;
634 linkSlowCaseIfNotJSCell(iter
, baseVReg
);
637 JITStubCall
stubCall(this, direct
? cti_op_put_by_id_direct
: cti_op_put_by_id
);
638 stubCall
.addArgument(regT0
);
639 stubCall
.addArgument(TrustedImmPtr(ident
));
640 stubCall
.addArgument(regT1
);
641 move(regT0
, nonArgGPR1
);
642 Call call
= stubCall
.call();
644 // Track the location of the call; this will be used to recover patch information.
645 m_propertyAccessCompilationInfo
[m_propertyAccessInstructionIndex
++].slowCaseInfo(PropertyStubPutById
, call
);
648 // Compile a store into an object's property storage. May overwrite the
649 // value in objectReg.
650 void JIT::compilePutDirectOffset(RegisterID base
, RegisterID value
, PropertyOffset cachedOffset
)
652 if (isInlineOffset(cachedOffset
)) {
653 store64(value
, Address(base
, JSObject::offsetOfInlineStorage() + sizeof(JSValue
) * offsetInInlineStorage(cachedOffset
)));
657 loadPtr(Address(base
, JSObject::butterflyOffset()), base
);
658 store64(value
, Address(base
, sizeof(JSValue
) * offsetInButterfly(cachedOffset
)));
661 // Compile a load from an object's property storage. May overwrite base.
662 void JIT::compileGetDirectOffset(RegisterID base
, RegisterID result
, PropertyOffset cachedOffset
)
664 if (isInlineOffset(cachedOffset
)) {
665 load64(Address(base
, JSObject::offsetOfInlineStorage() + sizeof(JSValue
) * offsetInInlineStorage(cachedOffset
)), result
);
669 loadPtr(Address(base
, JSObject::butterflyOffset()), result
);
670 load64(Address(result
, sizeof(JSValue
) * offsetInButterfly(cachedOffset
)), result
);
673 void JIT::compileGetDirectOffset(JSObject
* base
, RegisterID result
, PropertyOffset cachedOffset
)
675 if (isInlineOffset(cachedOffset
)) {
676 load64(base
->locationForOffset(cachedOffset
), result
);
680 loadPtr(base
->butterflyAddress(), result
);
681 load64(Address(result
, offsetInButterfly(cachedOffset
) * sizeof(WriteBarrier
<Unknown
>)), result
);
684 void JIT::privateCompilePutByIdTransition(StructureStubInfo
* stubInfo
, Structure
* oldStructure
, Structure
* newStructure
, PropertyOffset cachedOffset
, StructureChain
* chain
, ReturnAddressPtr returnAddress
, bool direct
)
686 move(nonArgGPR1
, regT0
);
688 JumpList failureCases
;
689 // Check eax is an object of the right Structure.
690 failureCases
.append(emitJumpIfNotJSCell(regT0
));
691 failureCases
.append(branchPtr(NotEqual
, Address(regT0
, JSCell::structureOffset()), TrustedImmPtr(oldStructure
)));
693 testPrototype(oldStructure
->storedPrototype(), failureCases
, stubInfo
);
695 ASSERT(oldStructure
->storedPrototype().isNull() || oldStructure
->storedPrototype().asCell()->structure() == chain
->head()->get());
697 // ecx = baseObject->m_structure
699 for (WriteBarrier
<Structure
>* it
= chain
->head(); *it
; ++it
) {
700 ASSERT((*it
)->storedPrototype().isNull() || (*it
)->storedPrototype().asCell()->structure() == it
[1].get());
701 testPrototype((*it
)->storedPrototype(), failureCases
, stubInfo
);
705 // If we succeed in all of our checks, and the code was optimizable, then make sure we
706 // decrement the rare case counter.
707 #if ENABLE(VALUE_PROFILER)
708 if (m_codeBlock
->canCompileWithDFG() >= DFG::MayInline
) {
711 AbsoluteAddress(&m_codeBlock
->rareCaseProfileForBytecodeOffset(stubInfo
->bytecodeIndex
)->m_counter
));
715 // emit a call only if storage realloc is needed
716 bool willNeedStorageRealloc
= oldStructure
->outOfLineCapacity() != newStructure
->outOfLineCapacity();
717 if (willNeedStorageRealloc
) {
718 // This trampoline was called to like a JIT stub; before we can can call again we need to
719 // remove the return address from the stack, to prevent the stack from becoming misaligned.
720 preserveReturnAddressAfterCall(regT3
);
722 JITStubCall
stubCall(this, cti_op_put_by_id_transition_realloc
);
723 stubCall
.skipArgument(); // base
724 stubCall
.skipArgument(); // ident
725 stubCall
.skipArgument(); // value
726 stubCall
.addArgument(TrustedImm32(oldStructure
->outOfLineCapacity()));
727 stubCall
.addArgument(TrustedImmPtr(newStructure
));
728 stubCall
.call(regT0
);
729 emitGetJITStubArg(2, regT1
);
731 restoreReturnAddressBeforeReturn(regT3
);
734 // Planting the new structure triggers the write barrier so we need
735 // an unconditional barrier here.
736 emitWriteBarrier(regT0
, regT1
, regT2
, regT3
, UnconditionalWriteBarrier
, WriteBarrierForPropertyAccess
);
738 ASSERT(newStructure
->classInfo() == oldStructure
->classInfo());
739 storePtr(TrustedImmPtr(newStructure
), Address(regT0
, JSCell::structureOffset()));
740 compilePutDirectOffset(regT0
, regT1
, cachedOffset
);
744 ASSERT(!failureCases
.empty());
745 failureCases
.link(this);
746 restoreArgumentReferenceForTrampoline();
747 Call failureCall
= tailRecursiveCall();
749 LinkBuffer
patchBuffer(*m_vm
, this, m_codeBlock
);
751 patchBuffer
.link(failureCall
, FunctionPtr(direct
? cti_op_put_by_id_direct_fail
: cti_op_put_by_id_fail
));
753 if (willNeedStorageRealloc
) {
754 ASSERT(m_calls
.size() == 1);
755 patchBuffer
.link(m_calls
[0].from
, FunctionPtr(cti_op_put_by_id_transition_realloc
));
758 stubInfo
->stubRoutine
= createJITStubRoutine(
761 ("Baseline put_by_id transition for %s, return point %p",
762 toCString(*m_codeBlock
).data(), returnAddress
.value())),
764 m_codeBlock
->ownerExecutable(),
765 willNeedStorageRealloc
,
767 RepatchBuffer
repatchBuffer(m_codeBlock
);
768 repatchBuffer
.relinkCallerToTrampoline(returnAddress
, CodeLocationLabel(stubInfo
->stubRoutine
->code().code()));
771 void JIT::patchGetByIdSelf(CodeBlock
* codeBlock
, StructureStubInfo
* stubInfo
, Structure
* structure
, PropertyOffset cachedOffset
, ReturnAddressPtr returnAddress
)
773 RepatchBuffer
repatchBuffer(codeBlock
);
775 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
776 // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
777 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_self_fail
));
779 // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
780 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelPtrAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureToCompare
), structure
);
781 repatchBuffer
.setLoadInstructionIsActive(stubInfo
->hotPathBegin
.convertibleLoadAtOffset(stubInfo
->patch
.baseline
.u
.get
.propertyStorageLoad
), isOutOfLineOffset(cachedOffset
));
782 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelCompactAtOffset(stubInfo
->patch
.baseline
.u
.get
.displacementLabel
), offsetRelativeToPatchedStorage(cachedOffset
));
785 void JIT::patchPutByIdReplace(CodeBlock
* codeBlock
, StructureStubInfo
* stubInfo
, Structure
* structure
, PropertyOffset cachedOffset
, ReturnAddressPtr returnAddress
, bool direct
)
787 RepatchBuffer
repatchBuffer(codeBlock
);
789 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
790 // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
791 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(direct
? cti_op_put_by_id_direct_generic
: cti_op_put_by_id_generic
));
793 // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
794 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelPtrAtOffset(stubInfo
->patch
.baseline
.u
.put
.structureToCompare
), structure
);
795 repatchBuffer
.setLoadInstructionIsActive(stubInfo
->hotPathBegin
.convertibleLoadAtOffset(stubInfo
->patch
.baseline
.u
.put
.propertyStorageLoad
), isOutOfLineOffset(cachedOffset
));
796 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabel32AtOffset(stubInfo
->patch
.baseline
.u
.put
.displacementLabel
), offsetRelativeToPatchedStorage(cachedOffset
));
799 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress
)
801 StructureStubInfo
* stubInfo
= &m_codeBlock
->getStubInfo(returnAddress
);
803 // Check eax is an array
804 loadPtr(Address(regT0
, JSCell::structureOffset()), regT2
);
805 Jump failureCases1
= branchTest32(Zero
, regT2
, TrustedImm32(IsArray
));
806 Jump failureCases2
= branchTest32(Zero
, regT2
, TrustedImm32(IndexingShapeMask
));
808 // Checks out okay! - get the length from the storage
809 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT3
);
810 load32(Address(regT3
, ArrayStorage::lengthOffset()), regT2
);
811 Jump failureCases3
= branch32(LessThan
, regT2
, TrustedImm32(0));
813 emitFastArithIntToImmNoCheck(regT2
, regT0
);
814 Jump success
= jump();
816 LinkBuffer
patchBuffer(*m_vm
, this, m_codeBlock
);
818 // Use the patch information to link the failure cases back to the original slow case routine.
819 CodeLocationLabel slowCaseBegin
= stubInfo
->callReturnLocation
.labelAtOffset(-stubInfo
->patch
.baseline
.u
.get
.coldPathBegin
);
820 patchBuffer
.link(failureCases1
, slowCaseBegin
);
821 patchBuffer
.link(failureCases2
, slowCaseBegin
);
822 patchBuffer
.link(failureCases3
, slowCaseBegin
);
824 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
825 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(stubInfo
->patch
.baseline
.u
.get
.putResult
));
827 // Track the stub we have created so that it will be deleted later.
828 stubInfo
->stubRoutine
= FINALIZE_CODE_FOR_STUB(
830 ("Basline JIT get_by_id array length stub for %s, return point %p",
831 toCString(*m_codeBlock
).data(),
832 stubInfo
->hotPathBegin
.labelAtOffset(
833 stubInfo
->patch
.baseline
.u
.get
.putResult
).executableAddress()));
835 // Finally patch the jump to slow case back in the hot path to jump here instead.
836 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureCheck
);
837 RepatchBuffer
repatchBuffer(m_codeBlock
);
838 repatchBuffer
.relink(jumpLocation
, CodeLocationLabel(stubInfo
->stubRoutine
->code().code()));
840 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
841 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_array_fail
));
844 void JIT::privateCompileGetByIdProto(StructureStubInfo
* stubInfo
, Structure
* structure
, Structure
* prototypeStructure
, const Identifier
& ident
, const PropertySlot
& slot
, PropertyOffset cachedOffset
, ReturnAddressPtr returnAddress
, CallFrame
* callFrame
)
846 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
847 // referencing the prototype object - let's speculatively load it's table nice and early!)
848 JSObject
* protoObject
= asObject(structure
->prototypeForLookup(callFrame
));
850 // Check eax is an object of the right Structure.
851 Jump failureCases1
= checkStructure(regT0
, structure
);
853 // Check the prototype object's Structure had not changed.
854 Jump failureCases2
= addStructureTransitionCheck(protoObject
, prototypeStructure
, stubInfo
, regT3
);
856 bool needsStubLink
= false;
859 if (slot
.cachedPropertyType() == PropertySlot::Getter
) {
860 needsStubLink
= true;
861 compileGetDirectOffset(protoObject
, regT1
, cachedOffset
);
862 JITStubCall
stubCall(this, cti_op_get_by_id_getter_stub
);
863 stubCall
.addArgument(regT1
);
864 stubCall
.addArgument(regT0
);
865 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
867 } else if (slot
.cachedPropertyType() == PropertySlot::Custom
) {
868 needsStubLink
= true;
869 JITStubCall
stubCall(this, cti_op_get_by_id_custom_stub
);
870 stubCall
.addArgument(TrustedImmPtr(protoObject
));
871 stubCall
.addArgument(TrustedImmPtr(FunctionPtr(slot
.customGetter()).executableAddress()));
872 stubCall
.addArgument(TrustedImmPtr(const_cast<Identifier
*>(&ident
)));
873 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
876 compileGetDirectOffset(protoObject
, regT0
, cachedOffset
);
877 Jump success
= jump();
878 LinkBuffer
patchBuffer(*m_vm
, this, m_codeBlock
);
880 // Use the patch information to link the failure cases back to the original slow case routine.
881 CodeLocationLabel slowCaseBegin
= stubInfo
->callReturnLocation
.labelAtOffset(-stubInfo
->patch
.baseline
.u
.get
.coldPathBegin
);
882 patchBuffer
.link(failureCases1
, slowCaseBegin
);
883 if (failureCases2
.isSet())
884 patchBuffer
.link(failureCases2
, slowCaseBegin
);
886 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
887 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(stubInfo
->patch
.baseline
.u
.get
.putResult
));
890 for (Vector
<CallRecord
>::iterator iter
= m_calls
.begin(); iter
!= m_calls
.end(); ++iter
) {
892 patchBuffer
.link(iter
->from
, FunctionPtr(iter
->to
));
895 // Track the stub we have created so that it will be deleted later.
896 stubInfo
->stubRoutine
= createJITStubRoutine(
899 ("Baseline JIT get_by_id proto stub for %s, return point %p",
900 toCString(*m_codeBlock
).data(), stubInfo
->hotPathBegin
.labelAtOffset(
901 stubInfo
->patch
.baseline
.u
.get
.putResult
).executableAddress())),
903 m_codeBlock
->ownerExecutable(),
906 // Finally patch the jump to slow case back in the hot path to jump here instead.
907 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureCheck
);
908 RepatchBuffer
repatchBuffer(m_codeBlock
);
909 repatchBuffer
.relink(jumpLocation
, CodeLocationLabel(stubInfo
->stubRoutine
->code().code()));
911 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
912 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_proto_list
));
915 void JIT::privateCompileGetByIdSelfList(StructureStubInfo
* stubInfo
, PolymorphicAccessStructureList
* polymorphicStructures
, int currentIndex
, Structure
* structure
, const Identifier
& ident
, const PropertySlot
& slot
, PropertyOffset cachedOffset
)
917 Jump failureCase
= checkStructure(regT0
, structure
);
918 bool needsStubLink
= false;
919 bool isDirect
= false;
920 if (slot
.cachedPropertyType() == PropertySlot::Getter
) {
921 needsStubLink
= true;
922 compileGetDirectOffset(regT0
, regT1
, cachedOffset
);
923 JITStubCall
stubCall(this, cti_op_get_by_id_getter_stub
);
924 stubCall
.addArgument(regT1
);
925 stubCall
.addArgument(regT0
);
926 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
928 } else if (slot
.cachedPropertyType() == PropertySlot::Custom
) {
929 needsStubLink
= true;
930 JITStubCall
stubCall(this, cti_op_get_by_id_custom_stub
);
931 stubCall
.addArgument(regT0
);
932 stubCall
.addArgument(TrustedImmPtr(FunctionPtr(slot
.customGetter()).executableAddress()));
933 stubCall
.addArgument(TrustedImmPtr(const_cast<Identifier
*>(&ident
)));
934 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
938 compileGetDirectOffset(regT0
, regT0
, cachedOffset
);
940 Jump success
= jump();
942 LinkBuffer
patchBuffer(*m_vm
, this, m_codeBlock
);
945 for (Vector
<CallRecord
>::iterator iter
= m_calls
.begin(); iter
!= m_calls
.end(); ++iter
) {
947 patchBuffer
.link(iter
->from
, FunctionPtr(iter
->to
));
951 // Use the patch information to link the failure cases back to the original slow case routine.
952 CodeLocationLabel lastProtoBegin
= CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures
->list
[currentIndex
- 1].stubRoutine
));
954 lastProtoBegin
= stubInfo
->callReturnLocation
.labelAtOffset(-stubInfo
->patch
.baseline
.u
.get
.coldPathBegin
);
956 patchBuffer
.link(failureCase
, lastProtoBegin
);
958 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
959 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(stubInfo
->patch
.baseline
.u
.get
.putResult
));
961 RefPtr
<JITStubRoutine
> stubCode
= createJITStubRoutine(
964 ("Baseline JIT get_by_id list stub for %s, return point %p",
965 toCString(*m_codeBlock
).data(), stubInfo
->hotPathBegin
.labelAtOffset(
966 stubInfo
->patch
.baseline
.u
.get
.putResult
).executableAddress())),
968 m_codeBlock
->ownerExecutable(),
971 polymorphicStructures
->list
[currentIndex
].set(*m_vm
, m_codeBlock
->ownerExecutable(), stubCode
, structure
, isDirect
);
973 // Finally patch the jump to slow case back in the hot path to jump here instead.
974 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureCheck
);
975 RepatchBuffer
repatchBuffer(m_codeBlock
);
976 repatchBuffer
.relink(jumpLocation
, CodeLocationLabel(stubCode
->code().code()));
979 void JIT::privateCompileGetByIdProtoList(StructureStubInfo
* stubInfo
, PolymorphicAccessStructureList
* prototypeStructures
, int currentIndex
, Structure
* structure
, Structure
* prototypeStructure
, const Identifier
& ident
, const PropertySlot
& slot
, PropertyOffset cachedOffset
, CallFrame
* callFrame
)
981 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
982 // referencing the prototype object - let's speculatively load it's table nice and early!)
983 JSObject
* protoObject
= asObject(structure
->prototypeForLookup(callFrame
));
985 // Check eax is an object of the right Structure.
986 Jump failureCases1
= checkStructure(regT0
, structure
);
988 // Check the prototype object's Structure had not changed.
989 Jump failureCases2
= addStructureTransitionCheck(protoObject
, prototypeStructure
, stubInfo
, regT3
);
992 bool needsStubLink
= false;
993 bool isDirect
= false;
994 if (slot
.cachedPropertyType() == PropertySlot::Getter
) {
995 needsStubLink
= true;
996 compileGetDirectOffset(protoObject
, regT1
, cachedOffset
);
997 JITStubCall
stubCall(this, cti_op_get_by_id_getter_stub
);
998 stubCall
.addArgument(regT1
);
999 stubCall
.addArgument(regT0
);
1000 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
1002 } else if (slot
.cachedPropertyType() == PropertySlot::Custom
) {
1003 needsStubLink
= true;
1004 JITStubCall
stubCall(this, cti_op_get_by_id_custom_stub
);
1005 stubCall
.addArgument(TrustedImmPtr(protoObject
));
1006 stubCall
.addArgument(TrustedImmPtr(FunctionPtr(slot
.customGetter()).executableAddress()));
1007 stubCall
.addArgument(TrustedImmPtr(const_cast<Identifier
*>(&ident
)));
1008 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
1012 compileGetDirectOffset(protoObject
, regT0
, cachedOffset
);
1015 Jump success
= jump();
1017 LinkBuffer
patchBuffer(*m_vm
, this, m_codeBlock
);
1019 if (needsStubLink
) {
1020 for (Vector
<CallRecord
>::iterator iter
= m_calls
.begin(); iter
!= m_calls
.end(); ++iter
) {
1022 patchBuffer
.link(iter
->from
, FunctionPtr(iter
->to
));
1026 // Use the patch information to link the failure cases back to the original slow case routine.
1027 CodeLocationLabel lastProtoBegin
= CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures
->list
[currentIndex
- 1].stubRoutine
));
1028 patchBuffer
.link(failureCases1
, lastProtoBegin
);
1029 if (failureCases2
.isSet())
1030 patchBuffer
.link(failureCases2
, lastProtoBegin
);
1032 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
1033 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(stubInfo
->patch
.baseline
.u
.get
.putResult
));
1035 RefPtr
<JITStubRoutine
> stubCode
= createJITStubRoutine(
1038 ("Baseline JIT get_by_id proto list stub for %s, return point %p",
1039 toCString(*m_codeBlock
).data(), stubInfo
->hotPathBegin
.labelAtOffset(
1040 stubInfo
->patch
.baseline
.u
.get
.putResult
).executableAddress())),
1042 m_codeBlock
->ownerExecutable(),
1044 prototypeStructures
->list
[currentIndex
].set(*m_vm
, m_codeBlock
->ownerExecutable(), stubCode
, structure
, prototypeStructure
, isDirect
);
1046 // Finally patch the jump to slow case back in the hot path to jump here instead.
1047 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureCheck
);
1048 RepatchBuffer
repatchBuffer(m_codeBlock
);
1049 repatchBuffer
.relink(jumpLocation
, CodeLocationLabel(stubCode
->code().code()));
1052 void JIT::privateCompileGetByIdChainList(StructureStubInfo
* stubInfo
, PolymorphicAccessStructureList
* prototypeStructures
, int currentIndex
, Structure
* structure
, StructureChain
* chain
, size_t count
, const Identifier
& ident
, const PropertySlot
& slot
, PropertyOffset cachedOffset
, CallFrame
* callFrame
)
1055 JumpList bucketsOfFail
;
1057 // Check eax is an object of the right Structure.
1058 Jump baseObjectCheck
= checkStructure(regT0
, structure
);
1059 bucketsOfFail
.append(baseObjectCheck
);
1061 Structure
* currStructure
= structure
;
1062 WriteBarrier
<Structure
>* it
= chain
->head();
1063 JSObject
* protoObject
= 0;
1064 for (unsigned i
= 0; i
< count
; ++i
, ++it
) {
1065 protoObject
= asObject(currStructure
->prototypeForLookup(callFrame
));
1066 currStructure
= it
->get();
1067 testPrototype(protoObject
, bucketsOfFail
, stubInfo
);
1069 ASSERT(protoObject
);
1071 bool needsStubLink
= false;
1072 bool isDirect
= false;
1073 if (slot
.cachedPropertyType() == PropertySlot::Getter
) {
1074 needsStubLink
= true;
1075 compileGetDirectOffset(protoObject
, regT1
, cachedOffset
);
1076 JITStubCall
stubCall(this, cti_op_get_by_id_getter_stub
);
1077 stubCall
.addArgument(regT1
);
1078 stubCall
.addArgument(regT0
);
1079 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
1081 } else if (slot
.cachedPropertyType() == PropertySlot::Custom
) {
1082 needsStubLink
= true;
1083 JITStubCall
stubCall(this, cti_op_get_by_id_custom_stub
);
1084 stubCall
.addArgument(TrustedImmPtr(protoObject
));
1085 stubCall
.addArgument(TrustedImmPtr(FunctionPtr(slot
.customGetter()).executableAddress()));
1086 stubCall
.addArgument(TrustedImmPtr(const_cast<Identifier
*>(&ident
)));
1087 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
1091 compileGetDirectOffset(protoObject
, regT0
, cachedOffset
);
1093 Jump success
= jump();
1095 LinkBuffer
patchBuffer(*m_vm
, this, m_codeBlock
);
1097 if (needsStubLink
) {
1098 for (Vector
<CallRecord
>::iterator iter
= m_calls
.begin(); iter
!= m_calls
.end(); ++iter
) {
1100 patchBuffer
.link(iter
->from
, FunctionPtr(iter
->to
));
1104 // Use the patch information to link the failure cases back to the original slow case routine.
1105 CodeLocationLabel lastProtoBegin
= CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures
->list
[currentIndex
- 1].stubRoutine
));
1107 patchBuffer
.link(bucketsOfFail
, lastProtoBegin
);
1109 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
1110 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(stubInfo
->patch
.baseline
.u
.get
.putResult
));
1112 RefPtr
<JITStubRoutine
> stubRoutine
= createJITStubRoutine(
1115 ("Baseline JIT get_by_id chain list stub for %s, return point %p",
1116 toCString(*m_codeBlock
).data(), stubInfo
->hotPathBegin
.labelAtOffset(
1117 stubInfo
->patch
.baseline
.u
.get
.putResult
).executableAddress())),
1119 m_codeBlock
->ownerExecutable(),
1122 // Track the stub we have created so that it will be deleted later.
1123 prototypeStructures
->list
[currentIndex
].set(callFrame
->vm(), m_codeBlock
->ownerExecutable(), stubRoutine
, structure
, chain
, isDirect
);
1125 // Finally patch the jump to slow case back in the hot path to jump here instead.
1126 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureCheck
);
1127 RepatchBuffer
repatchBuffer(m_codeBlock
);
1128 repatchBuffer
.relink(jumpLocation
, CodeLocationLabel(stubRoutine
->code().code()));
1131 void JIT::privateCompileGetByIdChain(StructureStubInfo
* stubInfo
, Structure
* structure
, StructureChain
* chain
, size_t count
, const Identifier
& ident
, const PropertySlot
& slot
, PropertyOffset cachedOffset
, ReturnAddressPtr returnAddress
, CallFrame
* callFrame
)
1135 JumpList bucketsOfFail
;
1137 // Check eax is an object of the right Structure.
1138 bucketsOfFail
.append(checkStructure(regT0
, structure
));
1140 Structure
* currStructure
= structure
;
1141 WriteBarrier
<Structure
>* it
= chain
->head();
1142 JSObject
* protoObject
= 0;
1143 for (unsigned i
= 0; i
< count
; ++i
, ++it
) {
1144 protoObject
= asObject(currStructure
->prototypeForLookup(callFrame
));
1145 currStructure
= it
->get();
1146 testPrototype(protoObject
, bucketsOfFail
, stubInfo
);
1148 ASSERT(protoObject
);
1150 bool needsStubLink
= false;
1151 if (slot
.cachedPropertyType() == PropertySlot::Getter
) {
1152 needsStubLink
= true;
1153 compileGetDirectOffset(protoObject
, regT1
, cachedOffset
);
1154 JITStubCall
stubCall(this, cti_op_get_by_id_getter_stub
);
1155 stubCall
.addArgument(regT1
);
1156 stubCall
.addArgument(regT0
);
1157 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
1159 } else if (slot
.cachedPropertyType() == PropertySlot::Custom
) {
1160 needsStubLink
= true;
1161 JITStubCall
stubCall(this, cti_op_get_by_id_custom_stub
);
1162 stubCall
.addArgument(TrustedImmPtr(protoObject
));
1163 stubCall
.addArgument(TrustedImmPtr(FunctionPtr(slot
.customGetter()).executableAddress()));
1164 stubCall
.addArgument(TrustedImmPtr(const_cast<Identifier
*>(&ident
)));
1165 stubCall
.addArgument(TrustedImmPtr(stubInfo
->callReturnLocation
.executableAddress()));
1168 compileGetDirectOffset(protoObject
, regT0
, cachedOffset
);
1169 Jump success
= jump();
1171 LinkBuffer
patchBuffer(*m_vm
, this, m_codeBlock
);
1173 if (needsStubLink
) {
1174 for (Vector
<CallRecord
>::iterator iter
= m_calls
.begin(); iter
!= m_calls
.end(); ++iter
) {
1176 patchBuffer
.link(iter
->from
, FunctionPtr(iter
->to
));
1180 // Use the patch information to link the failure cases back to the original slow case routine.
1181 patchBuffer
.link(bucketsOfFail
, stubInfo
->callReturnLocation
.labelAtOffset(-stubInfo
->patch
.baseline
.u
.get
.coldPathBegin
));
1183 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
1184 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(stubInfo
->patch
.baseline
.u
.get
.putResult
));
1186 // Track the stub we have created so that it will be deleted later.
1187 RefPtr
<JITStubRoutine
> stubRoutine
= createJITStubRoutine(
1190 ("Baseline JIT get_by_id chain stub for %s, return point %p",
1191 toCString(*m_codeBlock
).data(), stubInfo
->hotPathBegin
.labelAtOffset(
1192 stubInfo
->patch
.baseline
.u
.get
.putResult
).executableAddress())),
1194 m_codeBlock
->ownerExecutable(),
1196 stubInfo
->stubRoutine
= stubRoutine
;
1198 // Finally patch the jump to slow case back in the hot path to jump here instead.
1199 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureCheck
);
1200 RepatchBuffer
repatchBuffer(m_codeBlock
);
1201 repatchBuffer
.relink(jumpLocation
, CodeLocationLabel(stubRoutine
->code().code()));
1203 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
1204 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_proto_list
));
1207 void JIT::emit_op_get_scoped_var(Instruction
* currentInstruction
)
1209 int skip
= currentInstruction
[3].u
.operand
;
1211 emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain
, regT0
);
1212 bool checkTopLevel
= m_codeBlock
->codeType() == FunctionCode
&& m_codeBlock
->needsFullScopeChain();
1213 ASSERT(skip
|| !checkTopLevel
);
1214 if (checkTopLevel
&& skip
--) {
1215 Jump activationNotCreated
;
1217 activationNotCreated
= branchTestPtr(Zero
, addressFor(m_codeBlock
->activationRegister()));
1218 loadPtr(Address(regT0
, JSScope::offsetOfNext()), regT0
);
1219 activationNotCreated
.link(this);
1222 loadPtr(Address(regT0
, JSScope::offsetOfNext()), regT0
);
1224 loadPtr(Address(regT0
, JSVariableObject::offsetOfRegisters()), regT0
);
1225 loadPtr(Address(regT0
, currentInstruction
[2].u
.operand
* sizeof(Register
)), regT0
);
1226 emitValueProfilingSite();
1227 emitPutVirtualRegister(currentInstruction
[1].u
.operand
);
1230 void JIT::emit_op_put_scoped_var(Instruction
* currentInstruction
)
1232 int skip
= currentInstruction
[2].u
.operand
;
1234 emitGetVirtualRegister(currentInstruction
[3].u
.operand
, regT0
);
1236 emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain
, regT1
);
1237 bool checkTopLevel
= m_codeBlock
->codeType() == FunctionCode
&& m_codeBlock
->needsFullScopeChain();
1238 ASSERT(skip
|| !checkTopLevel
);
1239 if (checkTopLevel
&& skip
--) {
1240 Jump activationNotCreated
;
1242 activationNotCreated
= branchTestPtr(Zero
, addressFor(m_codeBlock
->activationRegister()));
1243 loadPtr(Address(regT1
, JSScope::offsetOfNext()), regT1
);
1244 activationNotCreated
.link(this);
1247 loadPtr(Address(regT1
, JSScope::offsetOfNext()), regT1
);
1249 emitWriteBarrier(regT1
, regT0
, regT2
, regT3
, ShouldFilterImmediates
, WriteBarrierForVariableAccess
);
1251 loadPtr(Address(regT1
, JSVariableObject::offsetOfRegisters()), regT1
);
1252 storePtr(regT0
, Address(regT1
, currentInstruction
[1].u
.operand
* sizeof(Register
)));
1255 void JIT::emit_op_init_global_const(Instruction
* currentInstruction
)
1257 JSGlobalObject
* globalObject
= m_codeBlock
->globalObject();
1259 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, regT0
);
1261 store64(regT0
, currentInstruction
[1].u
.registerPointer
);
1262 if (Heap::isWriteBarrierEnabled())
1263 emitWriteBarrier(globalObject
, regT0
, regT2
, ShouldFilterImmediates
, WriteBarrierForVariableAccess
);
1266 void JIT::emit_op_init_global_const_check(Instruction
* currentInstruction
)
1268 emitGetVirtualRegister(currentInstruction
[2].u
.operand
, regT0
);
1270 addSlowCase(branchTest8(NonZero
, AbsoluteAddress(currentInstruction
[3].u
.predicatePointer
)));
1272 JSGlobalObject
* globalObject
= m_codeBlock
->globalObject();
1274 store64(regT0
, currentInstruction
[1].u
.registerPointer
);
1275 if (Heap::isWriteBarrierEnabled())
1276 emitWriteBarrier(globalObject
, regT0
, regT2
, ShouldFilterImmediates
, WriteBarrierForVariableAccess
);
1279 void JIT::emitSlow_op_init_global_const_check(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1283 JITStubCall
stubCall(this, cti_op_init_global_const_check
);
1284 stubCall
.addArgument(regT0
);
1285 stubCall
.addArgument(TrustedImm32(currentInstruction
[4].u
.operand
));
1289 void JIT::resetPatchGetById(RepatchBuffer
& repatchBuffer
, StructureStubInfo
* stubInfo
)
1291 repatchBuffer
.relink(stubInfo
->callReturnLocation
, cti_op_get_by_id
);
1292 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelPtrAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureToCompare
), reinterpret_cast<void*>(unusedPointer
));
1293 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelCompactAtOffset(stubInfo
->patch
.baseline
.u
.get
.displacementLabel
), 0);
1294 repatchBuffer
.relink(stubInfo
->hotPathBegin
.jumpAtOffset(stubInfo
->patch
.baseline
.u
.get
.structureCheck
), stubInfo
->callReturnLocation
.labelAtOffset(-stubInfo
->patch
.baseline
.u
.get
.coldPathBegin
));
1297 void JIT::resetPatchPutById(RepatchBuffer
& repatchBuffer
, StructureStubInfo
* stubInfo
)
1299 if (isDirectPutById(stubInfo
))
1300 repatchBuffer
.relink(stubInfo
->callReturnLocation
, cti_op_put_by_id_direct
);
1302 repatchBuffer
.relink(stubInfo
->callReturnLocation
, cti_op_put_by_id
);
1303 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelPtrAtOffset(stubInfo
->patch
.baseline
.u
.put
.structureToCompare
), reinterpret_cast<void*>(unusedPointer
));
1304 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabel32AtOffset(stubInfo
->patch
.baseline
.u
.put
.displacementLabel
), 0);
1307 #endif // USE(JSVALUE64)
1309 void JIT::emitWriteBarrier(RegisterID owner
, RegisterID value
, RegisterID scratch
, RegisterID scratch2
, WriteBarrierMode mode
, WriteBarrierUseKind useKind
)
1311 UNUSED_PARAM(owner
);
1312 UNUSED_PARAM(scratch
);
1313 UNUSED_PARAM(scratch2
);
1314 UNUSED_PARAM(useKind
);
1315 UNUSED_PARAM(value
);
1317 ASSERT(owner
!= scratch
);
1318 ASSERT(owner
!= scratch2
);
1320 #if ENABLE(WRITE_BARRIER_PROFILING)
1321 emitCount(WriteBarrierCounters::jitCounterFor(useKind
));
1325 void JIT::emitWriteBarrier(JSCell
* owner
, RegisterID value
, RegisterID scratch
, WriteBarrierMode mode
, WriteBarrierUseKind useKind
)
1327 UNUSED_PARAM(owner
);
1328 UNUSED_PARAM(scratch
);
1329 UNUSED_PARAM(useKind
);
1330 UNUSED_PARAM(value
);
1333 #if ENABLE(WRITE_BARRIER_PROFILING)
1334 emitCount(WriteBarrierCounters::jitCounterFor(useKind
));
JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
{
    if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
        structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock));
#if !ASSERT_DISABLED
        // Debug-only sanity check: the structure really should match, so trap if it does not.
        move(TrustedImmPtr(object), scratch);
        Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
        breakpoint();
        ok.link(this);
#endif
        Jump result; // Returning an unset jump this way because otherwise VC++ would complain.
        return result;
    }

    move(TrustedImmPtr(object), scratch);
    return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
}

void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch)
{
    Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch);
    if (!failureCase.isSet())
        return;

    failureCases.append(failureCase);
}

void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo)
{
    if (prototype.isNull())
        return;

    ASSERT(prototype.isCell());
    addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3);
}

bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
{
    switch (stubInfo->accessType) {
    case access_put_by_id_transition_normal:
        return false;
    case access_put_by_id_transition_direct:
        return true;
    case access_put_by_id_replace:
    case access_put_by_id_generic: {
        // For replace/generic accesses the stub info does not record directness, so inspect the
        // call target currently linked at the slow-path call site instead.
        void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
        if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
            return true;
        ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
        return false;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return false;
    }
}

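// privateCompileGetByVal compiles a get_by_val stub specialized for the observed JITArrayMode.
// The stub's type check and slow cases are linked back to the original slow-path return point,
// and the baseline code is then patched so its badTypeJump targets the new stub and its
// slow-path call goes to cti_op_get_by_val_generic.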
void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    case JITInt8Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray);
        break;
    case JITInt16Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray);
        break;
    case JITInt32Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray);
        break;
    case JITUint8Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray);
        break;
    case JITUint8ClampedArray:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray);
        break;
    case JITUint16Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray);
        break;
    case JITUint32Array:
        slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray);
        break;
    case JITFloat32Array:
        slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
        break;
    case JITFloat64Array:
        slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        patchBuffer,
        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_val_generic));
}

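// privateCompilePutByVal mirrors the routine above for op_put_by_val: a shape-specialized store
// stub is compiled, linked back to the original slow-path return point on type failure, and the
// baseline call site is redirected to cti_op_put_by_val_generic.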
void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    case JITInt8Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray, TruncateRounding);
        break;
    case JITInt16Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray, TruncateRounding);
        break;
    case JITInt32Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray, TruncateRounding);
        break;
    case JITUint8Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray, TruncateRounding);
        break;
    case JITUint8ClampedArray:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray, ClampRounding);
        break;
    case JITUint16Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray, TruncateRounding);
        break;
    case JITUint32Array:
        slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray, TruncateRounding);
        break;
    case JITFloat32Array:
        slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
        break;
    case JITFloat64Array:
        slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        patchBuffer,
        ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_val_generic));
}

JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness)
{
    // The best way to test the array type is to use the classInfo. We need to do so without
    // clobbering the register that holds the indexing type, base, and property.
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), scratch);
    badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
    loadPtr(Address(base, descriptor.m_storageOffset), base);

    switch (elementSize) {
    case 1:
        if (signedness == SignedTypedArray)
            load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
        else
            load8(BaseIndex(base, property, TimesOne), resultPayload);
        break;
    case 2:
        if (signedness == SignedTypedArray)
            load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
        else
            load16(BaseIndex(base, property, TimesTwo), resultPayload);
        break;
    case 4:
        load32(BaseIndex(base, property, TimesFour), resultPayload);
        break;
    default:
        CRASH();
        break;
    }
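
    // Uint32 values that do not fit in an int32 cannot be returned as an immediate integer, so
    // the block below re-boxes them as a double. Worked example: the raw element 0xFFFFFFFF is
    // read back as the int32 -1; converting to double and adding 2^32 yields 4294967295.0, the
    // correct unsigned value, which is then boxed according to the value representation.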
    Jump done;
    if (elementSize == 4 && signedness == UnsignedTypedArray) {
        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));

        convertInt32ToDouble(resultPayload, fpRegT0);
        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
        moveDoubleTo64(fpRegT0, resultPayload);
        sub64(tagTypeNumberRegister, resultPayload);
#else
        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

        done = jump();
        canBeInt.link(this);
    }

#if USE(JSVALUE64)
    or64(tagTypeNumberRegister, resultPayload);
#else
    move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
    if (done.isSet())
        done.link(this);
    return slowCases;
}

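// emitFloatTypedArrayGetByVal loads the element as a double (converting from float for
// Float32Array) and purifies NaN before boxing: any NaN read from the array is replaced with
// the canonical quiet NaN, since an arbitrary NaN bit pattern could collide with the encoding
// of tagged values in the JSValue representation.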
JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), scratch);
    badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
    loadPtr(Address(base, descriptor.m_storageOffset), base);

    switch (elementSize) {
    case 4:
        loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
        convertFloatToDouble(fpRegT0, fpRegT0);
        break;
    case 8:
        loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
        break;
    default:
        CRASH();
        break;
    }

    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
    static const double NaN = QNaN;
    loadDouble(&NaN, fpRegT0);
    notNaN.link(this);

#if USE(JSVALUE64)
    moveDoubleTo64(fpRegT0, resultPayload);
    sub64(tagTypeNumberRegister, resultPayload);
#else
    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif
    return slowCases;
}

JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
    unsigned value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
    badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);
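
    // For Uint8ClampedArray stores (ClampRounding) the int32 is clamped to [0, 255] below: for
    // example, storing -5 produces 0 and storing 300 produces 255. The first, unsigned
    // BelowOrEqual compare lets any value already in range skip the clamp; negative values fail
    // it (they are huge when viewed unsigned) and then fail the signed GreaterThan check, so
    // they fall through to the xor that zeroes the register.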
    if (rounding == ClampRounding) {
        ASSERT(elementSize == 1);
        ASSERT_UNUSED(signedness, signedness == UnsignedTypedArray);
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch);
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch);
        clamped.link(this);
        inBounds.link(this);
    }

    switch (elementSize) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
        break;
    }

    return slowCases;
}

JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
{
    unsigned value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
    badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
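
    // The value being stored may be either an immediate int32 or a boxed double; the two
    // representation-specific branches below converge with the value as a double in fpRegT0,
    // falling back to the slow case for anything that is not a number.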
#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
    add64(tagTypeNumberRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);

    switch (elementSize) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
        break;
    }

    return slowCases;
}

#endif // ENABLE(JIT)