/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());

    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);

    cont8Bit.link(&jit);
    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
    return patchBuffer.finalizeCode();
}
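// Note: this thunk is entered from the get_by_val slow path below with the base JSString in
// regT0 and the int32 index in regT1. On success it leaves the cached single-character string
// in regT0; on any failure (wrong class, character data not available, index out of range, or
// a character code >= 0x100) it returns 0 in regT0, which the caller tests with
// branchTestPtr(Zero, regT0).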
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitValueProfilingSite();
    emitPutVirtualRegister(dst);
}
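// Note: with the JSVALUE64 encoding an empty JSValue is represented as 0, so the final
// branchTestPtr(Zero, regT0) above catches holes in the array storage and routes them to the
// slow case instead of returning a garbage value.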
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
    emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);

    emitValueProfilingSite();
}
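// Note on the slow path above: before falling back to cti_op_get_by_val, it first checks
// whether the base is a JSString and, if so, tries the stringGetByValStubGenerator thunk;
// only a zero result from that stub (or a non-string, non-array base) reaches the generic
// C++ stub call.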
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
{
    loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
}
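// Note: the fast path above distinguishes two in-vector cases. A slot that already holds a
// value is simply overwritten at storeResult; a store into an empty slot bumps
// m_numValuesInVector and, when the index is at or beyond m_length, also grows m_length to
// index + 1 before looping back to storeResult. Indices outside the vector were already sent
// to the slow case by the vectorLength check.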
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(regT0);
    stubPutByValCall.addArgument(property, regT2);
    stubPutByValCall.addArgument(value, regT2);
    stubPutByValCall.call();
}
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}
void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
    stubCall.call();
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(baseVReg, ident);

    match.link(this);
    emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);

    m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction);
}
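// Note: the method check caches the base object's Structure, the prototype object, the
// prototype's Structure and the resolved function as patchable constants. Once
// patchMethodCallProto() (later in this file) fills them in, a repeated "o.f()" only performs
// two structure compares and a constant move; any mismatch falls through to the ordinary
// get_by_id compiled just after the checks.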
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
    emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(baseVReg, ident);
    emitValueProfilingSite();
    emitPutVirtualRegister(resultVReg);
}
void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);

    DataLabelPtr structureToCompare;
    PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);

    loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
    DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel, putResult));
}
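// Note: the hot path compares against the placeholder patchGetByIdDefaultStructure and loads
// from the placeholder offset patchGetByIdDefaultOffset; both are rewritten later by
// patchGetByIdSelf or by one of the privateCompileGetById* stubs below once a real Structure
// and property offset are known.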
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
    emitValueProfilingSite();
}
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    Label coldPathBegin(this);
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));

    loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel));
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
}
// Compile a store into an object's property storage. May overwrite the
// value in objectReg.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
    storePtr(value, Address(base, offset));
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), result);
    loadPtr(Address(result, offset), result);
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
{
    loadPtr(base->addressOfPropertyStorage(), result);
    loadPtr(Address(result, cachedOffset * sizeof(WriteBarrier<Unknown>)), result);
}
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));

    testPrototype(oldStructure->storedPrototype(), failureCases);

    ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());

    // ecx = baseObject->m_structure
    if (!direct) {
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
            testPrototype((*it)->storedPrototype(), failureCases);
        }
    }

    // If we succeed in all of our checks, and the code was optimizable, then make sure we
    // decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
    if (m_codeBlock->canCompileWithDFG()) {
        sub32(
            TrustedImm32(1),
            AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
    }
#endif

    // emit a call only if storage realloc is needed
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline is called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(TrustedImmPtr(newStructure));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Planting the new structure triggers the write barrier so we need
    // an unconditional barrier here.
    emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);

    ASSERT(newStructure->classInfo() == oldStructure->classInfo());
    storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
    compilePutDirectOffset(regT0, regT1, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    stubInfo->stubRoutine = patchBuffer.finalizeCode();
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
}
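// Note: the transition stub above checks the old Structure and (for non-direct puts) every
// prototype along the chain, optionally calls cti_op_put_by_id_transition_realloc to grow the
// property storage, then plants the new Structure pointer and stores the value at its offset;
// any failed check tail-calls the generic put_by_id failure stub via failureCall.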
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offset);
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offset);
}
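// Note: patchGetByIdSelf and patchPutByIdReplace never regenerate code; they only rewrite the
// Structure immediate and the load/store displacement planted by compileGetByIdHotPath and
// emit_op_put_by_id, and relink the slow-path call so each access site is patched at most once.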
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
    Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = patchBuffer.finalizeCode();

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure has not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;

    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = patchBuffer.finalizeCode();

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(regT0, regT0, cachedOffset);
    }
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();

    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure has not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    }

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
    prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }

    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    }

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    CodeRef stubRoutine = patchBuffer.finalizeCode();

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
}
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    // Track the stub we have created so that it will be deleted later.
    CodeRef stubRoutine = patchBuffer.finalizeCode();
    stubInfo->stubRoutine = stubRoutine;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
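// Note: the privateCompileGetById* stubs above all follow the same skeleton: structure
// check(s), an optional prototype-chain walk via testPrototype(), either a direct offset load
// or a call into the getter/custom stub, then LinkBuffer finalization and a relink of the
// inline structure-check jump so the next execution of the access site enters the new stub.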
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[3].u.operand;

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
    loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
    emitValueProfilingSite();
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[2].u.operand;

    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
    loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);

    emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);

    loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
    storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
}
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    JSVariableObject* globalObject = m_codeBlock->globalObject();
    loadPtr(&globalObject->m_registers, regT0);
    loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
    emitValueProfilingSite();
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = m_codeBlock->globalObject();

    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);

    move(TrustedImmPtr(globalObject), regT1);
    loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
    storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
    emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
    repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
}
void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    if (isDirectPutById(stubInfo))
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
    else
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
}
#endif // USE(JSVALUE64)
void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);
    UNUSED_PARAM(value);

    ASSERT(owner != scratch);
    ASSERT(owner != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif

#if ENABLE(GGC)
    Jump filterCells;
    if (mode == ShouldFilterImmediates)
        filterCells = emitJumpIfNotJSCell(value);
    move(owner, scratch);
    andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch);
    move(owner, scratch2);
    // consume additional 8 bits as we're using an approximate filter
    rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
    andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
    Jump filter = branchTest8(Zero, BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
    move(owner, scratch2);
    rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
    andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
    store8(TrustedImm32(1), BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfCards()));
    filter.link(this);
    if (mode == ShouldFilterImmediates)
        filterCells.link(this);
#endif
}
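// Note on the card-marking arithmetic above: masking the owner pointer with
// MarkedBlock::blockMask yields the owning block's base address; the (atomShift + 8) shift and
// (atomMask >> 8) mask form the coarse, approximate index into the block's mark bytes used as
// a filter (a zero mark byte skips the card write); cardShift/cardMask then select the card
// byte that is dirtied with store8(1, ...).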
void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);
    UNUSED_PARAM(value);

#if ENABLE(WRITE_BARRIER_PROFILING)
    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif

#if ENABLE(GGC)
    Jump filterCells;
    if (mode == ShouldFilterImmediates)
        filterCells = emitJumpIfNotJSCell(value);
    uint8_t* cardAddress = Heap::addressOfCardFor(owner);
    move(TrustedImmPtr(cardAddress), scratch);
    store8(TrustedImm32(1), Address(scratch));
    if (mode == ShouldFilterImmediates)
        filterCells.link(this);
#endif
}
void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
    if (prototype.isNull())
        return;

    ASSERT(prototype.isCell());
    move(TrustedImmPtr(prototype.asCell()), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
}
void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, StructureStubInfo& stubInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    CodeLocationDataLabelPtr structureLocation = methodCallLinkInfo.cachedStructure.location();
    methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure);

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoStructureToCompare), codeBlock->ownerExecutable(), prototypeStructure);
    methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoObj), codeBlock->ownerExecutable(), proto);
    methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckPutFunction), codeBlock->ownerExecutable(), callee);
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update));
}
bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
{
    switch (stubInfo->accessType) {
    case access_put_by_id_transition_normal:
        return false;
    case access_put_by_id_transition_direct:
        return true;
    case access_put_by_id_replace:
    case access_put_by_id_generic: {
        void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
        if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
            return true;
        ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
        return false;
    }
    default:
        ASSERT_NOT_REACHED();
        return false;
    }
}
#endif // ENABLE(JIT)