/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {

#if USE(JSVALUE64)
JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*globalData, &jit, pool);
    return patchBuffer.finalizeCode().m_code;
}
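// As used from the get_by_val slow path below, this stub expects the base string
// in regT0 and the int32 index in regT1. On success it returns the cached
// single-character JSString in regT0; a zero result signals failure - an
// out-of-bounds index, a rope (m_fiberCount != 0), or a character outside the
// 0x00-0xFF range covered by the smallStrings table.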
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitPutVirtualRegister(dst);
}
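// A zero loaded from the vector means the slot is a hole (ArrayStorage stores
// empty slots as JSValue(), i.e. 0), so the branchTestPtr(Zero, regT0) above
// routes holes to the slow case rather than returning the empty value.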
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
    Jump failed = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
{
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    move(regT1, regT0);
    add32(TrustedImm32(1), regT0);
    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
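// The 'empty' path above handles a write to a hole that is still inside the
// allocated vector: it bumps m_numValuesInVector, and when the index is at or
// beyond m_length it also grows m_length to index + 1, before looping back to
// storeResult to perform the actual store.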
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}
void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}
void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.call(resultVReg);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.operand;

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
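// The structure, prototype-object, prototype-structure and function immediates
// planted above sit at fixed offsets (the patchOffsetMethodCheck* constants,
// asserted on just after the sequence) from info.structureToCompare, so that
// patchMethodCallProto below can later overwrite them with the observed values,
// turning the check into a near-free load of the cached function.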
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif // ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}
void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also use the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
    DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}
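// In effect the hot path compiles to a guarded load whose two immediates start
// out as patchable placeholders, roughly:
//
//     if (base->structure() != <patched Structure*>)       // patchGetByIdDefaultStructure
//         goto slowCase;                                   // later relinked to stubs
//     result = base->m_propertyStorage[<patched offset>];  // patchGetByIdDefaultOffset
//
// patchGetByIdSelf below fills in both placeholders once the first lookup resolves.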
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    Label coldPathBegin(this);
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchPutByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
// Compile a store into an object's property storage. May overwrite the
// value in the base register.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += JSObject::offsetOfInlineStorage();
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
    storePtr(value, Address(base, offset));
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage()) {
        offset += JSObject::offsetOfInlineStorage();
        loadPtr(Address(base, offset), result);
    } else {
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), result);
        loadPtr(Address(result, offset), result);
    }
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
{
    loadPtr(static_cast<void*>(&base->m_propertyStorage[cachedOffset]), result);
}
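// Of the three compileGetDirectOffset overloads, the register-offset form is
// used by get_by_pname, where the slot index is only known at runtime; the
// Structure-based forms fold the inline-vs-out-of-line storage decision in at
// compile time; and the JSObject* form bakes the property storage address of a
// known object (typically a prototype) directly into the stub.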
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
    testPrototype(oldStructure->storedPrototype(), failureCases);

    // ecx = baseObject->m_structure
    if (!direct) {
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
            testPrototype((*it)->storedPrototype(), failureCases);
    }

    // emit a call only if storage realloc is needed
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    storePtrWithWriteBarrier(TrustedImmPtr(newStructure), regT0, Address(regT0, JSCell::structureOffset()));

    // write the value
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
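// The transition stub built above handles a put that changes the object's shape:
// it re-checks the old Structure (and, for non-direct puts, the prototype chain),
// calls out to reallocate property storage only when the capacity actually
// changes, then writes the new Structure pointer and the value inline. The
// caller's patched call is relinked straight to entryLabel.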
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
    Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
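// Unlike the property caches, the array-length stub produces an int32 directly:
// m_length is re-tagged via emitFastArithIntToImmNoCheck, and a length too large
// to be an int32 (which appears negative under the signed LessThan compare)
// falls back to the slow case.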
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;

    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

#endif // USE(JSVALUE64)
void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
    if (prototype.isNull())
        return;

    ASSERT(prototype.isCell());
    move(TrustedImmPtr(prototype.asCell()), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
}
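// testPrototype is the building block for the chain checks above: a null
// prototype terminates the chain and needs no guard, while a cell prototype is
// pinned by comparing its Structure* against the value observed at compile time.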
void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSObjectWithGlobalObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    CodeLocationDataLabelPtr structureLocation = methodCallLinkInfo.cachedStructure.location();
    methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure);

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), codeBlock->ownerExecutable(), prototypeStructure);
    methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), codeBlock->ownerExecutable(), proto);
    methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), codeBlock->ownerExecutable(), callee);
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}
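// Once patched, the inline method check never needs this stub again for the
// call site; relinking the caller to plain cti_op_get_by_id means any later
// mismatch is handled as an ordinary cached get_by_id.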
} // namespace JSC

#endif // ENABLE(JIT)