/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if !USE(JSVALUE32_64)

#include "JIT.h"

#if ENABLE(JIT)
#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {
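// Stub used by the get_by_val slow path when the base is a JSString: it bails out (leaving 0 in
// regT0) if the string is a rope, the index is out of range, or the character code is not below
// 0x100; otherwise it returns the cached single-character string from the small strings table.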
PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
#if USE(JSVALUE64)
    jit.zeroExtend32ToPtr(regT1, regT1);
#else
    jit.emitFastArithImmToInt(regT1);
#endif

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);

    failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
    jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(Imm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(&jit, pool);
    return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
}
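// Fast path for get_by_val: only JSArray accesses that hit an existing slot in the storage vector
// are handled inline; non-array cells, out-of-range indices and holes all fall through to the slow case.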
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against the m_vectorLength - which will always fail if
    // the number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitPutVirtualRegister(dst);
}
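// Slow path for get_by_val: string bases are handled via the stub generated above before
// giving up and calling the cti_op_get_by_val C++ stub.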
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCode().addressForCall());
    Jump failed = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
{
    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));

    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
    Jump finishedLoad = jump();
    notUsingInlineStorage.link(this);
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
    finishedLoad.link(this);
}
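// Fast path for get_by_pname (property reads inside for-in): valid only while the iterated property
// name matches the expected register and the base object still has the iterator's cached Structure;
// otherwise each check falls through to the slow case.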
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(Imm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);

    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    move(regT1, regT0);
    add32(Imm32(1), regT0);
    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
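// The following opcodes are not inlined; they simply marshal their operands and call through to
// the corresponding C++ stubs.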
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}
#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.call(resultVReg);

    m_propertyAccessInstructionIndex++;
}
void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.operand;

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
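// op_method_check optimizes the common "obj.method(...)" pattern: it checks the base object's
// Structure and its prototype's Structure inline and, once the site has been patched, materializes
// the cached JSFunction as a constant instead of performing the property load.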
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif // ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
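// Inline-cached get_by_id: the hot path plants a patchable Structure check and a patchable
// property-storage load inside an uninterrupted sequence, so that the repatching code later in
// this file can rely on fixed instruction offsets from hotPathBegin.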
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}
void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and we also need the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    Label coldPathBegin(this);

    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure and the object offset, we store a single pointer
    // to the code just after the arguments have been loaded into registers ('hotPathBegin'), and we
    // generate code such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
// Compile a store into an object's property storage. May overwrite the
// value in base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    storePtr(value, Address(base, offset));
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    loadPtr(Address(base, offset), result);
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}
void JIT::testPrototype(Structure* structure, JumpList& failureCases)
{
    if (structure->m_prototype.isNull())
        return;

    move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
    move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
}
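// Generates the stub for a put_by_id that adds a new property and transitions the object from
// oldStructure to newStructure: verify the old Structure (and, for non-direct puts, the prototype
// chain), grow the property storage through a stub call if required, adjust the Structure
// refcounts, swap in the new Structure pointer, and finally store the value.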
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    testPrototype(oldStructure, failureCases);

    // ecx = baseObject->m_structure
    if (!direct) {
        for (RefPtr<Structure>* it = chain->head(); *it; ++it)
            testPrototype(it->get(), failureCases);
    }

    // emit a call only if storage realloc is needed
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Assumes m_refCount can be decremented easily; the refcount decrement is safe as
    // the codeblock should ensure oldStructure->m_refCount > 0.
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // write the value
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
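// The repatching helpers below are called from the C++ slow paths once a property access has been
// resolved: they overwrite the patchable Structure pointer and storage offset planted by the hot
// path, and relink the slow-path call so the same site is not patched more than once.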
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent store's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
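// Stub for a get_by_id whose cached property lives on the direct prototype: check both the base
// and prototype Structures, then either load the value directly from the prototype, call a cached
// getter through cti_op_get_by_id_getter_stub, or call a custom getter through
// cti_op_get_by_id_custom_stub.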
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    bool needsStubLink = false;

    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
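// Polymorphic self-access list: each additional Structure seen at the same get_by_id site gets its
// own stub, and a stub's failure case is chained to the previously generated stub (or to the
// original slow case for the first entry).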
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        if (!structure->isUsingInlineStorage()) {
            move(regT0, regT1);
            compileGetDirectOffset(regT1, regT1, structure, cachedOffset);
        } else
            compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
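// Prototype-chain variants: walk the cached StructureChain, checking that each prototype's
// Structure is unchanged, before loading the property from the object at the end of the chain.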
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // !USE(JSVALUE32_64)