2 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
33 #include "JITStubCall.h"
35 #include "JSFunction.h"
36 #include "JSPropertyNameIterator.h"
37 #include "Interpreter.h"
38 #include "LinkBuffer.h"
39 #include "RepatchBuffer.h"
40 #include "ResultType.h"
41 #include "SamplingTool.h"
53 void JIT::emit_op_put_by_index(Instruction
* currentInstruction
)
55 unsigned base
= currentInstruction
[1].u
.operand
;
56 unsigned property
= currentInstruction
[2].u
.operand
;
57 unsigned value
= currentInstruction
[3].u
.operand
;
59 JITStubCall
stubCall(this, cti_op_put_by_index
);
60 stubCall
.addArgument(base
);
61 stubCall
.addArgument(Imm32(property
));
62 stubCall
.addArgument(value
);
66 void JIT::emit_op_put_getter(Instruction
* currentInstruction
)
68 unsigned base
= currentInstruction
[1].u
.operand
;
69 unsigned property
= currentInstruction
[2].u
.operand
;
70 unsigned function
= currentInstruction
[3].u
.operand
;
72 JITStubCall
stubCall(this, cti_op_put_getter
);
73 stubCall
.addArgument(base
);
74 stubCall
.addArgument(ImmPtr(&m_codeBlock
->identifier(property
)));
75 stubCall
.addArgument(function
);
79 void JIT::emit_op_put_setter(Instruction
* currentInstruction
)
81 unsigned base
= currentInstruction
[1].u
.operand
;
82 unsigned property
= currentInstruction
[2].u
.operand
;
83 unsigned function
= currentInstruction
[3].u
.operand
;
85 JITStubCall
stubCall(this, cti_op_put_setter
);
86 stubCall
.addArgument(base
);
87 stubCall
.addArgument(ImmPtr(&m_codeBlock
->identifier(property
)));
88 stubCall
.addArgument(function
);
92 void JIT::emit_op_del_by_id(Instruction
* currentInstruction
)
94 unsigned dst
= currentInstruction
[1].u
.operand
;
95 unsigned base
= currentInstruction
[2].u
.operand
;
96 unsigned property
= currentInstruction
[3].u
.operand
;
98 JITStubCall
stubCall(this, cti_op_del_by_id
);
99 stubCall
.addArgument(base
);
100 stubCall
.addArgument(ImmPtr(&m_codeBlock
->identifier(property
)));
105 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
107 /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
109 // Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
110 void JIT::emit_op_method_check(Instruction
*) {}
111 void JIT::emitSlow_op_method_check(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&) { ASSERT_NOT_REACHED(); }
112 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
113 #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
116 void JIT::emit_op_get_by_val(Instruction
* currentInstruction
)
118 unsigned dst
= currentInstruction
[1].u
.operand
;
119 unsigned base
= currentInstruction
[2].u
.operand
;
120 unsigned property
= currentInstruction
[3].u
.operand
;
122 JITStubCall
stubCall(this, cti_op_get_by_val
);
123 stubCall
.addArgument(base
);
124 stubCall
.addArgument(property
);
128 void JIT::emitSlow_op_get_by_val(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
130 ASSERT_NOT_REACHED();
133 void JIT::emit_op_put_by_val(Instruction
* currentInstruction
)
135 unsigned base
= currentInstruction
[1].u
.operand
;
136 unsigned property
= currentInstruction
[2].u
.operand
;
137 unsigned value
= currentInstruction
[3].u
.operand
;
139 JITStubCall
stubCall(this, cti_op_put_by_val
);
140 stubCall
.addArgument(base
);
141 stubCall
.addArgument(property
);
142 stubCall
.addArgument(value
);
146 void JIT::emitSlow_op_put_by_val(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
148 ASSERT_NOT_REACHED();
151 void JIT::emit_op_get_by_id(Instruction
* currentInstruction
)
153 int dst
= currentInstruction
[1].u
.operand
;
154 int base
= currentInstruction
[2].u
.operand
;
155 int ident
= currentInstruction
[3].u
.operand
;
157 JITStubCall
stubCall(this, cti_op_get_by_id_generic
);
158 stubCall
.addArgument(base
);
159 stubCall
.addArgument(ImmPtr(&(m_codeBlock
->identifier(ident
))));
162 m_propertyAccessInstructionIndex
++;
165 void JIT::emitSlow_op_get_by_id(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
167 m_propertyAccessInstructionIndex
++;
168 ASSERT_NOT_REACHED();
171 void JIT::emit_op_put_by_id(Instruction
* currentInstruction
)
173 int base
= currentInstruction
[1].u
.operand
;
174 int ident
= currentInstruction
[2].u
.operand
;
175 int value
= currentInstruction
[3].u
.operand
;
177 JITStubCall
stubCall(this, cti_op_put_by_id_generic
);
178 stubCall
.addArgument(base
);
179 stubCall
.addArgument(ImmPtr(&(m_codeBlock
->identifier(ident
))));
180 stubCall
.addArgument(value
);
183 m_propertyAccessInstructionIndex
++;
186 void JIT::emitSlow_op_put_by_id(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
188 m_propertyAccessInstructionIndex
++;
189 ASSERT_NOT_REACHED();
192 #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
194 /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
196 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
198 void JIT::emit_op_method_check(Instruction
* currentInstruction
)
200 // Assert that the following instruction is a get_by_id.
201 ASSERT(m_interpreter
->getOpcodeID((currentInstruction
+ OPCODE_LENGTH(op_method_check
))->u
.opcode
) == op_get_by_id
);
203 currentInstruction
+= OPCODE_LENGTH(op_method_check
);
205 // Do the method check - check the object & its prototype's structure inline (this is the common case).
206 m_methodCallCompilationInfo
.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex
));
207 MethodCallCompilationInfo
& info
= m_methodCallCompilationInfo
.last();
209 int dst
= currentInstruction
[1].u
.operand
;
210 int base
= currentInstruction
[2].u
.operand
;
212 emitLoad(base
, regT1
, regT0
);
213 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
215 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck
);
217 Jump structureCheck
= branchPtrWithPatch(NotEqual
, Address(regT0
, OBJECT_OFFSETOF(JSCell
, m_structure
)), info
.structureToCompare
, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure
)));
218 DataLabelPtr protoStructureToCompare
, protoObj
= moveWithPatch(ImmPtr(0), regT2
);
219 Jump protoStructureCheck
= branchPtrWithPatch(NotEqual
, Address(regT2
, OBJECT_OFFSETOF(JSCell
, m_structure
)), protoStructureToCompare
, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure
)));
221 // This will be relinked to load the function without doing a load.
222 DataLabelPtr putFunction
= moveWithPatch(ImmPtr(0), regT0
);
224 END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck
);
226 move(Imm32(JSValue::CellTag
), regT1
);
229 ASSERT(differenceBetween(info
.structureToCompare
, protoObj
) == patchOffsetMethodCheckProtoObj
);
230 ASSERT(differenceBetween(info
.structureToCompare
, protoStructureToCompare
) == patchOffsetMethodCheckProtoStruct
);
231 ASSERT(differenceBetween(info
.structureToCompare
, putFunction
) == patchOffsetMethodCheckPutFunction
);
233 // Link the failure cases here.
234 structureCheck
.link(this);
235 protoStructureCheck
.link(this);
237 // Do a regular(ish) get_by_id (the slow case will be link to
238 // cti_op_get_by_id_method_check instead of cti_op_get_by_id.
239 compileGetByIdHotPath();
242 emitStore(dst
, regT1
, regT0
);
243 map(m_bytecodeIndex
+ OPCODE_LENGTH(op_method_check
), dst
, regT1
, regT0
);
245 // We've already generated the following get_by_id, so make sure it's skipped over.
246 m_bytecodeIndex
+= OPCODE_LENGTH(op_get_by_id
);
249 void JIT::emitSlow_op_method_check(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
251 currentInstruction
+= OPCODE_LENGTH(op_method_check
);
253 int dst
= currentInstruction
[1].u
.operand
;
254 int base
= currentInstruction
[2].u
.operand
;
255 int ident
= currentInstruction
[3].u
.operand
;
257 compileGetByIdSlowCase(dst
, base
, &(m_codeBlock
->identifier(ident
)), iter
, true);
259 // We've already generated the following get_by_id, so make sure it's skipped over.
260 m_bytecodeIndex
+= OPCODE_LENGTH(op_get_by_id
);
263 #else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
265 // Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
266 void JIT::emit_op_method_check(Instruction
*) {}
267 void JIT::emitSlow_op_method_check(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&) { ASSERT_NOT_REACHED(); }
271 void JIT::emit_op_get_by_val(Instruction
* currentInstruction
)
273 unsigned dst
= currentInstruction
[1].u
.operand
;
274 unsigned base
= currentInstruction
[2].u
.operand
;
275 unsigned property
= currentInstruction
[3].u
.operand
;
277 emitLoad2(base
, regT1
, regT0
, property
, regT3
, regT2
);
279 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
280 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
281 addSlowCase(branchPtr(NotEqual
, Address(regT0
), ImmPtr(m_globalData
->jsArrayVPtr
)));
283 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_storage
)), regT3
);
284 addSlowCase(branch32(AboveOrEqual
, regT2
, Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_vectorLength
))));
286 load32(BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + 4), regT1
); // tag
287 load32(BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), regT0
); // payload
288 addSlowCase(branch32(Equal
, regT1
, Imm32(JSValue::EmptyValueTag
)));
290 emitStore(dst
, regT1
, regT0
);
291 map(m_bytecodeIndex
+ OPCODE_LENGTH(op_get_by_val
), dst
, regT1
, regT0
);
294 void JIT::emitSlow_op_get_by_val(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
296 unsigned dst
= currentInstruction
[1].u
.operand
;
297 unsigned base
= currentInstruction
[2].u
.operand
;
298 unsigned property
= currentInstruction
[3].u
.operand
;
300 linkSlowCase(iter
); // property int32 check
301 linkSlowCaseIfNotJSCell(iter
, base
); // base cell check
302 linkSlowCase(iter
); // base array check
303 linkSlowCase(iter
); // vector length check
304 linkSlowCase(iter
); // empty value
306 JITStubCall
stubCall(this, cti_op_get_by_val
);
307 stubCall
.addArgument(base
);
308 stubCall
.addArgument(property
);
312 void JIT::emit_op_put_by_val(Instruction
* currentInstruction
)
314 unsigned base
= currentInstruction
[1].u
.operand
;
315 unsigned property
= currentInstruction
[2].u
.operand
;
316 unsigned value
= currentInstruction
[3].u
.operand
;
318 emitLoad2(base
, regT1
, regT0
, property
, regT3
, regT2
);
320 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
321 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
322 addSlowCase(branchPtr(NotEqual
, Address(regT0
), ImmPtr(m_globalData
->jsArrayVPtr
)));
323 addSlowCase(branch32(AboveOrEqual
, regT2
, Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_vectorLength
))));
325 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_storage
)), regT3
);
327 Jump empty
= branch32(Equal
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + 4), Imm32(JSValue::EmptyValueTag
));
329 Label
storeResult(this);
330 emitLoad(value
, regT1
, regT0
);
331 store32(regT0
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]))); // payload
332 store32(regT1
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + 4)); // tag
336 add32(Imm32(1), Address(regT3
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
337 branch32(Below
, regT2
, Address(regT3
, OBJECT_OFFSETOF(ArrayStorage
, m_length
))).linkTo(storeResult
, this);
339 add32(Imm32(1), regT2
, regT0
);
340 store32(regT0
, Address(regT3
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
341 jump().linkTo(storeResult
, this);
346 void JIT::emitSlow_op_put_by_val(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
348 unsigned base
= currentInstruction
[1].u
.operand
;
349 unsigned property
= currentInstruction
[2].u
.operand
;
350 unsigned value
= currentInstruction
[3].u
.operand
;
352 linkSlowCase(iter
); // property int32 check
353 linkSlowCaseIfNotJSCell(iter
, base
); // base cell check
354 linkSlowCase(iter
); // base not array check
355 linkSlowCase(iter
); // in vector check
357 JITStubCall
stubPutByValCall(this, cti_op_put_by_val
);
358 stubPutByValCall
.addArgument(base
);
359 stubPutByValCall
.addArgument(property
);
360 stubPutByValCall
.addArgument(value
);
361 stubPutByValCall
.call();
364 void JIT::emit_op_get_by_id(Instruction
* currentInstruction
)
366 int dst
= currentInstruction
[1].u
.operand
;
367 int base
= currentInstruction
[2].u
.operand
;
369 emitLoad(base
, regT1
, regT0
);
370 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
371 compileGetByIdHotPath();
372 emitStore(dst
, regT1
, regT0
);
373 map(m_bytecodeIndex
+ OPCODE_LENGTH(op_get_by_id
), dst
, regT1
, regT0
);
376 void JIT::compileGetByIdHotPath()
378 // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
379 // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
380 // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
381 // to jump back to if one of these trampolies finds a match.
383 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath
);
385 Label
hotPathBegin(this);
386 m_propertyAccessCompilationInfo
[m_propertyAccessInstructionIndex
].hotPathBegin
= hotPathBegin
;
387 m_propertyAccessInstructionIndex
++;
389 DataLabelPtr structureToCompare
;
390 Jump structureCheck
= branchPtrWithPatch(NotEqual
, Address(regT0
, OBJECT_OFFSETOF(JSCell
, m_structure
)), structureToCompare
, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure
)));
391 addSlowCase(structureCheck
);
392 ASSERT(differenceBetween(hotPathBegin
, structureToCompare
) == patchOffsetGetByIdStructure
);
393 ASSERT(differenceBetween(hotPathBegin
, structureCheck
) == patchOffsetGetByIdBranchToSlowCase
);
395 Label externalLoad
= loadPtrWithPatchToLEA(Address(regT0
, OBJECT_OFFSETOF(JSObject
, m_externalStorage
)), regT2
);
396 Label
externalLoadComplete(this);
397 ASSERT(differenceBetween(hotPathBegin
, externalLoad
) == patchOffsetGetByIdExternalLoad
);
398 ASSERT(differenceBetween(externalLoad
, externalLoadComplete
) == patchLengthGetByIdExternalLoad
);
400 DataLabel32 displacementLabel1
= loadPtrWithAddressOffsetPatch(Address(regT2
, patchGetByIdDefaultOffset
), regT0
); // payload
401 ASSERT(differenceBetween(hotPathBegin
, displacementLabel1
) == patchOffsetGetByIdPropertyMapOffset1
);
402 DataLabel32 displacementLabel2
= loadPtrWithAddressOffsetPatch(Address(regT2
, patchGetByIdDefaultOffset
), regT1
); // tag
403 ASSERT(differenceBetween(hotPathBegin
, displacementLabel2
) == patchOffsetGetByIdPropertyMapOffset2
);
405 Label
putResult(this);
406 ASSERT(differenceBetween(hotPathBegin
, putResult
) == patchOffsetGetByIdPutResult
);
408 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath
);
411 void JIT::emitSlow_op_get_by_id(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
413 int dst
= currentInstruction
[1].u
.operand
;
414 int base
= currentInstruction
[2].u
.operand
;
415 int ident
= currentInstruction
[3].u
.operand
;
417 compileGetByIdSlowCase(dst
, base
, &(m_codeBlock
->identifier(ident
)), iter
);
420 void JIT::compileGetByIdSlowCase(int dst
, int base
, Identifier
* ident
, Vector
<SlowCaseEntry
>::iterator
& iter
, bool isMethodCheck
)
422 // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
423 // so that we only need track one pointer into the slow case code - we track a pointer to the location
424 // of the call (which we can use to look up the patch information), but should a array-length or
425 // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
426 // the distance from the call to the head of the slow case.
427 linkSlowCaseIfNotJSCell(iter
, base
);
430 BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase
);
433 Label
coldPathBegin(this);
435 JITStubCall
stubCall(this, isMethodCheck
? cti_op_get_by_id_method_check
: cti_op_get_by_id
);
436 stubCall
.addArgument(regT1
, regT0
);
437 stubCall
.addArgument(ImmPtr(ident
));
438 Call call
= stubCall
.call(dst
);
440 END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase
);
442 ASSERT(differenceBetween(coldPathBegin
, call
) == patchOffsetGetByIdSlowCaseCall
);
444 // Track the location of the call; this will be used to recover patch information.
445 m_propertyAccessCompilationInfo
[m_propertyAccessInstructionIndex
].callReturnLocation
= call
;
446 m_propertyAccessInstructionIndex
++;
449 void JIT::emit_op_put_by_id(Instruction
* currentInstruction
)
451 // In order to be able to patch both the Structure, and the object offset, we store one pointer,
452 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
453 // such that the Structure & offset are always at the same distance from this.
455 int base
= currentInstruction
[1].u
.operand
;
456 int value
= currentInstruction
[3].u
.operand
;
458 emitLoad2(base
, regT1
, regT0
, value
, regT3
, regT2
);
460 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
462 BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById
);
464 Label
hotPathBegin(this);
465 m_propertyAccessCompilationInfo
[m_propertyAccessInstructionIndex
].hotPathBegin
= hotPathBegin
;
466 m_propertyAccessInstructionIndex
++;
468 // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
469 DataLabelPtr structureToCompare
;
470 addSlowCase(branchPtrWithPatch(NotEqual
, Address(regT0
, OBJECT_OFFSETOF(JSCell
, m_structure
)), structureToCompare
, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure
))));
471 ASSERT(differenceBetween(hotPathBegin
, structureToCompare
) == patchOffsetPutByIdStructure
);
473 // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
474 Label externalLoad
= loadPtrWithPatchToLEA(Address(regT0
, OBJECT_OFFSETOF(JSObject
, m_externalStorage
)), regT0
);
475 Label
externalLoadComplete(this);
476 ASSERT(differenceBetween(hotPathBegin
, externalLoad
) == patchOffsetPutByIdExternalLoad
);
477 ASSERT(differenceBetween(externalLoad
, externalLoadComplete
) == patchLengthPutByIdExternalLoad
);
479 DataLabel32 displacementLabel1
= storePtrWithAddressOffsetPatch(regT2
, Address(regT0
, patchGetByIdDefaultOffset
)); // payload
480 DataLabel32 displacementLabel2
= storePtrWithAddressOffsetPatch(regT3
, Address(regT0
, patchGetByIdDefaultOffset
)); // tag
482 END_UNINTERRUPTED_SEQUENCE(sequencePutById
);
484 ASSERT(differenceBetween(hotPathBegin
, displacementLabel1
) == patchOffsetPutByIdPropertyMapOffset1
);
485 ASSERT(differenceBetween(hotPathBegin
, displacementLabel2
) == patchOffsetPutByIdPropertyMapOffset2
);
488 void JIT::emitSlow_op_put_by_id(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
490 int base
= currentInstruction
[1].u
.operand
;
491 int ident
= currentInstruction
[2].u
.operand
;
493 linkSlowCaseIfNotJSCell(iter
, base
);
496 JITStubCall
stubCall(this, cti_op_put_by_id
);
497 stubCall
.addArgument(regT1
, regT0
);
498 stubCall
.addArgument(ImmPtr(&(m_codeBlock
->identifier(ident
))));
499 stubCall
.addArgument(regT3
, regT2
);
500 Call call
= stubCall
.call();
502 // Track the location of the call; this will be used to recover patch information.
503 m_propertyAccessCompilationInfo
[m_propertyAccessInstructionIndex
].callReturnLocation
= call
;
504 m_propertyAccessInstructionIndex
++;
507 // Compile a store into an object's property storage. May overwrite base.
508 void JIT::compilePutDirectOffset(RegisterID base
, RegisterID valueTag
, RegisterID valuePayload
, Structure
* structure
, size_t cachedOffset
)
510 int offset
= cachedOffset
;
511 if (structure
->isUsingInlineStorage())
512 offset
+= OBJECT_OFFSETOF(JSObject
, m_inlineStorage
) / sizeof(Register
);
514 loadPtr(Address(base
, OBJECT_OFFSETOF(JSObject
, m_externalStorage
)), base
);
515 emitStore(offset
, valueTag
, valuePayload
, base
);
518 // Compile a load from an object's property storage. May overwrite base.
519 void JIT::compileGetDirectOffset(RegisterID base
, RegisterID resultTag
, RegisterID resultPayload
, Structure
* structure
, size_t cachedOffset
)
521 int offset
= cachedOffset
;
522 if (structure
->isUsingInlineStorage())
523 offset
+= OBJECT_OFFSETOF(JSObject
, m_inlineStorage
) / sizeof(Register
);
525 loadPtr(Address(base
, OBJECT_OFFSETOF(JSObject
, m_externalStorage
)), base
);
526 emitLoad(offset
, resultTag
, resultPayload
, base
);
529 void JIT::compileGetDirectOffset(JSObject
* base
, RegisterID temp
, RegisterID resultTag
, RegisterID resultPayload
, size_t cachedOffset
)
531 if (base
->isUsingInlineStorage()) {
532 load32(reinterpret_cast<char*>(&base
->m_inlineStorage
[cachedOffset
]), resultPayload
);
533 load32(reinterpret_cast<char*>(&base
->m_inlineStorage
[cachedOffset
]) + 4, resultTag
);
537 size_t offset
= cachedOffset
* sizeof(JSValue
);
539 PropertyStorage
* protoPropertyStorage
= &base
->m_externalStorage
;
540 loadPtr(static_cast<void*>(protoPropertyStorage
), temp
);
541 load32(Address(temp
, offset
), resultPayload
);
542 load32(Address(temp
, offset
+ 4), resultTag
);
545 void JIT::testPrototype(Structure
* structure
, JumpList
& failureCases
)
547 if (structure
->m_prototype
.isNull())
550 failureCases
.append(branchPtr(NotEqual
, AbsoluteAddress(&asCell(structure
->m_prototype
)->m_structure
), ImmPtr(asCell(structure
->m_prototype
)->m_structure
)));
553 void JIT::privateCompilePutByIdTransition(StructureStubInfo
* stubInfo
, Structure
* oldStructure
, Structure
* newStructure
, size_t cachedOffset
, StructureChain
* chain
, ReturnAddressPtr returnAddress
)
555 // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
557 JumpList failureCases
;
558 failureCases
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::CellTag
)));
559 failureCases
.append(branchPtr(NotEqual
, Address(regT0
, OBJECT_OFFSETOF(JSCell
, m_structure
)), ImmPtr(oldStructure
)));
560 testPrototype(oldStructure
, failureCases
);
562 // Verify that nothing in the prototype chain has a setter for this property.
563 for (RefPtr
<Structure
>* it
= chain
->head(); *it
; ++it
)
564 testPrototype(it
->get(), failureCases
);
566 // Reallocate property storage if needed.
568 bool willNeedStorageRealloc
= oldStructure
->propertyStorageCapacity() != newStructure
->propertyStorageCapacity();
569 if (willNeedStorageRealloc
) {
570 // This trampoline was called to like a JIT stub; before we can can call again we need to
571 // remove the return address from the stack, to prevent the stack from becoming misaligned.
572 preserveReturnAddressAfterCall(regT3
);
574 JITStubCall
stubCall(this, cti_op_put_by_id_transition_realloc
);
575 stubCall
.skipArgument(); // base
576 stubCall
.skipArgument(); // ident
577 stubCall
.skipArgument(); // value
578 stubCall
.addArgument(Imm32(oldStructure
->propertyStorageCapacity()));
579 stubCall
.addArgument(Imm32(newStructure
->propertyStorageCapacity()));
580 stubCall
.call(regT0
);
582 restoreReturnAddressBeforeReturn(regT3
);
585 sub32(Imm32(1), AbsoluteAddress(oldStructure
->addressOfCount()));
586 add32(Imm32(1), AbsoluteAddress(newStructure
->addressOfCount()));
587 storePtr(ImmPtr(newStructure
), Address(regT0
, OBJECT_OFFSETOF(JSCell
, m_structure
)));
589 load32(Address(stackPointerRegister
, offsetof(struct JITStackFrame
, args
[2]) + sizeof(void*)), regT3
);
590 load32(Address(stackPointerRegister
, offsetof(struct JITStackFrame
, args
[2]) + sizeof(void*) + 4), regT2
);
593 compilePutDirectOffset(regT0
, regT2
, regT3
, newStructure
, cachedOffset
);
597 ASSERT(!failureCases
.empty());
598 failureCases
.link(this);
599 restoreArgumentReferenceForTrampoline();
600 Call failureCall
= tailRecursiveCall();
602 LinkBuffer
patchBuffer(this, m_codeBlock
->executablePool());
604 patchBuffer
.link(failureCall
, FunctionPtr(cti_op_put_by_id_fail
));
606 if (willNeedStorageRealloc
) {
607 ASSERT(m_calls
.size() == 1);
608 patchBuffer
.link(m_calls
[0].from
, FunctionPtr(cti_op_put_by_id_transition_realloc
));
611 CodeLocationLabel entryLabel
= patchBuffer
.finalizeCodeAddendum();
612 stubInfo
->stubRoutine
= entryLabel
;
613 RepatchBuffer
repatchBuffer(m_codeBlock
);
614 repatchBuffer
.relinkCallerToTrampoline(returnAddress
, entryLabel
);
617 void JIT::patchGetByIdSelf(CodeBlock
* codeBlock
, StructureStubInfo
* stubInfo
, Structure
* structure
, size_t cachedOffset
, ReturnAddressPtr returnAddress
)
619 RepatchBuffer
repatchBuffer(codeBlock
);
621 // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
622 // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
623 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_self_fail
));
625 int offset
= sizeof(JSValue
) * cachedOffset
;
627 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
628 // and makes the subsequent load's offset automatically correct
629 if (structure
->isUsingInlineStorage())
630 repatchBuffer
.repatchLoadPtrToLEA(stubInfo
->hotPathBegin
.instructionAtOffset(patchOffsetGetByIdExternalLoad
));
632 // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
633 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelPtrAtOffset(patchOffsetGetByIdStructure
), structure
);
634 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1
), offset
); // payload
635 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2
), offset
+ 4); // tag
638 void JIT::patchMethodCallProto(CodeBlock
* codeBlock
, MethodCallLinkInfo
& methodCallLinkInfo
, JSFunction
* callee
, Structure
* structure
, JSObject
* proto
, ReturnAddressPtr returnAddress
)
640 RepatchBuffer
repatchBuffer(codeBlock
);
642 ASSERT(!methodCallLinkInfo
.cachedStructure
);
643 methodCallLinkInfo
.cachedStructure
= structure
;
646 Structure
* prototypeStructure
= proto
->structure();
647 ASSERT(!methodCallLinkInfo
.cachedPrototypeStructure
);
648 methodCallLinkInfo
.cachedPrototypeStructure
= prototypeStructure
;
649 prototypeStructure
->ref();
651 repatchBuffer
.repatch(methodCallLinkInfo
.structureLabel
, structure
);
652 repatchBuffer
.repatch(methodCallLinkInfo
.structureLabel
.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj
), proto
);
653 repatchBuffer
.repatch(methodCallLinkInfo
.structureLabel
.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct
), prototypeStructure
);
654 repatchBuffer
.repatch(methodCallLinkInfo
.structureLabel
.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction
), callee
);
656 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id
));
659 void JIT::patchPutByIdReplace(CodeBlock
* codeBlock
, StructureStubInfo
* stubInfo
, Structure
* structure
, size_t cachedOffset
, ReturnAddressPtr returnAddress
)
661 RepatchBuffer
repatchBuffer(codeBlock
);
663 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
664 // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
665 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_put_by_id_generic
));
667 int offset
= sizeof(JSValue
) * cachedOffset
;
669 // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
670 // and makes the subsequent load's offset automatically correct
671 if (structure
->isUsingInlineStorage())
672 repatchBuffer
.repatchLoadPtrToLEA(stubInfo
->hotPathBegin
.instructionAtOffset(patchOffsetPutByIdExternalLoad
));
674 // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
675 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabelPtrAtOffset(patchOffsetPutByIdStructure
), structure
);
676 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1
), offset
); // payload
677 repatchBuffer
.repatch(stubInfo
->hotPathBegin
.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2
), offset
+ 4); // tag
680 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress
)
682 StructureStubInfo
* stubInfo
= &m_codeBlock
->getStubInfo(returnAddress
);
684 // regT0 holds a JSCell*
687 Jump failureCases1
= branchPtr(NotEqual
, Address(regT0
), ImmPtr(m_globalData
->jsArrayVPtr
));
689 // Checks out okay! - get the length from the storage
690 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_storage
)), regT2
);
691 load32(Address(regT2
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)), regT2
);
693 Jump failureCases2
= branch32(Above
, regT2
, Imm32(INT_MAX
));
695 move(Imm32(JSValue::Int32Tag
), regT1
);
696 Jump success
= jump();
698 LinkBuffer
patchBuffer(this, m_codeBlock
->executablePool());
700 // Use the patch information to link the failure cases back to the original slow case routine.
701 CodeLocationLabel slowCaseBegin
= stubInfo
->callReturnLocation
.labelAtOffset(-patchOffsetGetByIdSlowCaseCall
);
702 patchBuffer
.link(failureCases1
, slowCaseBegin
);
703 patchBuffer
.link(failureCases2
, slowCaseBegin
);
705 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
706 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(patchOffsetGetByIdPutResult
));
708 // Track the stub we have created so that it will be deleted later.
709 CodeLocationLabel entryLabel
= patchBuffer
.finalizeCodeAddendum();
710 stubInfo
->stubRoutine
= entryLabel
;
712 // Finally patch the jump to slow case back in the hot path to jump here instead.
713 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase
);
714 RepatchBuffer
repatchBuffer(m_codeBlock
);
715 repatchBuffer
.relink(jumpLocation
, entryLabel
);
717 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
718 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_array_fail
));
721 void JIT::privateCompileGetByIdProto(StructureStubInfo
* stubInfo
, Structure
* structure
, Structure
* prototypeStructure
, size_t cachedOffset
, ReturnAddressPtr returnAddress
, CallFrame
* callFrame
)
723 // regT0 holds a JSCell*
725 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
726 // referencing the prototype object - let's speculatively load it's table nice and early!)
727 JSObject
* protoObject
= asObject(structure
->prototypeForLookup(callFrame
));
729 Jump failureCases1
= checkStructure(regT0
, structure
);
731 // Check the prototype object's Structure had not changed.
732 Structure
** prototypeStructureAddress
= &(protoObject
->m_structure
);
734 move(ImmPtr(prototypeStructure
), regT3
);
735 Jump failureCases2
= branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), regT3
);
737 Jump failureCases2
= branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), ImmPtr(prototypeStructure
));
740 // Checks out okay! - getDirectOffset
741 compileGetDirectOffset(protoObject
, regT2
, regT1
, regT0
, cachedOffset
);
743 Jump success
= jump();
745 LinkBuffer
patchBuffer(this, m_codeBlock
->executablePool());
747 // Use the patch information to link the failure cases back to the original slow case routine.
748 CodeLocationLabel slowCaseBegin
= stubInfo
->callReturnLocation
.labelAtOffset(-patchOffsetGetByIdSlowCaseCall
);
749 patchBuffer
.link(failureCases1
, slowCaseBegin
);
750 patchBuffer
.link(failureCases2
, slowCaseBegin
);
752 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
753 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(patchOffsetGetByIdPutResult
));
755 // Track the stub we have created so that it will be deleted later.
756 CodeLocationLabel entryLabel
= patchBuffer
.finalizeCodeAddendum();
757 stubInfo
->stubRoutine
= entryLabel
;
759 // Finally patch the jump to slow case back in the hot path to jump here instead.
760 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase
);
761 RepatchBuffer
repatchBuffer(m_codeBlock
);
762 repatchBuffer
.relink(jumpLocation
, entryLabel
);
764 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
765 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_proto_list
));
769 void JIT::privateCompileGetByIdSelfList(StructureStubInfo
* stubInfo
, PolymorphicAccessStructureList
* polymorphicStructures
, int currentIndex
, Structure
* structure
, size_t cachedOffset
)
771 // regT0 holds a JSCell*
773 Jump failureCase
= checkStructure(regT0
, structure
);
774 compileGetDirectOffset(regT0
, regT1
, regT0
, structure
, cachedOffset
);
775 Jump success
= jump();
777 LinkBuffer
patchBuffer(this, m_codeBlock
->executablePool());
779 // Use the patch information to link the failure cases back to the original slow case routine.
780 CodeLocationLabel lastProtoBegin
= polymorphicStructures
->list
[currentIndex
- 1].stubRoutine
;
782 lastProtoBegin
= stubInfo
->callReturnLocation
.labelAtOffset(-patchOffsetGetByIdSlowCaseCall
);
784 patchBuffer
.link(failureCase
, lastProtoBegin
);
786 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
787 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(patchOffsetGetByIdPutResult
));
789 CodeLocationLabel entryLabel
= patchBuffer
.finalizeCodeAddendum();
792 polymorphicStructures
->list
[currentIndex
].set(entryLabel
, structure
);
794 // Finally patch the jump to slow case back in the hot path to jump here instead.
795 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase
);
796 RepatchBuffer
repatchBuffer(m_codeBlock
);
797 repatchBuffer
.relink(jumpLocation
, entryLabel
);
800 void JIT::privateCompileGetByIdProtoList(StructureStubInfo
* stubInfo
, PolymorphicAccessStructureList
* prototypeStructures
, int currentIndex
, Structure
* structure
, Structure
* prototypeStructure
, size_t cachedOffset
, CallFrame
* callFrame
)
802 // regT0 holds a JSCell*
804 // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
805 // referencing the prototype object - let's speculatively load it's table nice and early!)
806 JSObject
* protoObject
= asObject(structure
->prototypeForLookup(callFrame
));
808 // Check eax is an object of the right Structure.
809 Jump failureCases1
= checkStructure(regT0
, structure
);
811 // Check the prototype object's Structure had not changed.
812 Structure
** prototypeStructureAddress
= &(protoObject
->m_structure
);
814 move(ImmPtr(prototypeStructure
), regT3
);
815 Jump failureCases2
= branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), regT3
);
817 Jump failureCases2
= branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), ImmPtr(prototypeStructure
));
820 compileGetDirectOffset(protoObject
, regT2
, regT1
, regT0
, cachedOffset
);
822 Jump success
= jump();
824 LinkBuffer
patchBuffer(this, m_codeBlock
->executablePool());
826 // Use the patch information to link the failure cases back to the original slow case routine.
827 CodeLocationLabel lastProtoBegin
= prototypeStructures
->list
[currentIndex
- 1].stubRoutine
;
828 patchBuffer
.link(failureCases1
, lastProtoBegin
);
829 patchBuffer
.link(failureCases2
, lastProtoBegin
);
831 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
832 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(patchOffsetGetByIdPutResult
));
834 CodeLocationLabel entryLabel
= patchBuffer
.finalizeCodeAddendum();
837 prototypeStructure
->ref();
838 prototypeStructures
->list
[currentIndex
].set(entryLabel
, structure
, prototypeStructure
);
840 // Finally patch the jump to slow case back in the hot path to jump here instead.
841 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase
);
842 RepatchBuffer
repatchBuffer(m_codeBlock
);
843 repatchBuffer
.relink(jumpLocation
, entryLabel
);
846 void JIT::privateCompileGetByIdChainList(StructureStubInfo
* stubInfo
, PolymorphicAccessStructureList
* prototypeStructures
, int currentIndex
, Structure
* structure
, StructureChain
* chain
, size_t count
, size_t cachedOffset
, CallFrame
* callFrame
)
848 // regT0 holds a JSCell*
852 JumpList bucketsOfFail
;
854 // Check eax is an object of the right Structure.
855 bucketsOfFail
.append(checkStructure(regT0
, structure
));
857 Structure
* currStructure
= structure
;
858 RefPtr
<Structure
>* chainEntries
= chain
->head();
859 JSObject
* protoObject
= 0;
860 for (unsigned i
= 0; i
< count
; ++i
) {
861 protoObject
= asObject(currStructure
->prototypeForLookup(callFrame
));
862 currStructure
= chainEntries
[i
].get();
864 // Check the prototype object's Structure had not changed.
865 Structure
** prototypeStructureAddress
= &(protoObject
->m_structure
);
867 move(ImmPtr(currStructure
), regT3
);
868 bucketsOfFail
.append(branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), regT3
));
870 bucketsOfFail
.append(branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), ImmPtr(currStructure
)));
875 compileGetDirectOffset(protoObject
, regT2
, regT1
, regT0
, cachedOffset
);
876 Jump success
= jump();
878 LinkBuffer
patchBuffer(this, m_codeBlock
->executablePool());
880 // Use the patch information to link the failure cases back to the original slow case routine.
881 CodeLocationLabel lastProtoBegin
= prototypeStructures
->list
[currentIndex
- 1].stubRoutine
;
883 patchBuffer
.link(bucketsOfFail
, lastProtoBegin
);
885 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
886 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(patchOffsetGetByIdPutResult
));
888 CodeLocationLabel entryLabel
= patchBuffer
.finalizeCodeAddendum();
890 // Track the stub we have created so that it will be deleted later.
893 prototypeStructures
->list
[currentIndex
].set(entryLabel
, structure
, chain
);
895 // Finally patch the jump to slow case back in the hot path to jump here instead.
896 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase
);
897 RepatchBuffer
repatchBuffer(m_codeBlock
);
898 repatchBuffer
.relink(jumpLocation
, entryLabel
);
901 void JIT::privateCompileGetByIdChain(StructureStubInfo
* stubInfo
, Structure
* structure
, StructureChain
* chain
, size_t count
, size_t cachedOffset
, ReturnAddressPtr returnAddress
, CallFrame
* callFrame
)
903 // regT0 holds a JSCell*
907 JumpList bucketsOfFail
;
909 // Check eax is an object of the right Structure.
910 bucketsOfFail
.append(checkStructure(regT0
, structure
));
912 Structure
* currStructure
= structure
;
913 RefPtr
<Structure
>* chainEntries
= chain
->head();
914 JSObject
* protoObject
= 0;
915 for (unsigned i
= 0; i
< count
; ++i
) {
916 protoObject
= asObject(currStructure
->prototypeForLookup(callFrame
));
917 currStructure
= chainEntries
[i
].get();
919 // Check the prototype object's Structure had not changed.
920 Structure
** prototypeStructureAddress
= &(protoObject
->m_structure
);
922 move(ImmPtr(currStructure
), regT3
);
923 bucketsOfFail
.append(branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), regT3
));
925 bucketsOfFail
.append(branchPtr(NotEqual
, AbsoluteAddress(prototypeStructureAddress
), ImmPtr(currStructure
)));
930 compileGetDirectOffset(protoObject
, regT2
, regT1
, regT0
, cachedOffset
);
931 Jump success
= jump();
933 LinkBuffer
patchBuffer(this, m_codeBlock
->executablePool());
935 // Use the patch information to link the failure cases back to the original slow case routine.
936 patchBuffer
.link(bucketsOfFail
, stubInfo
->callReturnLocation
.labelAtOffset(-patchOffsetGetByIdSlowCaseCall
));
938 // On success return back to the hot patch code, at a point it will perform the store to dest for us.
939 patchBuffer
.link(success
, stubInfo
->hotPathBegin
.labelAtOffset(patchOffsetGetByIdPutResult
));
941 // Track the stub we have created so that it will be deleted later.
942 CodeLocationLabel entryLabel
= patchBuffer
.finalizeCodeAddendum();
943 stubInfo
->stubRoutine
= entryLabel
;
945 // Finally patch the jump to slow case back in the hot path to jump here instead.
946 CodeLocationJump jumpLocation
= stubInfo
->hotPathBegin
.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase
);
947 RepatchBuffer
repatchBuffer(m_codeBlock
);
948 repatchBuffer
.relink(jumpLocation
, entryLabel
);
950 // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
951 repatchBuffer
.relinkCallerToFunction(returnAddress
, FunctionPtr(cti_op_get_by_id_proto_list
));
954 /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
956 #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
958 void JIT::compileGetDirectOffset(RegisterID base
, RegisterID resultTag
, RegisterID resultPayload
, RegisterID structure
, RegisterID offset
)
960 ASSERT(sizeof(((Structure
*)0)->m_propertyStorageCapacity
) == sizeof(int32_t));
961 ASSERT(sizeof(JSObject::inlineStorageCapacity
) == sizeof(int32_t));
962 ASSERT(sizeof(JSValue
) == 8);
964 Jump notUsingInlineStorage
= branch32(NotEqual
, Address(structure
, OBJECT_OFFSETOF(Structure
, m_propertyStorageCapacity
)), Imm32(JSObject::inlineStorageCapacity
));
965 loadPtr(BaseIndex(base
, offset
, TimesEight
, OBJECT_OFFSETOF(JSObject
, m_inlineStorage
)+OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
);
966 loadPtr(BaseIndex(base
, offset
, TimesEight
, OBJECT_OFFSETOF(JSObject
, m_inlineStorage
)+OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
);
967 Jump finishedLoad
= jump();
968 notUsingInlineStorage
.link(this);
969 loadPtr(Address(base
, OBJECT_OFFSETOF(JSObject
, m_externalStorage
)), base
);
970 loadPtr(BaseIndex(base
, offset
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
);
971 loadPtr(BaseIndex(base
, offset
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
);
972 finishedLoad
.link(this);
975 void JIT::emit_op_get_by_pname(Instruction
* currentInstruction
)
977 unsigned dst
= currentInstruction
[1].u
.operand
;
978 unsigned base
= currentInstruction
[2].u
.operand
;
979 unsigned property
= currentInstruction
[3].u
.operand
;
980 unsigned expected
= currentInstruction
[4].u
.operand
;
981 unsigned iter
= currentInstruction
[5].u
.operand
;
982 unsigned i
= currentInstruction
[6].u
.operand
;
984 emitLoad2(property
, regT1
, regT0
, base
, regT3
, regT2
);
985 emitJumpSlowCaseIfNotJSCell(property
, regT1
);
986 addSlowCase(branchPtr(NotEqual
, regT0
, payloadFor(expected
)));
987 // Property registers are now available as the property is known
988 emitJumpSlowCaseIfNotJSCell(base
, regT3
);
989 emitLoadPayload(iter
, regT1
);
991 // Test base's structure
992 loadPtr(Address(regT2
, OBJECT_OFFSETOF(JSCell
, m_structure
)), regT0
);
993 addSlowCase(branchPtr(NotEqual
, regT0
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructure
))));
994 load32(addressFor(i
), regT3
);
995 sub32(Imm32(1), regT3
);
996 addSlowCase(branch32(AboveOrEqual
, regT3
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_numCacheableSlots
))));
997 compileGetDirectOffset(regT2
, regT1
, regT0
, regT0
, regT3
);
999 emitStore(dst
, regT1
, regT0
);
1000 map(m_bytecodeIndex
+ OPCODE_LENGTH(op_get_by_pname
), dst
, regT1
, regT0
);
1003 void JIT::emitSlow_op_get_by_pname(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1005 unsigned dst
= currentInstruction
[1].u
.operand
;
1006 unsigned base
= currentInstruction
[2].u
.operand
;
1007 unsigned property
= currentInstruction
[3].u
.operand
;
1009 linkSlowCaseIfNotJSCell(iter
, property
);
1011 linkSlowCaseIfNotJSCell(iter
, base
);
1015 JITStubCall
stubCall(this, cti_op_get_by_val
);
1016 stubCall
.addArgument(base
);
1017 stubCall
.addArgument(property
);
1021 #else // USE(JSVALUE32_64)
1023 void JIT::emit_op_get_by_val(Instruction
* currentInstruction
)
1025 unsigned dst
= currentInstruction
[1].u
.operand
;
1026 unsigned base
= currentInstruction
[2].u
.operand
;
1027 unsigned property
= currentInstruction
[3].u
.operand
;
1029 emitGetVirtualRegisters(base
, regT0
, property
, regT1
);
1030 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
1032 // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
1033 // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
1034 // number was signed since m_vectorLength is always less than intmax (since the total allocation
1035 // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
1036 // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
1037 // extending since it makes it easier to re-tag the value in the slow case.
1038 zeroExtend32ToPtr(regT1
, regT1
);
1040 emitFastArithImmToInt(regT1
);
1042 emitJumpSlowCaseIfNotJSCell(regT0
, base
);
1043 addSlowCase(branchPtr(NotEqual
, Address(regT0
), ImmPtr(m_globalData
->jsArrayVPtr
)));
1045 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_storage
)), regT2
);
1046 addSlowCase(branch32(AboveOrEqual
, regT1
, Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_vectorLength
))));
1048 loadPtr(BaseIndex(regT2
, regT1
, ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])), regT0
);
1049 addSlowCase(branchTestPtr(Zero
, regT0
));
1051 emitPutVirtualRegister(dst
);
1054 void JIT::compileGetDirectOffset(RegisterID base
, RegisterID result
, RegisterID structure
, RegisterID offset
, RegisterID scratch
)
1056 ASSERT(sizeof(((Structure
*)0)->m_propertyStorageCapacity
) == sizeof(int32_t));
1057 ASSERT(sizeof(JSObject::inlineStorageCapacity
) == sizeof(int32_t));
1059 Jump notUsingInlineStorage
= branch32(NotEqual
, Address(structure
, OBJECT_OFFSETOF(Structure
, m_propertyStorageCapacity
)), Imm32(JSObject::inlineStorageCapacity
));
1060 loadPtr(BaseIndex(base
, offset
, ScalePtr
, OBJECT_OFFSETOF(JSObject
, m_inlineStorage
)), result
);
1061 Jump finishedLoad
= jump();
1062 notUsingInlineStorage
.link(this);
1063 loadPtr(Address(base
, OBJECT_OFFSETOF(JSObject
, m_externalStorage
)), scratch
);
1064 loadPtr(BaseIndex(scratch
, offset
, ScalePtr
, 0), result
);
1065 finishedLoad
.link(this);
1068 void JIT::emit_op_get_by_pname(Instruction
* currentInstruction
)
1070 unsigned dst
= currentInstruction
[1].u
.operand
;
1071 unsigned base
= currentInstruction
[2].u
.operand
;
1072 unsigned property
= currentInstruction
[3].u
.operand
;
1073 unsigned expected
= currentInstruction
[4].u
.operand
;
1074 unsigned iter
= currentInstruction
[5].u
.operand
;
1075 unsigned i
= currentInstruction
[6].u
.operand
;
1077 emitGetVirtualRegister(property
, regT0
);
1078 addSlowCase(branchPtr(NotEqual
, regT0
, addressFor(expected
)));
1079 emitGetVirtualRegisters(base
, regT0
, iter
, regT1
);
1080 emitJumpSlowCaseIfNotJSCell(regT0
, base
);
1082 // Test base's structure
1083 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSCell
, m_structure
)), regT2
);
1084 addSlowCase(branchPtr(NotEqual
, regT2
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructure
))));
1085 load32(addressFor(i
), regT3
);
1086 sub32(Imm32(1), regT3
);
1087 addSlowCase(branch32(AboveOrEqual
, regT3
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_numCacheableSlots
))));
1088 compileGetDirectOffset(regT0
, regT0
, regT2
, regT3
, regT1
);
1090 emitPutVirtualRegister(dst
, regT0
);
1093 void JIT::emitSlow_op_get_by_pname(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1095 unsigned dst
= currentInstruction
[1].u
.operand
;
1096 unsigned base
= currentInstruction
[2].u
.operand
;
1097 unsigned property
= currentInstruction
[3].u
.operand
;
1100 linkSlowCaseIfNotJSCell(iter
, base
);
1104 JITStubCall
stubCall(this, cti_op_get_by_val
);
1105 stubCall
.addArgument(base
, regT2
);
1106 stubCall
.addArgument(property
, regT2
);
1110 void JIT::emit_op_put_by_val(Instruction
* currentInstruction
)
1112 unsigned base
= currentInstruction
[1].u
.operand
;
1113 unsigned property
= currentInstruction
[2].u
.operand
;
1114 unsigned value
= currentInstruction
[3].u
.operand
;
1116 emitGetVirtualRegisters(base
, regT0
, property
, regT1
);
1117 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
1119 // See comment in op_get_by_val.
1120 zeroExtend32ToPtr(regT1
, regT1
);
1122 emitFastArithImmToInt(regT1
);
1124 emitJumpSlowCaseIfNotJSCell(regT0
, base
);
1125 addSlowCase(branchPtr(NotEqual
, Address(regT0
), ImmPtr(m_globalData
->jsArrayVPtr
)));
1126 addSlowCase(branch32(AboveOrEqual
, regT1
, Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_vectorLength
))));
1128 loadPtr(Address(regT0
, OBJECT_OFFSETOF(JSArray
, m_storage
)), regT2
);
1130 Jump empty
= branchTestPtr(Zero
, BaseIndex(regT2
, regT1
, ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
1132 Label
storeResult(this);
1133 emitGetVirtualRegister(value
, regT0
);
1134 storePtr(regT0
, BaseIndex(regT2
, regT1
, ScalePtr
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0])));
1138 add32(Imm32(1), Address(regT2
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
1139 branch32(Below
, regT1
, Address(regT2
, OBJECT_OFFSETOF(ArrayStorage
, m_length
))).linkTo(storeResult
, this);
1142 add32(Imm32(1), regT0
);
1143 store32(regT0
, Address(regT2
, OBJECT_OFFSETOF(ArrayStorage
, m_length
)));
1144 jump().linkTo(storeResult
, this);
1149 void JIT::emit_op_put_by_index(Instruction
* currentInstruction
)
1151 JITStubCall
stubCall(this, cti_op_put_by_index
);
1152 stubCall
.addArgument(currentInstruction
[1].u
.operand
, regT2
);
1153 stubCall
.addArgument(Imm32(currentInstruction
[2].u
.operand
));
1154 stubCall
.addArgument(currentInstruction
[3].u
.operand
, regT2
);
1158 void JIT::emit_op_put_getter(Instruction
* currentInstruction
)
1160 JITStubCall
stubCall(this, cti_op_put_getter
);
1161 stubCall
.addArgument(currentInstruction
[1].u
.operand
, regT2
);
1162 stubCall
.addArgument(ImmPtr(&m_codeBlock
->identifier(currentInstruction
[2].u
.operand
)));
1163 stubCall
.addArgument(currentInstruction
[3].u
.operand
, regT2
);
1167 void JIT::emit_op_put_setter(Instruction
* currentInstruction
)
1169 JITStubCall
stubCall(this, cti_op_put_setter
);
1170 stubCall
.addArgument(currentInstruction
[1].u
.operand
, regT2
);
1171 stubCall
.addArgument(ImmPtr(&m_codeBlock
->identifier(currentInstruction
[2].u
.operand
)));
1172 stubCall
.addArgument(currentInstruction
[3].u
.operand
, regT2
);
1176 void JIT::emit_op_del_by_id(Instruction
* currentInstruction
)
1178 JITStubCall
stubCall(this, cti_op_del_by_id
);
1179 stubCall
.addArgument(currentInstruction
[2].u
.operand
, regT2
);
1180 stubCall
.addArgument(ImmPtr(&m_codeBlock
->identifier(currentInstruction
[3].u
.operand
)));
1181 stubCall
.call(currentInstruction
[1].u
.operand
);
1185 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1187 /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1189 // Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
1190 void JIT::emit_op_method_check(Instruction
*) {}
1191 void JIT::emitSlow_op_method_check(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&) { ASSERT_NOT_REACHED(); }
1192 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1193 #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
1196 void JIT::emit_op_get_by_id(Instruction
* currentInstruction
)
1198 unsigned resultVReg
= currentInstruction
[1].u
.operand
;
1199 unsigned baseVReg
= currentInstruction
[2].u
.operand
;
1200 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
1202 emitGetVirtualRegister(baseVReg
, regT0
);
1203 JITStubCall
stubCall(this, cti_op_get_by_id_generic
);
1204 stubCall
.addArgument(regT0
);
1205 stubCall
.addArgument(ImmPtr(ident
));
1206 stubCall
.call(resultVReg
);
1208 m_propertyAccessInstructionIndex
++;
1211 void JIT::emitSlow_op_get_by_id(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
1213 ASSERT_NOT_REACHED();
1216 void JIT::emit_op_put_by_id(Instruction
* currentInstruction
)
1218 unsigned baseVReg
= currentInstruction
[1].u
.operand
;
1219 Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
1220 unsigned valueVReg
= currentInstruction
[3].u
.operand
;
1222 emitGetVirtualRegisters(baseVReg
, regT0
, valueVReg
, regT1
);
1224 JITStubCall
stubCall(this, cti_op_put_by_id_generic
);
1225 stubCall
.addArgument(regT0
);
1226 stubCall
.addArgument(ImmPtr(ident
));
1227 stubCall
.addArgument(regT1
);
1230 m_propertyAccessInstructionIndex
++;
1233 void JIT::emitSlow_op_put_by_id(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
1235 ASSERT_NOT_REACHED();
1238 #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1240 /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1242 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to materialize the cached function directly, without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}

#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif // ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and we also use the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}

void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the arguments have been loaded into registers, and we generate
    // code such that the Structure & offset are always at the same distance from it.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}

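// Property storage layout assumed by the helpers below: an object keeps its first
// few property slots inline in the cell (m_inlineStorage); once that capacity is
// exceeded, the slots live in a separately allocated array reached through
// m_externalStorage. Structure::isUsingInlineStorage() says which case applies.
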
// Compile a store into an object's property storage. May overwrite the
// value in the base register.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    storePtr(value, Address(base, offset));
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    loadPtr(Address(base, offset), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}

void JIT::testPrototype(Structure* structure, JumpList& failureCases)
{
    if (structure->m_prototype.isNull())
        return;

    move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
    move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
}

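// Compile the stub for a put_by_id that adds a new property, moving the object from
// oldStructure to newStructure: guard on the old Structure and on every Structure
// along the prototype chain, grow the property storage through a stub call if its
// capacity changes, move a reference count from the old Structure to the new one,
// store the new Structure pointer, and finally write the value itself.
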
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    testPrototype(oldStructure, failureCases);

    // Check the Structure of each object on the prototype chain.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it)
        testPrototype(it->get(), failureCases);

    // Emit a call only if storage realloc is needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Assumes m_refCount can be decremented easily; refcount decrement is safe as
    // the codeblock should ensure oldStructure->m_refCount > 0.
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // Write the value.
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}

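// A get_by_id that resolved to a property on the base object itself is cached by
// rewriting the hot path in place rather than by compiling a separate stub: the
// Structure immediate and the property-map displacement are overwritten at their
// known offsets from hotPathBegin.
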
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}

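// Commit a successful method check: fill in the four patchable slots planted by
// emit_op_method_check (base Structure, prototype object, prototype Structure, and
// the cached callee), taking references to the two Structures, and relink the
// caller to plain cti_op_get_by_id for the cases the inline check rejects.
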
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}

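// array.length is not stored in the property map, so it cannot be cached by offset
// patching; instead this stub recognizes a JSArray by its vptr, loads m_length from
// the array's storage, and boxes it as an immediate integer - lengths too large for
// the immediate representation fall back to the slow case.
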
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array.
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset.
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

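// The *List variants turn a monomorphic cache into a polymorphic one. Each newly
// compiled stub becomes the target of the hot path's patched jump, and its failure
// path falls through to the previously generated stub (lastProtoBegin) - or to the
// slow case for the first entry - so cached structures are tried newest-first.
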
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset.
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

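// The Chain variants handle properties found 'count' links down the prototype
// chain: the stub must re-validate the Structure of every intermediate prototype
// before loading the value from the final holder via compileGetDirectOffset.
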
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)