/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {

#if USE(JSVALUE32_64)

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(base);
    stubCall.addArgument(Imm32(property));
    stubCall.addArgument(value);
    stubCall.call();
}
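
// Note: this op, and the put_getter/put_setter/del_by_id ops that follow, have no inline
// fast path. JITStubCall simply marshals each operand onto the stub's argument area and
// plants a call into the corresponding cti_* C++ stub, so no inline caching applies here.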

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.call(dst);
}

#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}

void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.addArgument(value);
    stubCall.call();
}

void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.call(dst);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(value);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
    move(Imm32(JSValue::CellTag), regT1);
    Jump match = jump();

    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath();

    match.link(this);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
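
// In effect this is a one-entry inline cache for "obj.method(...)": the receiver's
// Structure and its prototype's Structure are compared against patchable immediates, and
// on a cache hit the cached JSFunction is materialized directly via the patchable
// putFunction constant, skipping the property load entirely. The differenceBetween()
// ASSERTs pin the code layout so patchMethodCallProto() (below) can locate each
// patchable word at a fixed offset.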

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}

#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif // ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}
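
// On the JSVALUE32_64 representation a JSValue occupies two 32-bit words - a tag
// (e.g. JSValue::CellTag, JSValue::Int32Tag) and a payload - which is why each array
// element is fetched as two 32-bit loads at stride TimesEight (tag at +4, payload at +0),
// and why indices below m_fastAccessCutoff can skip the hole check done in the slow case.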

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    // The slow case that handles accesses to arrays (below) may jump back up to here.
    Label callGetByValJITStub(this);

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base array check

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));

    linkSlowCase(iter); // array fast cut-off check

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
    branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);

    // Missed the fast region, but it is still in the vector.
    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload

    // FIXME: Maybe we can optimize this comparison to JSValue().
    Jump skip = branch32(NotEqual, regT0, Imm32(0));
    branch32(Equal, regT1, Imm32(JSValue::CellTag), callGetByValJITStub);

    skip.link(this);
    emitStore(dst, regT1, regT0);
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);

    Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));

    // Check if the access is within the vector.
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));

    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update the fast access cutoff.
    Jump skip = branch32(NotEqual, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::CellTag));
    addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), Imm32(0)));
    skip.link(this);

    inFastVector.link(this);

    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
}
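
// The hole check above appears to rely on the empty-value encoding: a slot whose tag is
// CellTag and whose payload is zero is presumably JSValue() (an unwritten slot), so a
// first write beyond m_fastAccessCutoff falls back to the stub, which can update the
// array's length bookkeeping (see the FIXME).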

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(base);
    stubPutByValCall.addArgument(property);
    stubPutByValCall.addArgument(value);
    stubPutByValCall.call();

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));

    // Slow cases for immediate int accesses to arrays.
    linkSlowCase(iter); // in vector check
    linkSlowCase(iter); // written to slot check

    JITStubCall stubCall(this, cti_op_put_by_val_array);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(regT2);
    stubCall.addArgument(value);
    stubCall.call();
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    compileGetByIdHotPath();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}

void JIT::compileGetByIdHotPath()
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.
    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
    DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);

    Label putResult(this);
    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
}
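
// The differenceBetween() ASSERTs verify that the emitted code matches the architecture's
// fixed patchOffset* constants: the repatching code (patchGetByIdSelf and the stub
// generators below) addresses instructions purely by these offsets from hotPathBegin,
// so the layout of the hot path must not drift.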

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
}

void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(dst);

    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    int base = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, value, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;

    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_put_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(regT3, regT2);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    emitStore(offset, valueTag, valuePayload, base);
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    emitLoad(offset, resultTag, resultPayload, base);
}
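
// Both helpers resolve the same storage question: properties live either inline in the
// object (JSObject::m_inlineStorage) or in a heap-allocated external array
// (m_externalStorage). Inline storage is addressed as extra slots off the base object
// itself, so the cached offset is biased rather than indirected through a pointer load.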

void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
{
    if (base->isUsingInlineStorage()) {
        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
        return;
    }

    size_t offset = cachedOffset * sizeof(JSValue);

    PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
    loadPtr(static_cast<void*>(protoPropertyStorage), temp);
    load32(Address(temp, offset), resultPayload);
    load32(Address(temp, offset + 4), resultTag);
}

void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
    // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.

    JumpList failureCases;
    failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));

    // Verify that nothing in the prototype chain has a setter for this property.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
    }

    // Reallocate property storage if needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);

        restoreReturnAddressBeforeReturn(regT3);
    }

    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);

    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
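
// A transition stub performs the structure change itself: it re-checks the old Structure
// and walks the whole prototype chain (no setter may intervene), optionally grows the
// property storage via the realloc stub, moves one refcount from the old Structure to
// the new one (the sub32/add32 pair on addressOfCount()), and only then stores the value
// at the cached offset.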

void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
}
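
// Patching proceeds in two steps: the caller is first relinked to a 'fail' stub so the
// same site is never patched twice, then the inline Structure immediate and the two
// 32-bit displacement loads (payload, tag) are rewritten in place for the cached hit.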

void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
}

void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // regT0 holds a JSCell*

    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
    move(regT2, regT0);
    move(Imm32(JSValue::Int32Tag), regT1);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
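
// The Above/INT_MAX guard keeps the result representable with an Int32 tag; array
// lengths above INT_MAX take the slow case instead, presumably so they can be returned
// in a representation that holds the full unsigned range (e.g. as a double).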

void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    // regT0 holds a JSCell*

    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
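
// Polymorphic caching chains stubs together: each newly generated stub's failure case
// jumps to the previously generated stub (lastProtoBegin), or to the original slow case
// for the first entry, so a lookup cascades through every cached Structure before
// falling back to the C++ stub.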

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    ASSERT(count);

    JumpList bucketsOfFail;

    // Check regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    ASSERT(count);

    JumpList bucketsOfFail;

    // Check regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

#else // USE(JSVALUE32_64)

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
    // the number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));

    // Get the value from the vector
    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}
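
// In this half of the file a JSValue is a single machine word, so the element access is
// one ScalePtr-indexed loadPtr rather than the split tag/payload pair of loads used in
// the JSVALUE32_64 section above.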

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
    // No; oh well, check if the access is within the vector - if so, we may still be okay.
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));

    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update the fast access cutoff.
    addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));

    // All good - put the value into the array.
    inFastVector.link(this);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
}

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.call(resultVReg);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned valueVReg = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
    Jump notCell = emitJumpIfNotJSCell(regT0);
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
    Jump match = jump();

    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}

#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif // ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);
    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset,
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

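// Note: the trampolines below recover the head of this slow case as
// callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); any stub
// whose checks fail links its failure jumps back to that label.
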
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the arguments have been loaded into registers, and we generate code
    // such that the Structure and offset are always at the same distance from it.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a pointer-sized immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
    ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}

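// Both patchable fields planted above are filled in once the slot is known:
// patchPutByIdReplace() (below) patches the Structure check and the store
// displacement for a simple replace, while privateCompilePutByIdTransition()
// builds a separate stub for puts that change the object's Structure.
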
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}

// Compile a store into an object's property storage. May overwrite the
// value in base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    storePtr(value, Address(base, offset));
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    loadPtr(Address(base, offset), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}

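// Storage layout note (a worked example, assuming 8-byte JSValues): a property
// at cachedOffset 2 lives at byte offset 16 - either 16 bytes into
// m_inlineStorage (i.e. OBJECT_OFFSETOF(JSObject, m_inlineStorage) + 16 from
// the object base) while the Structure still uses inline storage, or 16 bytes
// into the heap array pointed to by m_externalStorage otherwise. The three
// helpers above differ only in when that choice is resolved.
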
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    JumpList successCases;

    // ecx = baseObject->structure(); check that the base really is an object.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));

    // proto(ecx) = baseObject->structure()->prototype()
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);

    // Walk the prototype chain, verifying that each prototype still has the expected Structure.
    for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
        // null check the prototype
        successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));

        // Check the structure id
        failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get())));

        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
        loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
    }

    successCases.link(this);

    // Emit a call only if storage realloc is needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(3, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Assumes m_refCount can be decremented easily; the refcount decrement is safe as
    // the codeblock should ensure oldStructure->m_refCount > 0.
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // Write the value, then return to the stub's caller.
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}

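// In outline, the transition stub built above: (1) checks the base against
// oldStructure and re-validates every Structure on the prototype chain;
// (2) if the property storage capacity changes, calls out to
// cti_op_put_by_id_transition_realloc; (3) swaps the base's Structure pointer
// to newStructure (adjusting both refcounts), stores the value, and returns;
// (4) on any failed check, tail-calls cti_op_put_by_id_fail.
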
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}

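// Once patched, a hit runs entirely on the inline fast path. A later Structure
// mismatch takes the slow call, now routed to cti_op_get_by_id_self_fail,
// which is expected to upgrade the cache to a polymorphic list (see
// privateCompileGetByIdSelfList, below) rather than patching this site again.
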
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent store's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}

void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

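// A sketch of the stub generated above (illustrative only):
//
//     cmp  [regT0], <jsArrayVPtr>          ; vptr check: is the base a JSArray?
//     jne  slowCaseBegin
//     mov  regT2, [regT0 + JSArray::m_storage]
//     mov  regT2, [regT2 + ArrayStorage::m_length]   ; 32-bit load
//     cmp  regT2, <maxImmediateInt>        ; bail if it cannot be boxed as an int
//     ja   slowCaseBegin
//     <box regT2 as an immediate int in regT0>
//     jmp  putResult                       ; rejoin the hot path, which stores dest
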
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

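    // (On x86-64 the Structure pointer cannot be used directly as a 64-bit
    // immediate operand to a memory compare, so it is first moved into the
    // scratch register regT3; the 32-bit path compares against the immediate
    // in place. The same pattern recurs in the chain stubs below.)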

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

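// Each new polymorphic case is prepended to the dispatch path: the hot path's
// slow-case jump is re-pointed at the newest stub, and that stub's failure
// case falls back to the previous stub (lastProtoBegin), so the stubs form a
// linear chain ending at the original slow case call.
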
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    // Checks out okay! - getDirectOffset
    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

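// The chain stubs validate every Structure from the base out to the prototype
// that actually holds the property; 'count' bounds the walk and 'chain' holds
// the expected Structures, so any mutation along the way falls back to the
// previous case in the list (or, for the single-stub variant below, to the
// slow case).
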
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if PLATFORM(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)