/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)

#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {
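// This file emits the baseline JIT's property-access code for the
// JSVALUE32_64 value representation, where a JSValue is a 32-bit tag plus a
// 32-bit payload (see the OBJECT_OFFSETOF(JSValue, u.asBits.tag / .payload)
// uses throughout). In registers, values travel as pairs - by convention tag
// in regT1/regT3, payload in regT0/regT2. A rough sketch of the layout this
// code relies on (field order is endian-dependent; this sketch assumes
// little-endian):
//
//     union {
//         double asDouble;
//         struct { int32_t payload; int32_t tag; } asBits;
//     } u;
//
// Tags such as JSValue::CellTag, Int32Tag and EmptyValueTag discriminate
// what the payload holds.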
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(base);
    stubCall.addArgument(Imm32(property));
    stubCall.addArgument(value);
    stubCall.call();
}
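// A note on the JITStubCall pattern used above and throughout this file:
// addArgument() marshals each argument into the stub's stack frame, and
// call() emits the call into the named C++ stub (cti_*); the overload taking
// a virtual register also stores the stub's result there. Opcodes without a
// fast path compile to nothing but such a stub call.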
void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}
void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.call(dst);
}
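// Everything below is compiled in one of two modes. Without
// JIT_OPTIMIZE_PROPERTY_ACCESS, the property opcodes become unconditional
// calls to the generic stubs and are never patched. With it, we instead plant
// patchable inline caches (a structure check plus a direct offset access),
// and the privateCompile* / patch* routines later in this file rewrite them
// at runtime as structures are observed.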
#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.addArgument(value);
    stubCall.call();
}
void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.call(dst);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(value);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}
#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    move(TrustedImm32(JSValue::CellTag), regT1);
    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath();

    match.link(this);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif // ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
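// Shared stub for get_by_val on string bases: given a non-rope JSString in
// regT0 and an index in regT2, it loads the character and returns the
// interned single-character string from smallStrings - only characters below
// 0x100 have such a cache. On any failure it returns 0 in regT0; as noted
// below, the caller null-checks regT0 and falls through to its slow path.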
JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));

    // Load string length to regT1, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT2, regT1));

    // Load the character
    jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*globalData, &jit, pool);
    return patchBuffer.finalizeCode().m_code;
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));

    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
    addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));

    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}
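// The slow cases below must be linked in exactly the order the hot path above
// added them (property int32 check, base cell check, array vptr check, vector
// length check, empty value check), since linkSlowCase() consumes the
// iterator sequentially.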
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check

    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
    Jump failed = branchTestPtr(Zero, regT0);
    emitStore(dst, regT1, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);

    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));

    Label storeResult(this);
    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
    Jump end = jump();

    empty.link(this);
    add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT2, regT0);
    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(base);
    stubPutByValCall.addArgument(property);
    stubPutByValCall.addArgument(value);
    stubPutByValCall.call();
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    compileGetByIdHotPath();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
void JIT::compileGetByIdHotPath()
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a
    // label to jump back to if one of these trampolines finds a match.

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT2);
    DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1);
    DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2);

    Label putResult(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
}
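// Illustrative shape of what the hot path above plants (pseudo-assembly; the
// actual instruction selection is target-specific):
//
//     cmp  [baseCell + structureOffset], <structure>  ; starts as an unreachable
//     jne  slowCase                                   ;   dummy structure pointer
//     mov  storage, [baseCell + m_propertyStorage]
//     mov  payload, [storage + <offset1>]             ; offsets start as
//     mov  tag,     [storage + <offset2>]             ;   patchGetByIdDefaultOffset
//   putResult:
//
// patchGetByIdSelf() below later repatches <structure> and the two compact
// offsets in place, turning this into a cache hit for the observed structure.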
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
}
void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    Label coldPathBegin(this);

    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(dst);

    END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and
    // we generate code such that the Structure & offset are always at the same distance from it.

    int base = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, value, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchPutByIdDefaultOffset)); // payload
    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchPutByIdDefaultOffset)); // tag

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(regT3, regT2);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage())
        offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
    emitStore(offset, valueTag, valuePayload, base);
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage()) {
        offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
        emitLoad(offset, resultTag, resultPayload, base);
    } else {
        RegisterID temp = resultPayload;
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), temp);
        emitLoad(offset, resultTag, resultPayload, temp);
    }
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
{
    load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), resultPayload);
    load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), resultTag);
}
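// Generates a one-off trampoline for a put_by_id that transitions the base
// from oldStructure to newStructure: verify the old structure (and, for
// non-direct puts, guard the prototype chain), reallocate the property
// storage if the capacity changed, store the new structure pointer, then
// store the value at the cached offset. Any failed guard tail-calls back to
// the generic put_by_id stub.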
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
    // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#else
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif

    JumpList failureCases;
    failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
    testPrototype(oldStructure->storedPrototype(), failureCases);

    if (!direct) {
        // Verify that nothing in the prototype chain has a setter for this property.
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
            testPrototype((*it)->storedPrototype(), failureCases);
    }

    // Reallocate property storage if needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline is called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);

        restoreReturnAddressBeforeReturn(regT3);

#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
        // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#else
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
    }

    storePtrWithWriteBarrier(TrustedImmPtr(newStructure), regT0, Address(regT0, JSCell::structureOffset()));

#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
    // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#else
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#endif

    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
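// The two patch* routines below generate no new code: they rewrite constants
// embedded in an existing hot path (the structure pointer and the payload/tag
// displacements, all at fixed offsets from hotPathBegin) and relink the
// caller so each cache is only patched once.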
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // regT0 holds a JSCell*

    // Check for array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
    move(regT2, regT0);
    move(TrustedImm32(JSValue::Int32Tag), regT1);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
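// Monomorphic prototype access: guard the base's structure and the
// prototype's structure, then read straight out of the prototype object -
// its pointer can be baked into the stub as an immediate because the two
// structure checks pin it down. Getter and custom-getter slots instead route
// through the corresponding cti stubs.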
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure has not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
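// The *List variants below grow a monomorphic stub into a polymorphic one:
// each newly generated stub links its failure case to the previously
// generated stub in the PolymorphicAccessStructureList (or to the slow case
// for the first entry), so a lookup cascades through every cached shape
// before giving up.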
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    // regT0 holds a JSCell*
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check that regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure has not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
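// Chain accesses guard every hop: the loop below emits one testPrototype()
// check per Structure in the StructureChain, so a hit proves the entire path
// from the base object to the property's holder is unchanged before loading
// from the final prototype.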
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    JumpList bucketsOfFail;

    // Check that regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    JumpList bucketsOfFail;

    // Check that regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
{
    ASSERT(sizeof(JSValue) == 8);

    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
}
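// get_by_pname is the fast path for obj[p] inside a for-in loop: if the
// property register still holds the string the iterator produced and the
// base's structure matches the iterator's cached structure, the value is
// fetched by slot index (i - 1, the loop counter being one ahead of the slot)
// through the register-offset compileGetDirectOffset() above.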
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitLoad2(property, regT1, regT0, base, regT3, regT2);
    emitJumpSlowCaseIfNotJSCell(property, regT1);
    addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
    // Property registers are now available as the property is known
    emitJumpSlowCaseIfNotJSCell(base, regT3);
    emitLoadPayload(iter, regT1);

    // Test base's structure
    loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
    addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT2, regT1, regT0, regT3);

    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
}
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCaseIfNotJSCell(iter, property);
    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)