/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "JIT.h"

#if ENABLE(JSVALUE32_64)
#if ENABLE(JIT)

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(base);
    stubCall.addArgument(Imm32(property));
    stubCall.addArgument(value);
    stubCall.call();
}
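// The ops below follow the same pattern as emit_op_put_by_index above:
// operations that are not worth inline caching compile to a JITStubCall, which
// marshals the bytecode operands into the JIT stack frame and calls out to the
// matching cti_* stub in the C++ runtime.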
void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}
void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
    stubCall.call(dst);
}
#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.addArgument(value);
    stubCall.call();
}
void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.call(dst);

    m_propertyAccessInstructionIndex++;
}
void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(value);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}
void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}
#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
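// With JIT_OPTIMIZE_PROPERTY_ACCESS enabled, get_by_id/put_by_id compile to an
// inline cache: a structure check against a patchable immediate, followed by a
// load/store at a patchable offset. The privateCompile*/patch* functions later
// in this file rewrite those immediates once the runtime has observed a
// concrete Structure, so the hot path need not call into the runtime again.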
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    move(Imm32(JSValue::CellTag), regT1);
    Jump match = jump();

    ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
    ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
    ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath();

    match.link(this);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
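// A method_check fuses with the following get_by_id: the inline structure and
// prototype-structure checks above start out failing (they compare against
// patchGetByIdDefaultStructure), so every early execution takes the regular
// get_by_id path. Once the runtime observes a stable callee,
// patchMethodCallProto (below) patches the two structure immediates and the
// putFunction constant, so the callee is materialized without any load.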
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
}
#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif // ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));

    // Load string length to regT1, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT2, regT1));

    // Load the character
    jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);

    failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
    jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.move(Imm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
    jit.ret();

    failures.link(&jit);
    jit.move(Imm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(&jit, pool, 0);
    return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
}
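// The thunk above is shared by all by-val string accesses: given a JSString in
// regT0 and an int32 index in regT2 it returns a single-character string from
// the small strings cache, or null (payload 0) to send the caller down the
// slow path - see the branchTestPtr(Zero, regT0) check in emitSlow_op_get_by_val.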
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));

    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}
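// Each ArrayStorage vector slot is a full 8-byte JSValue, hence the TimesEight
// scaling above with the payload read at +0 and the tag at +4; a slot whose tag
// is EmptyValueTag is a hole, which must fall back to the slow path so that
// the prototype chain can be consulted.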
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check

    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCode().addressForCall());
    Jump failed = branchTestPtr(Zero, regT0);
    emitStore(dst, regT1, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);

    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));

    Label storeResult(this);
    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
    Jump end = jump();

    empty.link(this);
    add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    add32(Imm32(1), regT2, regT0);
    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
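// Note the ordering above: a write to an empty slot within the vector first
// bumps m_numValuesInVector and, if the index is at or beyond m_length, stores
// the new length before looping back to storeResult - so in-bounds hole writes
// and length-extending writes share the same store sequence.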
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(base);
    stubPutByValCall.addArgument(property);
    stubPutByValCall.addArgument(value);
    stubPutByValCall.call();
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    compileGetByIdHotPath();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
void JIT::compileGetByIdHotPath()
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
    DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);

    Label putResult(this);
    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
}
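// The differenceBetween() assertions above pin the exact machine-code layout of
// the uninterrupted sequence: the patchOffsetGetById* values are fixed,
// per-assembler-port constants, and the repatching code below indexes into the
// generated code by those offsets, so any codegen change here must keep the
// distances in sync.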
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
}
void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(dst);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers, 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    int base = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, value, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(regT3, regT2);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}
// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    emitStore(offset, valueTag, valuePayload, base);
}
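// Property storage comes in two flavours: small objects keep their first few
// properties in m_inlineStorage inside the JSObject cell, while larger ones
// spill to a separately allocated m_externalStorage array. cachedOffset is in
// JSValue-sized slots; for the inline case the offset is rebased past the
// object header, for the external case the storage pointer is loaded first.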
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    emitLoad(offset, resultTag, resultPayload, base);
}
void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
{
    if (base->isUsingInlineStorage()) {
        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
        return;
    }

    size_t offset = cachedOffset * sizeof(JSValue);

    PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
    loadPtr(static_cast<void*>(protoPropertyStorage), temp);
    load32(Address(temp, offset), resultPayload);
    load32(Address(temp, offset + 4), resultTag);
}
void JIT::testPrototype(Structure* structure, JumpList& failureCases)
{
    if (structure->m_prototype.isNull())
        return;

    failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
}
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.

    JumpList failureCases;
    failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    testPrototype(oldStructure, failureCases);

    if (!direct) {
        // Verify that nothing in the prototype chain has a setter for this property.
        for (RefPtr<Structure>* it = chain->head(); *it; ++it)
            testPrototype(it->get(), failureCases);
    }

    // Reallocate property storage if needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);

        restoreReturnAddressBeforeReturn(regT3);
    }

    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);

    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
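// A transition stub handles a put_by_id that adds a property: it verifies the
// old Structure (and, for non-direct puts, that no prototype has intervened),
// grows the property storage through the realloc stub if the capacity changed,
// swaps in the new Structure, stores the value, and returns. Any check failure
// tail-calls back into the generic cti_op_put_by_id path.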
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
}
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // regT0 holds a JSCell*

    // Check for array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
    move(regT2, regT0);
    move(Imm32(JSValue::Int32Tag), regT1);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
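// The INT_MAX check above exists because array lengths are stored as uint32_t
// but are boxed here with Int32Tag; a length that does not fit in a signed
// 32-bit payload bails to the slow case, which can box it as a double instead.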
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    bool needsStubLink = false;

    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    // regT0 holds a JSCell*
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        if (!structure->isUsingInlineStorage()) {
            move(regT0, regT1);
            compileGetDirectOffset(regT1, regT2, regT1, structure, cachedOffset);
        } else
            compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check the prototype object's Structure had not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
{
    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
    ASSERT(sizeof(JSValue) == 8);

    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
    Jump finishedLoad = jump();
    notUsingInlineStorage.link(this);
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
    finishedLoad.link(this);
}
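// Unlike the compile-time-offset variants above, this overload computes the
// storage location entirely at runtime (used by op_get_by_pname below): it
// compares the Structure's capacity against JSObject::inlineStorageCapacity to
// decide between inline and external storage, since no Structure* is known at
// compile time.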
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitLoad2(property, regT1, regT0, base, regT3, regT2);
    emitJumpSlowCaseIfNotJSCell(property, regT1);
    addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
    // Property registers are now available as the property is known
    emitJumpSlowCaseIfNotJSCell(base, regT3);
    emitLoadPayload(iter, regT1);

    // Test base's structure
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(Imm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);

    emitStore(dst, regT1, regT0);
    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
}
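// get_by_pname is the fast path for reads like o[p] inside a for-in loop: when
// the property register still holds the string produced by the matching
// iterator and the base's Structure matches the iterator's cached Structure,
// the value can be read straight out of the property storage by index,
// skipping the hash lookup.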
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCaseIfNotJSCell(iter, property);
    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}

} // namespace JSC

#endif // ENABLE(JIT)
#endif // ENABLE(JSVALUE32_64)