/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "GCAwareJITStubRoutine.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "JSVariableObject.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include <wtf/StringPrintStream.h>

namespace JSC {
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImm32(property));
    stubCall.addArgument(value);
    stubCall.call();
}
void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned getter = currentInstruction[3].u.operand;
    unsigned setter = currentInstruction[4].u.operand;

    JITStubCall stubCall(this, cti_op_put_getter_setter);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(getter);
    stubCall.addArgument(setter);
    stubCall.call();
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.call(dst);
}
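
// The thunk below handles single-character string[index] accesses on the get_by_val slow path.
// Judging by the register usage, it expects the JSString cell in regT0 and the int32 index in
// regT2; on success it leaves the cached single-character string cell in regT0 with CellTag in
// regT1, and on any failure it returns 0 in regT0 so the caller can fall through to the generic
// stub call.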
JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));

    // Load string length to regT1, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT2, regT1));

    // Load the character
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT1);
    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT1, TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(BaseIndex(regT0, regT2, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);

    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
    emitArrayProfilingSite(regT1, regT3, profile);
    and32(TrustedImm32(IndexingShapeMask), regT1);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

#if !ASSERT_DISABLED
    Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
    breakpoint();
    resultOK.link(this);
#endif

    emitValueProfilingSite();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
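
// In the JSVALUE32_64 value representation a JSValue is a { tag, payload } pair of 32-bit words,
// so indexed storage is addressed with TimesEight scaling and the tag and payload are loaded as
// two separate 32-bit accesses. A tag of EmptyValueTag marks a hole and sends the access to the
// slow case.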
JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));

    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
    slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));

    return slowCases;
}
JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));

    loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
    moveDoubleToInts(fpRegT0, regT0, regT1);

    return slowCases;
}
JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));

    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
    slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));

    return slowCases;
}
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check

    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
    emitNakedCall(m_vm->getCTIStub(stringGetByValStubGenerator).code());
    Jump failed = branchTestPtr(Zero, regT0);
    emitStore(dst, regT1, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    Jump skipProfiling = jump();

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    emitArrayProfileOutOfBoundsSpecialCase(profile);

    skipProfiling.link(this);

    Label slowPath = label();

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    Call call = stubCall.call(dst);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite();
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
    emitArrayProfilingSite(regT1, regT3, profile);
    and32(TrustedImm32(IndexingShapeMask), regT1);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
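
// For contiguous-shape puts, a store just past the public length but still within the allocated
// vector length is treated as an append: the profile is told about the store-to-hole, the public
// length is bumped to index + 1, and control loops back to storeResult to perform the store.
// Anything past the vector length goes to the slow case.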
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ContiguousShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
    Jump outOfBounds = branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitLoad(value, regT1, regT0);
    switch (indexingShape) {
    case Int32Shape:
        slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        // Fall through.
    case ContiguousShape:
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
        break;
    case DoubleShape: {
        Jump notInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
        convertInt32ToDouble(regT0, fpRegT0);
        Jump ready = jump();
        notInt.link(this);
        moveIntsToDouble(regT0, regT1, fpRegT0, fpRegT1);
        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
        ready.link(this);
        storeDouble(fpRegT0, BaseIndex(regT3, regT2, TimesEight));
        break;
    }
    default:
        CRASH();
        break;
    }

    Jump done = jump();

    outOfBounds.link(this);
    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfVectorLength())));

    emitArrayProfileStoreToHoleSpecialCase(profile);

    add32(TrustedImm32(1), regT2, regT1);
    store32(regT1, Address(regT3, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);

    done.link(this);

    emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);

    return slowCases;
}
JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));

    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));

    Label storeResult(this);
    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
    Jump end = jump();

    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT2, Address(regT3, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT2, regT0);
    store32(regT0, Address(regT3, ArrayStorage::lengthOffset()));
    jump().linkTo(storeResult, this);

    end.link(this);

    emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);

    return slowCases;
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
    case JITDouble:
        linkSlowCase(iter); // value type check
        break;
    default:
        break;
    }

    Jump skipProfiling = jump();
    linkSlowCase(iter); // out of bounds
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    skipProfiling.link(this);

    Label slowPath = label();

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(base);
    stubPutByValCall.addArgument(property);
    stubPutByValCall.addArgument(value);
    Call call = stubPutByValCall.call();

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    compileGetByIdHotPath(ident);
    emitValueProfilingSite();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
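
// The hot path below emits the skeleton of a get_by_id inline cache: a patchable structure
// compare against a dummy structure pointer and two compact loads from a dummy offset. Both are
// rewritten with the real structure and property offset by patchGetByIdSelf() further down in
// this file.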
void JIT::compileGetByIdHotPath(Identifier* ident)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also set the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
        loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
        emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset);
    }

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);

    DataLabelPtr structureToCompare;
    PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);

    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
    DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel1, displacementLabel2, putResult));
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
    emitValueProfilingSite();
}
void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    Label coldPathBegin(this);
    JITStubCall stubCall(this, cti_op_get_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(dst);

    END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
}
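
// put_by_id emits the same kind of patchable skeleton as get_by_id, but the displacement labels
// are full DataLabel32 store patches (payload then tag) rather than compact loads; they are
// filled in by patchPutByIdReplace() below, or the whole site is redirected to a transition stub
// by privateCompilePutByIdTransition().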
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    int base = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, value, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));

    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT1);
    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload
    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset)); // tag

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel1, displacementLabel2));
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(regT3, regT2);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
}
// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset)
{
    if (isOutOfLineOffset(cachedOffset))
        loadPtr(Address(base, JSObject::butterflyOffset()), base);
    emitStore(indexRelativeToBase(cachedOffset), valueTag, valuePayload, base);
}
// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, base);
        return;
    }

    RegisterID temp = resultPayload;
    loadPtr(Address(base, JSObject::butterflyOffset()), temp);
    emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, temp);
}
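
// When the base object is known at compile time, the load can be baked in directly: inline
// properties are read straight out of the cell via locationForOffset(), while out-of-line
// properties go through the object's butterfly. resultTag doubles as the scratch register
// holding the storage pointer until the final tag load overwrites it.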
void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        move(TrustedImmPtr(base->locationForOffset(cachedOffset)), resultTag);
        load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
        load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
        return;
    }

    loadPtr(base->butterflyAddress(), resultTag);
    load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
    load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
}
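
// A put_by_id that adds a property compiles a transition stub: it re-checks the old structure and
// (for non-direct puts) that nothing on the prototype chain intercepts the property, reallocates
// out-of-line storage through cti_op_put_by_id_transition_realloc if the capacity changed, stores
// the new structure, writes the value, and on any failed check tail-calls back to the generic
// put_by_id stub.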
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
    // For MIPS, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
    // For MIPS, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#else
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif

    JumpList failureCases;
    failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
    testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo);

    if (!direct) {
        // Verify that nothing in the prototype chain has a setter for this property.
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
            testPrototype((*it)->storedPrototype(), failureCases, stubInfo);
    }

    // If we succeed in all of our checks, and the code was optimizable, then make sure we
    // decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
    if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
        sub32(
            TrustedImm32(1),
            AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
    }
#endif

    // Reallocate property storage if needed.
    bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
        stubCall.addArgument(TrustedImmPtr(newStructure));
        stubCall.call(regT0);

        restoreReturnAddressBeforeReturn(regT3);

#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
        // For MIPS, we don't add sizeof(void*) to the stack offset.
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
        // For MIPS, we don't add sizeof(void*) to the stack offset.
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#else
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
    }

    emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);

    storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
    // For MIPS, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#else
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#endif
    compilePutDirectOffset(regT0, regT2, regT3, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    stubInfo->stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline put_by_id transition stub for %s, return point %p",
                toCString(*m_codeBlock).data(), returnAddress.value())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        willNeedStorageRealloc,
        newStructure);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
}
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // regT0 holds a JSCell*

    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
    Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, JSArray::butterflyOffset()), regT2);
    load32(Address(regT2, ArrayStorage::lengthOffset()), regT2);

    Jump failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
    move(regT2, regT0);
    move(TrustedImm32(JSValue::Int32Tag), regT1);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);
    patchBuffer.link(failureCases3, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        patchBuffer,
        ("Baseline get_by_id array length stub for %s, return point %p",
            toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                stubInfo->patch.baseline.u.get.putResult).executableAddress()));

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);

    bool needsStubLink = false;
    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
    patchBuffer.link(failureCases1, slowCaseBegin);
    if (failureCases2.isSet())
        patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline get_by_id proto stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
    // regT0 holds a JSCell*
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(regT0, regT1, regT0, cachedOffset);
    }

    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline get_by_id self list stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);

    polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);

    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    }

    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
    patchBuffer.link(failureCases1, lastProtoBegin);
    if (failureCases2.isSet())
        patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline get_by_id proto list stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);

    prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail, stubInfo);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    }

    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline get_by_id chain list stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail, stubInfo);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));

    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));

    // Track the stub we have created so that it will be deleted later.
    RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline get_by_id chain stub for %s, return point %p",
                toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
                    stubInfo->patch.baseline.u.get.putResult).executableAddress())),
        *m_vm,
        m_codeBlock->ownerExecutable(),
        needsStubLink);
    stubInfo->stubRoutine = stubRoutine;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
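
// This variant takes the property offset in a register (it is used by get_by_pname below).
// Offsets below firstOutOfLineOffset refer to inline storage inside the cell; larger offsets
// index the butterfly. The (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) bias in the
// BaseIndex addresses compensates for that split numbering.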
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode finalObjectMode)
{
    ASSERT(sizeof(JSValue) == 8);

    if (finalObjectMode == MayBeFinal) {
        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
        loadPtr(Address(base, JSObject::butterflyOffset()), base);
        neg32(offset);
        Jump done = jump();
        isInline.link(this);
        addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base);
        done.link(this);
    } else {
#if !ASSERT_DISABLED
        Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
        breakpoint();
        isOutOfLine.link(this);
#endif
        loadPtr(Address(base, JSObject::butterflyOffset()), base);
        neg32(offset);
    }
    load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
    load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitLoad2(property, regT1, regT0, base, regT3, regT2);
    emitJumpSlowCaseIfNotJSCell(property, regT1);
    addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
    // Property registers are now available as the property is known
    emitJumpSlowCaseIfNotJSCell(base, regT3);
    emitLoadPayload(iter, regT1);

    // Test base's structure
    loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
    addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
    add32(TrustedImm32(firstOutOfLineOffset), regT3);
    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
    inlineProperty.link(this);
    compileGetDirectOffset(regT2, regT1, regT0, regT3);

    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
}
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCaseIfNotJSCell(iter, property);
    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}
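
// Scoped variable access walks 'skip' links down the scope chain (optionally skipping a
// not-yet-created activation at the top level of a function) and then reads the value out of the
// variable object's register array.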
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int skip = currentInstruction[3].u.operand;

    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
        loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);

    loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitValueProfilingSite();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}
void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int index = currentInstruction[1].u.operand;
    int skip = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
        loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);

    loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
    emitStore(index, regT1, regT0, regT3);
    emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
    WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
    int value = currentInstruction[2].u.operand;

    JSGlobalObject* globalObject = m_codeBlock->globalObject();

    emitLoad(value, regT1, regT0);

    if (Heap::isWriteBarrierEnabled()) {
        move(TrustedImmPtr(globalObject), regT2);

        emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
    }

    store32(regT1, registerPointer->tagPointer());
    store32(regT0, registerPointer->payloadPointer());
    map(m_bytecodeOffset + OPCODE_LENGTH(op_init_global_const), value, regT1, regT0);
}
void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
{
    WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
    int value = currentInstruction[2].u.operand;

    JSGlobalObject* globalObject = m_codeBlock->globalObject();

    emitLoad(value, regT1, regT0);

    addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));

    if (Heap::isWriteBarrierEnabled()) {
        move(TrustedImmPtr(globalObject), regT2);
        emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
    }

    store32(regT1, registerPointer->tagPointer());
    store32(regT0, registerPointer->payloadPointer());
}
void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_init_global_const_check);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
    stubCall.call();
}
void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), 0);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), 0);
    repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
}
void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    if (isDirectPutById(stubInfo))
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
    else
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), 0);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), 0);
}
} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)