/*
 * Copyright (C) 2008, 2009, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
32 #include "CodeBlock.h"
33 #include "GCAwareJITStubRoutine.h"
34 #include "Interpreter.h"
35 #include "JITInlines.h"
37 #include "JSFunction.h"
38 #include "JSPropertyNameIterator.h"
39 #include "JSVariableObject.h"
40 #include "LinkBuffer.h"
41 #include "RepatchBuffer.h"
42 #include "ResultType.h"
43 #include "SamplingTool.h"
44 #include <wtf/StringPrintStream.h>
49 void JIT::emit_op_put_by_index(Instruction
* currentInstruction
)
51 int base
= currentInstruction
[1].u
.operand
;
52 int property
= currentInstruction
[2].u
.operand
;
53 int value
= currentInstruction
[3].u
.operand
;
55 emitLoad(base
, regT1
, regT0
);
56 emitLoad(value
, regT3
, regT2
);
57 callOperation(operationPutByIndex
, regT1
, regT0
, property
, regT3
, regT2
);
60 void JIT::emit_op_put_getter_setter(Instruction
* currentInstruction
)
62 int base
= currentInstruction
[1].u
.operand
;
63 int property
= currentInstruction
[2].u
.operand
;
64 int getter
= currentInstruction
[3].u
.operand
;
65 int setter
= currentInstruction
[4].u
.operand
;
67 emitLoadPayload(base
, regT1
);
68 emitLoadPayload(getter
, regT3
);
69 emitLoadPayload(setter
, regT4
);
70 callOperation(operationPutGetterSetter
, regT1
, &m_codeBlock
->identifier(property
), regT3
, regT4
);
73 void JIT::emit_op_del_by_id(Instruction
* currentInstruction
)
75 int dst
= currentInstruction
[1].u
.operand
;
76 int base
= currentInstruction
[2].u
.operand
;
77 int property
= currentInstruction
[3].u
.operand
;
78 emitLoad(base
, regT1
, regT0
);
79 callOperation(operationDeleteById
, dst
, regT1
, regT0
, &m_codeBlock
->identifier(property
));
82 JIT::CodeRef
JIT::stringGetByValStubGenerator(VM
* vm
)
84 JSInterfaceJIT
jit(vm
);
86 failures
.append(JSC::branchStructure(jit
, NotEqual
, Address(regT0
, JSCell::structureIDOffset()), vm
->stringStructure
.get()));
88 // Load string length to regT1, and start the process of loading the data pointer into regT0
89 jit
.load32(Address(regT0
, ThunkHelpers::jsStringLengthOffset()), regT1
);
90 jit
.loadPtr(Address(regT0
, ThunkHelpers::jsStringValueOffset()), regT0
);
91 failures
.append(jit
.branchTest32(Zero
, regT0
));
93 // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
94 failures
.append(jit
.branch32(AboveOrEqual
, regT2
, regT1
));
99 // Load the string flags
100 jit
.loadPtr(Address(regT0
, StringImpl::flagsOffset()), regT1
);
101 jit
.loadPtr(Address(regT0
, StringImpl::dataOffset()), regT0
);
102 is16Bit
.append(jit
.branchTest32(Zero
, regT1
, TrustedImm32(StringImpl::flagIs8Bit())));
103 jit
.load8(BaseIndex(regT0
, regT2
, TimesOne
, 0), regT0
);
104 cont8Bit
.append(jit
.jump());
106 jit
.load16(BaseIndex(regT0
, regT2
, TimesTwo
, 0), regT0
);
110 failures
.append(jit
.branch32(AboveOrEqual
, regT0
, TrustedImm32(0x100)));
111 jit
.move(TrustedImmPtr(vm
->smallStrings
.singleCharacterStrings()), regT1
);
112 jit
.loadPtr(BaseIndex(regT1
, regT0
, ScalePtr
, 0), regT0
);
113 jit
.move(TrustedImm32(JSValue::CellTag
), regT1
); // We null check regT0 on return so this is safe
117 jit
.move(TrustedImm32(0), regT0
);
120 LinkBuffer
patchBuffer(*vm
, jit
, GLOBAL_THUNK_ID
);
121 return FINALIZE_CODE(patchBuffer
, ("String get_by_val stub"));
124 void JIT::emit_op_get_by_val(Instruction
* currentInstruction
)
126 int dst
= currentInstruction
[1].u
.operand
;
127 int base
= currentInstruction
[2].u
.operand
;
128 int property
= currentInstruction
[3].u
.operand
;
129 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
131 emitLoad2(base
, regT1
, regT0
, property
, regT3
, regT2
);
133 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
134 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
135 emitArrayProfilingSiteWithCell(regT0
, regT1
, profile
);
136 and32(TrustedImm32(IndexingShapeMask
), regT1
);
138 PatchableJump badType
;
141 JITArrayMode mode
= chooseArrayMode(profile
);
144 slowCases
= emitInt32GetByVal(currentInstruction
, badType
);
147 slowCases
= emitDoubleGetByVal(currentInstruction
, badType
);
150 slowCases
= emitContiguousGetByVal(currentInstruction
, badType
);
152 case JITArrayStorage
:
153 slowCases
= emitArrayStorageGetByVal(currentInstruction
, badType
);
159 addSlowCase(badType
);
160 addSlowCase(slowCases
);
162 Label done
= label();
164 if (!ASSERT_DISABLED
) {
165 Jump resultOK
= branch32(NotEqual
, regT1
, TrustedImm32(JSValue::EmptyValueTag
));
166 abortWithReason(JITGetByValResultIsNotEmpty
);
170 emitValueProfilingSite();
171 emitStore(dst
, regT1
, regT0
);
173 m_byValCompilationInfo
.append(ByValCompilationInfo(m_bytecodeOffset
, badType
, mode
, done
));
176 JIT::JumpList
JIT::emitContiguousGetByVal(Instruction
*, PatchableJump
& badType
, IndexingType expectedShape
)
180 badType
= patchableBranch32(NotEqual
, regT1
, TrustedImm32(expectedShape
));
182 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT3
);
183 slowCases
.append(branch32(AboveOrEqual
, regT2
, Address(regT3
, Butterfly::offsetOfPublicLength())));
185 load32(BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), regT1
); // tag
186 load32(BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), regT0
); // payload
187 slowCases
.append(branch32(Equal
, regT1
, TrustedImm32(JSValue::EmptyValueTag
)));
192 JIT::JumpList
JIT::emitDoubleGetByVal(Instruction
*, PatchableJump
& badType
)
196 badType
= patchableBranch32(NotEqual
, regT1
, TrustedImm32(DoubleShape
));
198 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT3
);
199 slowCases
.append(branch32(AboveOrEqual
, regT2
, Address(regT3
, Butterfly::offsetOfPublicLength())));
201 loadDouble(BaseIndex(regT3
, regT2
, TimesEight
), fpRegT0
);
202 slowCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fpRegT0
, fpRegT0
));
203 moveDoubleToInts(fpRegT0
, regT0
, regT1
);
208 JIT::JumpList
JIT::emitArrayStorageGetByVal(Instruction
*, PatchableJump
& badType
)
212 add32(TrustedImm32(-ArrayStorageShape
), regT1
, regT3
);
213 badType
= patchableBranch32(Above
, regT3
, TrustedImm32(SlowPutArrayStorageShape
- ArrayStorageShape
));
215 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT3
);
216 slowCases
.append(branch32(AboveOrEqual
, regT2
, Address(regT3
, ArrayStorage::vectorLengthOffset())));
218 load32(BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), regT1
); // tag
219 load32(BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), regT0
); // payload
220 slowCases
.append(branch32(Equal
, regT1
, TrustedImm32(JSValue::EmptyValueTag
)));
225 void JIT::emitSlow_op_get_by_val(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
227 int dst
= currentInstruction
[1].u
.operand
;
228 int base
= currentInstruction
[2].u
.operand
;
229 int property
= currentInstruction
[3].u
.operand
;
230 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
232 linkSlowCase(iter
); // property int32 check
233 linkSlowCaseIfNotJSCell(iter
, base
); // base cell check
235 Jump nonCell
= jump();
236 linkSlowCase(iter
); // base array check
237 Jump notString
= branchStructure(NotEqual
, Address(regT0
, JSCell::structureIDOffset()), m_vm
->stringStructure
.get());
238 emitNakedCall(m_vm
->getCTIStub(stringGetByValStubGenerator
).code());
239 Jump failed
= branchTestPtr(Zero
, regT0
);
240 emitStore(dst
, regT1
, regT0
);
241 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val
));
243 notString
.link(this);
246 Jump skipProfiling
= jump();
248 linkSlowCase(iter
); // vector length check
249 linkSlowCase(iter
); // empty value
251 emitArrayProfileOutOfBoundsSpecialCase(profile
);
253 skipProfiling
.link(this);
255 Label slowPath
= label();
257 emitLoad(base
, regT1
, regT0
);
258 emitLoad(property
, regT3
, regT2
);
259 Call call
= callOperation(operationGetByValDefault
, dst
, regT1
, regT0
, regT3
, regT2
);
261 m_byValCompilationInfo
[m_byValInstructionIndex
].slowPathTarget
= slowPath
;
262 m_byValCompilationInfo
[m_byValInstructionIndex
].returnAddress
= call
;
263 m_byValInstructionIndex
++;
265 emitValueProfilingSite();
268 void JIT::emit_op_put_by_val(Instruction
* currentInstruction
)
270 int base
= currentInstruction
[1].u
.operand
;
271 int property
= currentInstruction
[2].u
.operand
;
272 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
274 emitLoad2(base
, regT1
, regT0
, property
, regT3
, regT2
);
276 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
277 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
278 emitArrayProfilingSiteWithCell(regT0
, regT1
, profile
);
279 and32(TrustedImm32(IndexingShapeMask
), regT1
);
281 PatchableJump badType
;
284 JITArrayMode mode
= chooseArrayMode(profile
);
287 slowCases
= emitInt32PutByVal(currentInstruction
, badType
);
290 slowCases
= emitDoublePutByVal(currentInstruction
, badType
);
293 slowCases
= emitContiguousPutByVal(currentInstruction
, badType
);
295 case JITArrayStorage
:
296 slowCases
= emitArrayStoragePutByVal(currentInstruction
, badType
);
303 addSlowCase(badType
);
304 addSlowCase(slowCases
);
306 Label done
= label();
308 m_byValCompilationInfo
.append(ByValCompilationInfo(m_bytecodeOffset
, badType
, mode
, done
));
311 JIT::JumpList
JIT::emitGenericContiguousPutByVal(Instruction
* currentInstruction
, PatchableJump
& badType
, IndexingType indexingShape
)
313 int base
= currentInstruction
[1].u
.operand
;
314 int value
= currentInstruction
[3].u
.operand
;
315 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
319 badType
= patchableBranch32(NotEqual
, regT1
, TrustedImm32(ContiguousShape
));
321 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT3
);
322 Jump outOfBounds
= branch32(AboveOrEqual
, regT2
, Address(regT3
, Butterfly::offsetOfPublicLength()));
324 Label storeResult
= label();
325 emitLoad(value
, regT1
, regT0
);
326 switch (indexingShape
) {
328 slowCases
.append(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
329 store32(regT0
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
330 store32(regT1
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
332 case ContiguousShape
:
333 store32(regT0
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)));
334 store32(regT1
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)));
335 emitLoad(base
, regT2
, regT3
);
336 emitWriteBarrier(base
, value
, ShouldFilterValue
);
339 Jump notInt
= branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
));
340 convertInt32ToDouble(regT0
, fpRegT0
);
343 moveIntsToDouble(regT0
, regT1
, fpRegT0
, fpRegT1
);
344 slowCases
.append(branchDouble(DoubleNotEqualOrUnordered
, fpRegT0
, fpRegT0
));
346 storeDouble(fpRegT0
, BaseIndex(regT3
, regT2
, TimesEight
));
356 outOfBounds
.link(this);
357 slowCases
.append(branch32(AboveOrEqual
, regT2
, Address(regT3
, Butterfly::offsetOfVectorLength())));
359 emitArrayProfileStoreToHoleSpecialCase(profile
);
361 add32(TrustedImm32(1), regT2
, regT1
);
362 store32(regT1
, Address(regT3
, Butterfly::offsetOfPublicLength()));
363 jump().linkTo(storeResult
, this);
370 JIT::JumpList
JIT::emitArrayStoragePutByVal(Instruction
* currentInstruction
, PatchableJump
& badType
)
372 int base
= currentInstruction
[1].u
.operand
;
373 int value
= currentInstruction
[3].u
.operand
;
374 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
378 badType
= patchableBranch32(NotEqual
, regT1
, TrustedImm32(ArrayStorageShape
));
380 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT3
);
381 slowCases
.append(branch32(AboveOrEqual
, regT2
, Address(regT3
, ArrayStorage::vectorLengthOffset())));
383 Jump empty
= branch32(Equal
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), TrustedImm32(JSValue::EmptyValueTag
));
385 Label
storeResult(this);
386 emitLoad(value
, regT1
, regT0
);
387 store32(regT0
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
))); // payload
388 store32(regT1
, BaseIndex(regT3
, regT2
, TimesEight
, OBJECT_OFFSETOF(ArrayStorage
, m_vector
[0]) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
))); // tag
392 emitArrayProfileStoreToHoleSpecialCase(profile
);
393 add32(TrustedImm32(1), Address(regT3
, OBJECT_OFFSETOF(ArrayStorage
, m_numValuesInVector
)));
394 branch32(Below
, regT2
, Address(regT3
, ArrayStorage::lengthOffset())).linkTo(storeResult
, this);
396 add32(TrustedImm32(1), regT2
, regT0
);
397 store32(regT0
, Address(regT3
, ArrayStorage::lengthOffset()));
398 jump().linkTo(storeResult
, this);
402 emitWriteBarrier(base
, value
, ShouldFilterValue
);
407 void JIT::emitSlow_op_put_by_val(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
409 int base
= currentInstruction
[1].u
.operand
;
410 int property
= currentInstruction
[2].u
.operand
;
411 int value
= currentInstruction
[3].u
.operand
;
412 ArrayProfile
* profile
= currentInstruction
[4].u
.arrayProfile
;
414 linkSlowCase(iter
); // property int32 check
415 linkSlowCaseIfNotJSCell(iter
, base
); // base cell check
416 linkSlowCase(iter
); // base not array check
418 JITArrayMode mode
= chooseArrayMode(profile
);
422 linkSlowCase(iter
); // value type check
428 Jump skipProfiling
= jump();
429 linkSlowCase(iter
); // out of bounds
430 emitArrayProfileOutOfBoundsSpecialCase(profile
);
431 skipProfiling
.link(this);
433 Label slowPath
= label();
435 bool isDirect
= m_interpreter
->getOpcodeID(currentInstruction
->u
.opcode
) == op_put_by_val_direct
;
438 // FIXME: We only have 5 temp registers, but need 6 to make this call, therefore we materialize
439 // our own call. When we finish moving JSC to the C call stack, we'll get another register so
440 // we can use the normal case.
441 resetCallArguments();
442 addCallArgument(GPRInfo::callFrameRegister
);
443 emitLoad(base
, regT0
, regT1
);
444 addCallArgument(regT1
);
445 addCallArgument(regT0
);
446 emitLoad(property
, regT0
, regT1
);
447 addCallArgument(regT1
);
448 addCallArgument(regT0
);
449 emitLoad(value
, regT0
, regT1
);
450 addCallArgument(regT1
);
451 addCallArgument(regT0
);
452 Call call
= appendCallWithExceptionCheck(isDirect
? operationDirectPutByVal
: operationPutByVal
);
454 // The register selection below is chosen to reduce register swapping on ARM.
455 // Swapping shouldn't happen on other platforms.
456 emitLoad(base
, regT2
, regT1
);
457 emitLoad(property
, regT3
, regT0
);
458 emitLoad(value
, regT5
, regT4
);
459 Call call
= callOperation(isDirect
? operationDirectPutByVal
: operationPutByVal
, regT2
, regT1
, regT3
, regT0
, regT5
, regT4
);
462 m_byValCompilationInfo
[m_byValInstructionIndex
].slowPathTarget
= slowPath
;
463 m_byValCompilationInfo
[m_byValInstructionIndex
].returnAddress
= call
;
464 m_byValInstructionIndex
++;
467 void JIT::emit_op_get_by_id(Instruction
* currentInstruction
)
469 int dst
= currentInstruction
[1].u
.operand
;
470 int base
= currentInstruction
[2].u
.operand
;
471 const Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
473 emitLoad(base
, regT1
, regT0
);
474 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
476 if (*ident
== m_vm
->propertyNames
->length
&& shouldEmitProfiling())
477 emitArrayProfilingSiteForBytecodeIndexWithCell(regT0
, regT2
, m_bytecodeOffset
);
479 JITGetByIdGenerator
gen(
480 m_codeBlock
, CodeOrigin(m_bytecodeOffset
), RegisterSet::specialRegisters(),
481 JSValueRegs::payloadOnly(regT0
), JSValueRegs(regT1
, regT0
), DontSpill
);
482 gen
.generateFastPath(*this);
483 addSlowCase(gen
.slowPathJump());
484 m_getByIds
.append(gen
);
486 emitValueProfilingSite();
487 emitStore(dst
, regT1
, regT0
);
490 void JIT::emitSlow_op_get_by_id(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
492 int resultVReg
= currentInstruction
[1].u
.operand
;
493 int baseVReg
= currentInstruction
[2].u
.operand
;
494 const Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[3].u
.operand
));
496 linkSlowCaseIfNotJSCell(iter
, baseVReg
);
499 JITGetByIdGenerator
& gen
= m_getByIds
[m_getByIdIndex
++];
501 Label coldPathBegin
= label();
503 Call call
= callOperation(WithProfile
, operationGetByIdOptimize
, resultVReg
, gen
.stubInfo(), regT1
, regT0
, ident
->impl());
505 gen
.reportSlowPathCall(coldPathBegin
, call
);
508 void JIT::emit_op_put_by_id(Instruction
* currentInstruction
)
510 // In order to be able to patch both the Structure, and the object offset, we store one pointer,
511 // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
512 // such that the Structure & offset are always at the same distance from this.
514 int base
= currentInstruction
[1].u
.operand
;
515 int value
= currentInstruction
[3].u
.operand
;
516 int direct
= currentInstruction
[8].u
.operand
;
518 emitWriteBarrier(base
, value
, ShouldFilterBase
);
520 emitLoad2(base
, regT1
, regT0
, value
, regT3
, regT2
);
522 emitJumpSlowCaseIfNotJSCell(base
, regT1
);
524 emitLoad(base
, regT1
, regT0
);
525 emitLoad(value
, regT3
, regT2
);
527 JITPutByIdGenerator
gen(
528 m_codeBlock
, CodeOrigin(m_bytecodeOffset
), RegisterSet::specialRegisters(),
529 JSValueRegs::payloadOnly(regT0
), JSValueRegs(regT3
, regT2
),
530 regT1
, DontSpill
, m_codeBlock
->ecmaMode(), direct
? Direct
: NotDirect
);
532 gen
.generateFastPath(*this);
533 addSlowCase(gen
.slowPathJump());
535 m_putByIds
.append(gen
);
538 void JIT::emitSlow_op_put_by_id(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
540 int base
= currentInstruction
[1].u
.operand
;
541 const Identifier
* ident
= &(m_codeBlock
->identifier(currentInstruction
[2].u
.operand
));
543 linkSlowCaseIfNotJSCell(iter
, base
);
546 Label
coldPathBegin(this);
548 JITPutByIdGenerator
& gen
= m_putByIds
[m_putByIdIndex
++];
550 Call call
= callOperation(
551 gen
.slowPathFunction(), gen
.stubInfo(), regT3
, regT2
, regT1
, regT0
, ident
->impl());
553 gen
.reportSlowPathCall(coldPathBegin
, call
);
556 // Compile a store into an object's property storage. May overwrite base.
557 void JIT::compilePutDirectOffset(RegisterID base
, RegisterID valueTag
, RegisterID valuePayload
, PropertyOffset cachedOffset
)
559 if (isOutOfLineOffset(cachedOffset
))
560 loadPtr(Address(base
, JSObject::butterflyOffset()), base
);
561 emitStore(indexRelativeToBase(cachedOffset
), valueTag
, valuePayload
, base
);
564 // Compile a load from an object's property storage. May overwrite base.
565 void JIT::compileGetDirectOffset(RegisterID base
, RegisterID resultTag
, RegisterID resultPayload
, PropertyOffset cachedOffset
)
567 if (isInlineOffset(cachedOffset
)) {
568 emitLoad(indexRelativeToBase(cachedOffset
), resultTag
, resultPayload
, base
);
572 RegisterID temp
= resultPayload
;
573 loadPtr(Address(base
, JSObject::butterflyOffset()), temp
);
574 emitLoad(indexRelativeToBase(cachedOffset
), resultTag
, resultPayload
, temp
);
577 void JIT::compileGetDirectOffset(JSObject
* base
, RegisterID resultTag
, RegisterID resultPayload
, PropertyOffset cachedOffset
)
579 if (isInlineOffset(cachedOffset
)) {
580 move(TrustedImmPtr(base
->locationForOffset(cachedOffset
)), resultTag
);
581 load32(Address(resultTag
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
);
582 load32(Address(resultTag
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
);
586 loadPtr(base
->butterflyAddress(), resultTag
);
587 load32(Address(resultTag
, offsetInButterfly(cachedOffset
) * sizeof(WriteBarrier
<Unknown
>) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
)), resultPayload
);
588 load32(Address(resultTag
, offsetInButterfly(cachedOffset
) * sizeof(WriteBarrier
<Unknown
>) + OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
)), resultTag
);
591 void JIT::compileGetDirectOffset(RegisterID base
, RegisterID resultTag
, RegisterID resultPayload
, RegisterID offset
, FinalObjectMode finalObjectMode
)
593 ASSERT(sizeof(JSValue
) == 8);
595 if (finalObjectMode
== MayBeFinal
) {
596 Jump isInline
= branch32(LessThan
, offset
, TrustedImm32(firstOutOfLineOffset
));
597 loadPtr(Address(base
, JSObject::butterflyOffset()), base
);
601 addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset
- 2) * sizeof(EncodedJSValue
)), base
);
604 if (!ASSERT_DISABLED
) {
605 Jump isOutOfLine
= branch32(GreaterThanOrEqual
, offset
, TrustedImm32(firstOutOfLineOffset
));
606 abortWithReason(JITOffsetIsNotOutOfLine
);
607 isOutOfLine
.link(this);
609 loadPtr(Address(base
, JSObject::butterflyOffset()), base
);
612 load32(BaseIndex(base
, offset
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.payload
) + (firstOutOfLineOffset
- 2) * sizeof(EncodedJSValue
)), resultPayload
);
613 load32(BaseIndex(base
, offset
, TimesEight
, OBJECT_OFFSETOF(JSValue
, u
.asBits
.tag
) + (firstOutOfLineOffset
- 2) * sizeof(EncodedJSValue
)), resultTag
);
616 void JIT::emit_op_get_by_pname(Instruction
* currentInstruction
)
618 int dst
= currentInstruction
[1].u
.operand
;
619 int base
= currentInstruction
[2].u
.operand
;
620 int property
= currentInstruction
[3].u
.operand
;
621 unsigned expected
= currentInstruction
[4].u
.operand
;
622 int iter
= currentInstruction
[5].u
.operand
;
623 int i
= currentInstruction
[6].u
.operand
;
625 emitLoad2(property
, regT1
, regT0
, base
, regT3
, regT2
);
626 emitJumpSlowCaseIfNotJSCell(property
, regT1
);
627 addSlowCase(branchPtr(NotEqual
, regT0
, payloadFor(expected
)));
628 // Property registers are now available as the property is known
629 emitJumpSlowCaseIfNotJSCell(base
, regT3
);
630 emitLoadPayload(iter
, regT1
);
632 // Test base's structure
633 loadPtr(Address(regT2
, JSCell::structureIDOffset()), regT0
);
634 addSlowCase(branchPtr(NotEqual
, regT0
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructure
))));
635 load32(addressFor(i
), regT3
);
636 sub32(TrustedImm32(1), regT3
);
637 addSlowCase(branch32(AboveOrEqual
, regT3
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_numCacheableSlots
))));
638 Jump inlineProperty
= branch32(Below
, regT3
, Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructureInlineCapacity
)));
639 add32(TrustedImm32(firstOutOfLineOffset
), regT3
);
640 sub32(Address(regT1
, OBJECT_OFFSETOF(JSPropertyNameIterator
, m_cachedStructureInlineCapacity
)), regT3
);
641 inlineProperty
.link(this);
642 compileGetDirectOffset(regT2
, regT1
, regT0
, regT3
);
644 emitStore(dst
, regT1
, regT0
);
647 void JIT::emitSlow_op_get_by_pname(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
649 int dst
= currentInstruction
[1].u
.operand
;
650 int base
= currentInstruction
[2].u
.operand
;
651 int property
= currentInstruction
[3].u
.operand
;
653 linkSlowCaseIfNotJSCell(iter
, property
);
655 linkSlowCaseIfNotJSCell(iter
, base
);
659 emitLoad(base
, regT1
, regT0
);
660 emitLoad(property
, regT3
, regT2
);
661 callOperation(operationGetByValGeneric
, dst
, regT1
, regT0
, regT3
, regT2
);
664 void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks
)
666 if (!needsVarInjectionChecks
)
668 addSlowCase(branch8(Equal
, AbsoluteAddress(m_codeBlock
->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated
)));
671 void JIT::emitResolveClosure(int dst
, bool needsVarInjectionChecks
, unsigned depth
)
673 emitVarInjectionCheck(needsVarInjectionChecks
);
674 move(TrustedImm32(JSValue::CellTag
), regT1
);
675 emitLoadPayload(JSStack::ScopeChain
, regT0
);
676 if (m_codeBlock
->needsActivation()) {
677 emitLoadPayload(m_codeBlock
->activationRegister().offset(), regT2
);
678 Jump noActivation
= branchTestPtr(Zero
, regT2
);
679 loadPtr(Address(regT2
, JSScope::offsetOfNext()), regT0
);
680 noActivation
.link(this);
682 for (unsigned i
= 0; i
< depth
; ++i
)
683 loadPtr(Address(regT0
, JSScope::offsetOfNext()), regT0
);
684 emitStore(dst
, regT1
, regT0
);
687 void JIT::emit_op_resolve_scope(Instruction
* currentInstruction
)
689 int dst
= currentInstruction
[1].u
.operand
;
690 ResolveType resolveType
= static_cast<ResolveType
>(currentInstruction
[3].u
.operand
);
691 unsigned depth
= currentInstruction
[4].u
.operand
;
693 switch (resolveType
) {
696 case GlobalPropertyWithVarInjectionChecks
:
697 case GlobalVarWithVarInjectionChecks
:
698 emitVarInjectionCheck(needsVarInjectionChecks(resolveType
));
699 move(TrustedImm32(JSValue::CellTag
), regT1
);
700 move(TrustedImmPtr(m_codeBlock
->globalObject()), regT0
);
701 emitStore(dst
, regT1
, regT0
);
704 case ClosureVarWithVarInjectionChecks
:
705 emitResolveClosure(dst
, needsVarInjectionChecks(resolveType
), depth
);
713 void JIT::emitSlow_op_resolve_scope(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
715 int dst
= currentInstruction
[1].u
.operand
;
716 ResolveType resolveType
= static_cast<ResolveType
>(currentInstruction
[3].u
.operand
);
718 if (resolveType
== GlobalProperty
|| resolveType
== GlobalVar
|| resolveType
== ClosureVar
)
722 int32_t indentifierIndex
= currentInstruction
[2].u
.operand
;
723 callOperation(operationResolveScope
, dst
, indentifierIndex
);
726 void JIT::emitLoadWithStructureCheck(int scope
, Structure
** structureSlot
)
728 emitLoad(scope
, regT1
, regT0
);
729 loadPtr(structureSlot
, regT2
);
730 addSlowCase(branchPtr(NotEqual
, Address(regT0
, JSCell::structureIDOffset()), regT2
));
733 void JIT::emitGetGlobalProperty(uintptr_t* operandSlot
)
736 load32(operandSlot
, regT3
);
737 compileGetDirectOffset(regT2
, regT1
, regT0
, regT3
, KnownNotFinal
);
740 void JIT::emitGetGlobalVar(uintptr_t operand
)
742 load32(reinterpret_cast<char*>(operand
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
), regT1
);
743 load32(reinterpret_cast<char*>(operand
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
), regT0
);
746 void JIT::emitGetClosureVar(int scope
, uintptr_t operand
)
748 emitLoad(scope
, regT1
, regT0
);
749 loadPtr(Address(regT0
, JSVariableObject::offsetOfRegisters()), regT0
);
750 load32(Address(regT0
, operand
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)), regT1
);
751 load32(Address(regT0
, operand
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)), regT0
);
754 void JIT::emit_op_get_from_scope(Instruction
* currentInstruction
)
756 int dst
= currentInstruction
[1].u
.operand
;
757 int scope
= currentInstruction
[2].u
.operand
;
758 ResolveType resolveType
= ResolveModeAndType(currentInstruction
[4].u
.operand
).type();
759 Structure
** structureSlot
= currentInstruction
[5].u
.structure
.slot();
760 uintptr_t* operandSlot
= reinterpret_cast<uintptr_t*>(¤tInstruction
[6].u
.pointer
);
762 switch (resolveType
) {
764 case GlobalPropertyWithVarInjectionChecks
:
765 emitLoadWithStructureCheck(scope
, structureSlot
); // Structure check covers var injection.
766 emitGetGlobalProperty(operandSlot
);
769 case GlobalVarWithVarInjectionChecks
:
770 emitVarInjectionCheck(needsVarInjectionChecks(resolveType
));
771 emitGetGlobalVar(*operandSlot
);
774 case ClosureVarWithVarInjectionChecks
:
775 emitVarInjectionCheck(needsVarInjectionChecks(resolveType
));
776 emitGetClosureVar(scope
, *operandSlot
);
782 emitValueProfilingSite();
783 emitStore(dst
, regT1
, regT0
);
786 void JIT::emitSlow_op_get_from_scope(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
788 int dst
= currentInstruction
[1].u
.operand
;
789 ResolveType resolveType
= ResolveModeAndType(currentInstruction
[4].u
.operand
).type();
791 if (resolveType
== GlobalVar
|| resolveType
== ClosureVar
)
795 callOperation(WithProfile
, operationGetFromScope
, dst
, currentInstruction
);
798 void JIT::emitPutGlobalProperty(uintptr_t* operandSlot
, int value
)
800 emitLoad(value
, regT3
, regT2
);
802 loadPtr(Address(regT0
, JSObject::butterflyOffset()), regT0
);
803 loadPtr(operandSlot
, regT1
);
805 store32(regT3
, BaseIndex(regT0
, regT1
, TimesEight
, (firstOutOfLineOffset
- 2) * sizeof(EncodedJSValue
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
806 store32(regT2
, BaseIndex(regT0
, regT1
, TimesEight
, (firstOutOfLineOffset
- 2) * sizeof(EncodedJSValue
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
809 void JIT::emitNotifyWrite(RegisterID tag
, RegisterID payload
, RegisterID scratch
, VariableWatchpointSet
* set
)
811 if (!set
|| set
->state() == IsInvalidated
)
814 load8(set
->addressOfState(), scratch
);
815 Jump isDone
= branch32(Equal
, scratch
, TrustedImm32(IsInvalidated
));
817 JumpList notifySlow
= branch32(
818 NotEqual
, AbsoluteAddress(set
->addressOfInferredValue()->payloadPointer()), payload
);
819 notifySlow
.append(branch32(
820 NotEqual
, AbsoluteAddress(set
->addressOfInferredValue()->tagPointer()), tag
));
821 addSlowCase(notifySlow
);
826 void JIT::emitPutGlobalVar(uintptr_t operand
, int value
, VariableWatchpointSet
* set
)
828 emitLoad(value
, regT1
, regT0
);
829 emitNotifyWrite(regT1
, regT0
, regT2
, set
);
830 store32(regT1
, reinterpret_cast<char*>(operand
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
));
831 store32(regT0
, reinterpret_cast<char*>(operand
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
));
834 void JIT::emitPutClosureVar(int scope
, uintptr_t operand
, int value
)
836 emitLoad(value
, regT3
, regT2
);
837 emitLoad(scope
, regT1
, regT0
);
838 loadPtr(Address(regT0
, JSVariableObject::offsetOfRegisters()), regT0
);
839 store32(regT3
, Address(regT0
, operand
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.tag
)));
840 store32(regT2
, Address(regT0
, operand
* sizeof(Register
) + OBJECT_OFFSETOF(EncodedValueDescriptor
, asBits
.payload
)));
843 void JIT::emit_op_put_to_scope(Instruction
* currentInstruction
)
845 int scope
= currentInstruction
[1].u
.operand
;
846 int value
= currentInstruction
[3].u
.operand
;
847 ResolveType resolveType
= ResolveModeAndType(currentInstruction
[4].u
.operand
).type();
848 Structure
** structureSlot
= currentInstruction
[5].u
.structure
.slot();
849 uintptr_t* operandSlot
= reinterpret_cast<uintptr_t*>(¤tInstruction
[6].u
.pointer
);
851 switch (resolveType
) {
853 case GlobalPropertyWithVarInjectionChecks
:
854 emitWriteBarrier(m_codeBlock
->globalObject(), value
, ShouldFilterValue
);
855 emitLoadWithStructureCheck(scope
, structureSlot
); // Structure check covers var injection.
856 emitPutGlobalProperty(operandSlot
, value
);
859 case GlobalVarWithVarInjectionChecks
:
860 emitWriteBarrier(m_codeBlock
->globalObject(), value
, ShouldFilterValue
);
861 emitVarInjectionCheck(needsVarInjectionChecks(resolveType
));
862 emitPutGlobalVar(*operandSlot
, value
, currentInstruction
[5].u
.watchpointSet
);
865 case ClosureVarWithVarInjectionChecks
:
866 emitWriteBarrier(scope
, value
, ShouldFilterValue
);
867 emitVarInjectionCheck(needsVarInjectionChecks(resolveType
));
868 emitPutClosureVar(scope
, *operandSlot
, value
);
876 void JIT::emitSlow_op_put_to_scope(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
878 ResolveType resolveType
= ResolveModeAndType(currentInstruction
[4].u
.operand
).type();
879 unsigned linkCount
= 0;
880 if (resolveType
!= GlobalVar
&& resolveType
!= ClosureVar
)
882 if ((resolveType
== GlobalVar
|| resolveType
== GlobalVarWithVarInjectionChecks
)
883 && currentInstruction
[5].u
.watchpointSet
->state() != IsInvalidated
)
889 callOperation(operationPutToScope
, currentInstruction
);
892 void JIT::emit_op_init_global_const(Instruction
* currentInstruction
)
894 WriteBarrier
<Unknown
>* registerPointer
= currentInstruction
[1].u
.registerPointer
;
895 int value
= currentInstruction
[2].u
.operand
;
897 JSGlobalObject
* globalObject
= m_codeBlock
->globalObject();
899 emitWriteBarrier(globalObject
, value
, ShouldFilterValue
);
901 emitLoad(value
, regT1
, regT0
);
903 store32(regT1
, registerPointer
->tagPointer());
904 store32(regT0
, registerPointer
->payloadPointer());
909 #endif // USE(JSVALUE32_64)
910 #endif // ENABLE(JIT)