/*
 * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "CodeBlock.h"
32 #include "DirectArguments.h"
33 #include "GCAwareJITStubRoutine.h"
34 #include "GetterSetter.h"
35 #include "Interpreter.h"
36 #include "JITInlines.h"
38 #include "JSEnvironmentRecord.h"
39 #include "JSFunction.h"
40 #include "LinkBuffer.h"
41 #include "RepatchBuffer.h"
42 #include "ResultType.h"
43 #include "SamplingTool.h"
44 #include "ScopedArguments.h"
45 #include "ScopedArgumentsTable.h"
46 #include <wtf/StringPrintStream.h>

JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);
    JumpList failures;
    failures.append(JSC::branchStructure(jit,
        NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        vm->stringStructure.get()));

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT2);
    jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
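
    // Worked example of the comment above: for property = -1 the zero extension
    // leaves 0xFFFFFFFF in regT1. Every possible publicLength/vectorLength fits
    // in 32 bits and is far smaller, so the unsigned bounds check in the
    // per-shape helper sends the access to the slow path, where the low 32 bits
    // can be re-tagged as an int32 without extra masking.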

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    if (!ASSERT_DISABLED) {
        Jump resultOK = branchTest64(NonZero, regT0);
        abortWithReason(JITGetByValResultIsNotEmpty);
        resultOK.link(this);
    }

    emitValueProfilingSite();
    emitPutVirtualRegister(dst);

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}

JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));

    return slowCases;
}

JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}

JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
    slowCases.append(branchTest64(Zero, regT0));

    return slowCases;
}
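
// In the JSVALUE64 encoding the empty JSValue is all-zero bits, so the single
// branchTest64(Zero, ...) above doubles as a hole check: an ArrayStorage slot
// that was never written loads as 0 and diverts to the slow path, which can
// consult the prototype chain.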

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchStructure(NotEqual,
        Address(regT0, JSCell::structureIDOffset()),
        m_vm->stringStructure.get());
    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTest64(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    Label slowPath = label();

    emitGetVirtualRegister(base, regT0);
    emitGetVirtualRegister(property, regT1);
    Call call = callOperation(operationGetByValDefault, dst, regT0, regT1, profile);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite();
}
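
// Note the ordering in the slow path above: when the base is a JSString we
// first try the shared string stub, and only fall back to
// operationGetByValDefault when the stub signals failure by returning 0 (the
// branchTest64(Zero, regT0) check). Recording slowPathTarget and returnAddress
// in m_byValCompilationInfo is what later allows privateCompileGetByVal() to
// patch a shape-specialized stub over this site.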

void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
{
    ASSERT(sizeof(JSValue) == 8);

    if (finalObjectMode == MayBeFinal) {
        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
        Jump done = jump();
        isInline.link(this);
        addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
        done.link(this);
    } else {
        if (!ASSERT_DISABLED) {
            Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
            abortWithReason(JITOffsetIsNotOutOfLine);
            isOutOfLine.link(this);
        }
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
    }
    signExtend32ToPtr(offset, offset);
    load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}
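
// Address arithmetic sketch for the out-of-line case above: with the offset
// negated, the load computes
//     butterfly + (firstOutOfLineOffset - 2 - offset) * sizeof(EncodedJSValue),
// so the first out-of-line property (offset == firstOutOfLineOffset) lives at
// butterfly - 2 * sizeof(EncodedJSValue); out-of-line storage grows downward
// from the butterfly pointer.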

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}

JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitGetVirtualRegister(value, regT3);
    switch (indexingShape) {
    case Int32Shape:
        slowCases.append(emitJumpIfNotImmediateInteger(regT3));
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        break;
    case DoubleShape: {
        Jump notInt = emitJumpIfNotImmediateInteger(regT3);
        convertInt32ToDouble(regT3, fpRegT0);
        Jump ready = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT3);
        move64ToDouble(regT3, fpRegT0);
        slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
        ready.link(this);
        storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight));
        break;
    }
    case ContiguousShape:
        store64(regT3, BaseIndex(regT2, regT1, TimesEight));
        emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
        break;
    default:
        CRASH();
        break;
    }

    Jump done = jump();
    outOfBounds.link(this);

    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));

    emitArrayProfileStoreToHoleSpecialCase(profile);

    add32(TrustedImm32(1), regT1, regT3);
    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);

    done.link(this);

    return slowCases;
}
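
// Unboxing arithmetic used in the DoubleShape case above, per the JSVALUE64
// encoding (see JSCJSValue.h): a boxed double is its raw IEEE bits plus 2^48,
// and tagTypeNumberRegister holds 0xFFFF000000000000, so add64(tagTypeNumber)
// subtracts 2^48 modulo 2^64 and recovers the raw bits. The self-comparison
// (DoubleNotEqualOrUnordered, x, x) is true only for NaN, so any NaN is sent
// to the slow path rather than stored raw, where an impure NaN bit pattern
// could later be misread as a boxed value.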

JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
    Jump end = jump();

    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    return slowCases;
}
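
// The hole path above keeps the ArrayStorage bookkeeping consistent: it bumps
// numValuesInVector, and when the store is past the current length it sets
// length to index + 1 (the add32/store32/sub32 sequence preserves the index in
// regT1) before looping back to storeResult. Stores beyond vectorLength still
// divert to the slow path.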

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int property = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check

    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
    case JITDouble:
        linkSlowCase(iter); // value type check
        break;
    default:
        break;
    }

    Jump skipProfiling = jump();
    linkSlowCase(iter); // out of bounds
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    skipProfiling.link(this);

    Label slowPath = label();

    emitGetVirtualRegister(property, regT1);
    emitGetVirtualRegister(value, regT2);
    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2, profile);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
}

void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    callOperation(operationPutGetterById, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1);
}

void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    callOperation(operationPutSetterById, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1);
}

void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
    callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1, regT2);
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int property = currentInstruction[3].u.operand;
    emitGetVirtualRegister(base, regT0);
    callOperation(operationDeleteById, dst, regT0, &m_codeBlock->identifier(property));
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
        emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);

    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite();
    emitPutVirtualRegister(resultVReg);
    assertStackPointerOffset();
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int resultVReg = currentInstruction[1].u.operand;
    int baseVReg = currentInstruction[2].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int baseVReg = currentInstruction[1].u.operand;
    int valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.operand;

    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);

    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
        JSValueRegs(regT0), JSValueRegs(regT1), regT2, DontSpill, m_codeBlock->ecmaMode(),
        direct ? Direct : NotDirect);

    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());

    m_putByIds.append(gen);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int baseVReg = currentInstruction[1].u.operand;
    const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    Label coldPathBegin(this);

    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];

    Call call = callOperation(
        gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}

// Compile a store into an object's property storage. May overwrite the
// value in objectReg.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), base);
    store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
}
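
// Layout note for the two paths above: the first properties of an object live
// inline in the cell starting at JSObject::offsetOfInlineStorage(); later ones
// live in the out-of-line butterfly and cost one extra load to fetch the
// butterfly pointer. isInlineOffset(cachedOffset) is resolved here at code
// generation time, so the emitted code contains only the chosen path.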

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
        return;
    }

    loadPtr(Address(base, JSObject::butterflyOffset()), result);
    load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(base->locationForOffset(cachedOffset), result);
        return;
    }

    loadPtr(base->butterflyAddress(), result);
    load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}

void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
    if (!needsVarInjectionChecks)
        return;
    addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
{
    emitVarInjectionCheck(needsVarInjectionChecks);
    emitGetVirtualRegister(scope, regT0);
    for (unsigned i = 0; i < depth; ++i)
        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int scope = currentInstruction[2].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
    unsigned depth = currentInstruction[5].u.operand;

    switch (resolveType) {
    case GlobalProperty:
    case GlobalVar:
    case GlobalPropertyWithVarInjectionChecks:
    case GlobalVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
        emitPutVirtualRegister(dst);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    case LocalClosureVar:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);

    if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
        return;

    linkSlowCase(iter);
    int32_t scope = currentInstruction[2].u.operand;
    int32_t identifierIndex = currentInstruction[3].u.operand;
    callOperation(operationResolveScope, dst, scope, identifierIndex);
}

void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(structureSlot, regT1);
    addSlowCase(branchTestPtr(Zero, regT1));
    load32(Address(regT1, Structure::structureIDOffset()), regT1);
    addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
}

void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
{
    load32(operandSlot, regT1);
    compileGetDirectOffset(regT0, regT0, regT1, regT2, KnownNotFinal);
}

void JIT::emitGetGlobalVar(uintptr_t operand)
{
    loadPtr(reinterpret_cast<void*>(operand), regT0);
}

void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
    emitGetVirtualRegister(scope, regT0);
    loadPtr(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)), regT0);
}

void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int scope = currentInstruction[2].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks:
        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
        emitGetGlobalProperty(operandSlot);
        break;
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitGetGlobalVar(*operandSlot);
        break;
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitGetClosureVar(scope, *operandSlot);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    case LocalClosureVar:
        RELEASE_ASSERT_NOT_REACHED();
    }
    emitPutVirtualRegister(dst);
    emitValueProfilingSite();
}

void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();

    if (resolveType == GlobalVar || resolveType == ClosureVar)
        return;

    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
        linkSlowCase(iter);
    linkSlowCase(iter);
    callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}

void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
{
    emitGetVirtualRegister(value, regT2);

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
    loadPtr(operandSlot, regT1);
    negPtr(regT1);
    storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
}

void JIT::emitPutGlobalVar(uintptr_t operand, int value, WatchpointSet* set)
{
    emitGetVirtualRegister(value, regT0);
    emitNotifyWrite(set);
    storePtr(regT0, reinterpret_cast<void*>(operand));
}

void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
{
    emitGetVirtualRegister(value, regT1);
    emitGetVirtualRegister(scope, regT0);
    emitNotifyWrite(set);
    storePtr(regT1, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)));
}

void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
    int scope = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    Structure** structureSlot = currentInstruction[5].u.structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks:
        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
        emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
        emitPutGlobalProperty(operandSlot, value);
        break;
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks:
        emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
        break;
    case LocalClosureVar:
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks:
        emitWriteBarrier(scope, value, ShouldFilterValue);
        emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
        emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
        break;
    case Dynamic:
        addSlowCase(jump());
        break;
    }
}

void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    unsigned linkCount = 0;
    if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar)
        linkCount++;
    if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
        && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
        linkCount++;
    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
        linkCount++;
    if (!linkCount)
        return;
    while (linkCount--)
        linkSlowCase(iter);
    callOperation(operationPutToScope, currentInstruction);
}
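
// linkCount re-derives how many addSlowCase() entries the fast path emitted for
// this resolve type (a var-injection check, a still-valid watchpoint inside
// emitNotifyWrite, or the two branches of emitLoadWithStructureCheck), so that
// exactly that many slow cases get linked before calling operationPutToScope.
// If the fast path emitted none there is nothing to link, hence the early
// return.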

void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int arguments = currentInstruction[2].u.operand;
    int index = currentInstruction[3].u.operand;

    emitGetVirtualRegister(arguments, regT0);
    load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0);
    emitValueProfilingSite();
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
{
    int arguments = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitWriteBarrier(arguments, value, ShouldFilterValue);

    emitGetVirtualRegister(arguments, regT0);
    emitGetVirtualRegister(value, regT1);
    store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)));
}

void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = m_codeBlock->globalObject();
    emitWriteBarrier(globalObject, currentInstruction[2].u.operand, ShouldFilterValue);
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    store64(regT0, currentInstruction[1].u.variablePointer);
}

#endif // USE(JSVALUE64)

#if USE(JSVALUE64)

void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitGetVirtualRegister(value, regT0);
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
    }

    emitGetVirtualRegister(owner, regT0);
    Jump ownerNotCell;
    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT0);
    callOperation(operationUnconditionalWriteBarrier, regT0);
    ownerIsRememberedOrInEden.link(this);

    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}
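
// The cell filtering above relies on the JSVALUE64 encoding: tagMaskRegister
// holds TagMask (TagTypeNumber | TagBitTypeOther), and a value is a cell
// pointer exactly when (bits & TagMask) == 0. branchTest64(NonZero, reg,
// tagMaskRegister) therefore skips the barrier for ints, doubles, booleans,
// null and undefined, and the barrier slow call only runs when the owner cell
// is neither remembered nor in eden.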

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    emitGetVirtualRegister(value, regT0);
    Jump valueNotCell;
    if (mode == ShouldFilterValue)
        valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

#else // USE(JSVALUE64)

void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitLoad(owner, regT0, regT1);
    Jump ownerNotCell;
    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));

    Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT1);
    callOperation(operationUnconditionalWriteBarrier, regT1);
    ownerIsRememberedOrInEden.link(this);

    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
        ownerNotCell.link(this);
    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
#if ENABLE(GGC)
    Jump valueNotCell;
    if (mode == ShouldFilterValue) {
        emitLoadTag(value, regT0);
        valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
    }

    emitWriteBarrier(owner);

    if (mode == ShouldFilterValue)
        valueNotCell.link(this);
#else
    UNUSED_PARAM(owner);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
#endif
}

#endif // USE(JSVALUE64)

void JIT::emitWriteBarrier(JSCell* owner)
{
#if ENABLE(GGC)
    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
        Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(owner);
        callOperation(operationUnconditionalWriteBarrier, owner);
        ownerIsRememberedOrInEden.link(this);
    } else
        callOperation(operationUnconditionalWriteBarrier, owner);
#else
    UNUSED_PARAM(owner);
#endif // ENABLE(GGC)
}

void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    case JITDirectArguments:
        slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType);
        break;
    case JITScopedArguments:
        slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType);
        break;
    default:
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type);
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);

    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));

    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer,
        ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));

    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
}

void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;

    PatchableJump badType;
    JumpList slowCases;

    bool needsLinkForWriteBarrier = false;

    switch (arrayMode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        needsLinkForWriteBarrier = true;
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        needsLinkForWriteBarrier = true;
        break;
    default:
        TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
        if (isInt(type))
            slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, type);
        else
            slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, type);
        break;
    }

    Jump done = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));

    if (needsLinkForWriteBarrier) {
        ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
        patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
    }

    bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
    if (!isDirect) {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    } else {
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer,
            ("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
    }
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
}

JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
#endif

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));

    slowCases.append(branch32(AboveOrEqual, property, Address(base, DirectArguments::offsetOfLength())));
    slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfOverrides())));

    zeroExtend32ToPtr(property, scratch);
    loadValue(BaseIndex(base, scratch, TimesEight, DirectArguments::storageOffset()), result);

    return slowCases;
}
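
// Two guards keep the fast path above honest: the index must be below the
// argument count, and the DirectArguments object must not have an overrides
// table (the pointer at offsetOfOverrides() becomes non-null once any argument
// mapping has been overridden), because then the inline storage is no longer
// authoritative and only the slow path can answer.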

JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
    RegisterID scratch2 = regT4;
#endif

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, ScopedArguments::offsetOfTotalLength())));

    loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch);
    load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2);
    Jump overflowCase = branch32(AboveOrEqual, property, scratch2);
    loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2);
    loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch);
    load32(BaseIndex(scratch, property, TimesFour), scratch);
    slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset)));
    loadValue(BaseIndex(scratch2, scratch, TimesEight, JSEnvironmentRecord::offsetOfVariables()), result);
    Jump done = jump();
    overflowCase.link(this);
    sub32(property, scratch2);
    neg32(scratch2);
    loadValue(BaseIndex(base, scratch2, TimesEight, ScopedArguments::overflowStorageOffset()), result);
    slowCases.append(branchIfEmpty(result));
    done.link(this);

    return slowCases;
}
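
// ScopedArguments lookups are two-zone, as above: an index below the table
// length maps through the ScopedArgumentsTable to a ScopeOffset and reads the
// variable out of the enclosing JSEnvironmentRecord (unless the entry is
// ScopeOffset::invalidOffset), while larger indices read the overflow storage
// hanging off the ScopedArguments object itself, with the emptiness check
// acting as the hole check.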

JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isInt(type));

    // The best way to test the array type is to use the classInfo. We need to do so without
    // clobbering the register that holds the indexing type, base, and property.

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);

    switch (elementSize(type)) {
    case 1:
        if (isSigned(type))
            load8SignedExtendTo32(BaseIndex(base, property, TimesOne), resultPayload);
        else
            load8(BaseIndex(base, property, TimesOne), resultPayload);
        break;
    case 2:
        if (isSigned(type))
            load16SignedExtendTo32(BaseIndex(base, property, TimesTwo), resultPayload);
        else
            load16(BaseIndex(base, property, TimesTwo), resultPayload);
        break;
    case 4:
        load32(BaseIndex(base, property, TimesFour), resultPayload);
        break;
    default:
        CRASH();
    }

    Jump done;
    if (type == TypeUint32) {
        Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));

        convertInt32ToDouble(resultPayload, fpRegT0);
        addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
#if USE(JSVALUE64)
        moveDoubleTo64(fpRegT0, resultPayload);
        sub64(tagTypeNumberRegister, resultPayload);
#else
        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

        done = jump();
        canBeInt.link(this);
    }

#if USE(JSVALUE64)
    or64(tagTypeNumberRegister, resultPayload);
#else
    move(TrustedImm32(JSValue::Int32Tag), resultTag);
#endif
    if (done.isSet())
        done.link(this);
    return slowCases;
}
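
// The TypeUint32 fixup above exists because the 32-bit load reads the element
// as a signed int: a stored 0xFFFFFFFF arrives as -1. If the sign bit is set,
// the value is converted to double and 2^32 is added (twoToThe32), yielding
// 4294967295.0, which is then boxed as a double (on JSVALUE64, sub64 of
// TagTypeNumber adds the 2^48 bias). Non-negative values just get the int32
// tag.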

JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
    ASSERT(isFloat(type));

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID resultPayload = regT0;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID resultPayload = regT0;
    RegisterID resultTag = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
    slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);

    switch (elementSize(type)) {
    case 4:
        loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
        convertFloatToDouble(fpRegT0, fpRegT0);
        break;
    case 8:
        loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
        break;
    default:
        CRASH();
    }

    Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
    static const double NaN = PNaN;
    loadDouble(TrustedImmPtr(&NaN), fpRegT0);
    notNaN.link(this);

#if USE(JSVALUE64)
    moveDoubleTo64(fpRegT0, resultPayload);
    sub64(tagTypeNumberRegister, resultPayload);
#else
    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
#endif

    return slowCases;
}
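
// The NaN check above is purification, not error handling: a Float64Array can
// contain any NaN bit pattern (for example one written through an aliasing
// Uint8Array), and under NaN boxing an arbitrary NaN payload could collide
// with the encoding of a pointer or an int32. Reloading the canonical PNaN
// before boxing makes every NaN safe to return as a JSValue.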

JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isInt(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    Jump done = jump();
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);

    if (isClamped(type)) {
        ASSERT(elementSize(type) == 1);
        ASSERT(!isSigned(type));
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch);
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch);
        clamped.link(this);
        inBounds.link(this);
    }

    switch (elementSize(type)) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
    }

    done.link(this);

    return slowCases;
}
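
// Clamping walk-through for the isClamped (Uint8ClampedArray) case above:
// 200 is BelowOrEqual 0xff as unsigned, so it stores unchanged; 300 fails the
// unsigned check and is GreaterThan 0xff as signed, so it becomes 255; -5 also
// fails the unsigned check (it reads as 0xFFFFFFFB) but is not GreaterThan
// 0xff as signed, so the xor32 zeroes it - matching saturation to [0, 255].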

JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    ASSERT(isFloat(type));

    int value = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    Jump done = jump();
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
    add64(tagTypeNumberRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);

    switch (elementSize(type)) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
    }

    done.link(this);

    return slowCases;
}

} // namespace JSC

#endif // ENABLE(JIT)