/*
 * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE64)
#include "JIT.h"

#include "Arguments.h"
#include "CodeBlock.h"
#include "JITInlines.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include <wtf/StringPrintStream.h>

namespace JSC {
void JIT::emitPutCallResult(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite();
    emitPutVirtualRegister(dst);
}
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();
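    // Roughly: the fast path below can copy the arguments straight out of the
    // caller's frame, but only when this spread refers to the current code
    // block's own arguments register, that register has not been put into
    // slow-argument mode, and (checked at run time just below) no arguments
    // object has actually been materialized yet, i.e. the register still holds
    // the empty JSValue. Otherwise we take the operationLoadVarargs slow path.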
    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        if (firstVarArgOffset) {
            Jump sufficientArguments = branch32(GreaterThan, regT0, TrustedImm32(firstVarArgOffset + 1));
            move(TrustedImm32(1), regT0);
            Jump endVarArgs = jump();
            sufficientArguments.link(this);
            sub32(TrustedImm32(firstVarArgOffset), regT0);
            endVarArgs.link(this);
        }
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis
        move(regT0, regT1);
        add64(TrustedImm32(-firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        // regT1 now has the required frame size in Register units
        // Round regT1 to next multiple of stackAlignmentRegisters()
        add64(TrustedImm32(stackAlignmentRegisters() - 1), regT1);
        and64(TrustedImm32(~(stackAlignmentRegisters() - 1)), regT1);
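        // The add/and pair above is the usual align-up idiom for a power-of-two
        // alignment: with stackAlignmentRegisters() == 2, for example, a frame
        // of 7 Register slots becomes (7 + 1) & ~1 == 8, while a frame of 8
        // stays at (8 + 1) & ~1 == 8.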
        neg64(regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
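        // What follows is, in effect, a small backwards copy loop: regT0 counts
        // down from the argument count (excluding 'this', which was stored just
        // above), and each iteration moves one argument from the caller's frame
        // -- offset by firstVarArgOffset so that skipped leading arguments are
        // not copied -- into the newly reserved frame at regT1.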
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, (CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, firstFreeRegister, firstVarArgOffset);
    move(returnValueGPR, stackPointerRegister);
    emitGetVirtualRegister(thisValue, regT1);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2, firstVarArgOffset);
    move(returnValueGPR, regT1);
    if (canOptimize)
        end.link(this);

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
void JIT::compileCallEval(Instruction* instruction)
{
    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
    callOperationNoExceptionCheck(operationCallEval, regT1);
    Jump noException = emitExceptionCheck(InvertedExceptionCheck);
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    exceptionCheck(jump());

    noException.link(this);
    addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue()))));
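    // Roughly: operationCallEval hands back the empty JSValue when the call
    // turns out not to be a genuine eval invocation, in which case the slow
    // case registered above performs an ordinary virtual call instead (see
    // compileCallEvalSlowCase below).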
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
    move(TrustedImmPtr(&CallLinkInfo::dummy()), regT2);
    emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;
    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
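    // In this 64-bit baseline path the callee frame is addressed relative to
    // stackPointerRegister rather than callFrameRegister: the header stores
    // below use offsets of the form slot * sizeof(Register) -
    // sizeof(CallerFrameAndPC), since SP is left pointing just past the
    // CallerFrame/PC slots of the frame being built.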
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
    store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);
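    // This is, in effect, a patchable inline cache on the callee: the pointer
    // compared against regT0 starts out as 0, so the first execution always
    // takes the slow case, and the linking machinery reached through
    // compileOpCallSlowCase then repatches both that constant and the naked
    // call emitted below (hotPathOther) so that repeat calls to the same
    // function stay on the fast path.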
    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT2);
    store64(regT2, Address(MacroAssembler::stackPointerRegister, JSStack::ScopeChain * sizeof(Register) - sizeof(CallerFrameAndPC)));

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

    linkSlowCase(iter);
    ThunkGenerator generator = linkThunkGeneratorFor(
        (opcodeID == op_construct || opcodeID == op_construct_varargs) ? CodeForConstruct : CodeForCall,
        RegisterPreservationNotRequired);

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(generator).code());

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
    JumpList slowCases;
    slowCases.append(branchTestPtr(NonZero, regT0, tagMaskRegister));
    slowCases.append(branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), expectedStructure));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
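    // The guards above check that the callee is a cell with the expected
    // Structure and the expected executable, rather than being one specific
    // JSFunction. In effect this lets many different closures over the same
    // underlying function body share one linked call target; only the scope
    // chain differs, and that is reloaded from the callee just below.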
    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
    emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
    Call call = nearCall();
    Jump done = jump();

    slowCases.link(this);
    move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
    restoreReturnAddressBeforeReturn(regT2);
    Jump slow = jump();
    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);

    patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
    patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
    patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
    RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline closure call stub for %s, return point %p, target %p (%s)",
                toCString(*m_codeBlock).data(),
                callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
                codePtr.executableAddress(),
                toCString(pointerDump(calleeCodeBlock)).data())),
        *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
        callLinkInfo->codeOrigin));
    RepatchBuffer repatchBuffer(m_codeBlock);

    repatchBuffer.replaceWithJump(
        RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
        CodeLocationLabel(stubRoutine->code().code()));
    repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
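    // At this point the caller's hot-path callee check has been replaced with
    // a direct jump into the stub generated above, and the slow-path call site
    // has been relinked to the virtual call thunk, so a callee that fails the
    // stub's guards falls back to a fully virtual call.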
    callLinkInfo->stub = stubRoutine.release();
}
void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
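// Note that op_call_eval does not consume a CallLinkInfo slot: compileOpCall
// bails out into compileCallEval before any link info is allocated, which is
// presumably why m_callLinkInfoIndex is not incremented here or in the
// corresponding slow-path emitter below.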
void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
}
} // namespace JSC

#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)