/*
 * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSFunction.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "StackAlignment.h"
#include <wtf/StringPrintStream.h>

namespace JSC {
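// JSVALUE32_64 (32-bit) baseline JIT code generation for the call and return
// opcodes: op_call, op_call_eval, op_call_varargs, op_construct,
// op_construct_varargs, op_ret and op_ret_object_or_this. On this value
// representation a JSValue is split into 32-bit tag and payload halves, so
// results and callees travel in the (regT1 tag, regT0 payload) register pair.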
void JIT::emitPutCallResult(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite();
    emitStore(dst, regT1, regT0);
}
void JIT::emit_op_ret(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    emitLoad(dst, regT1, regT0);

    checkStackPointerAlignment();
    emitFunctionEpilogue();
    ret();
}
void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned thisReg = currentInstruction[2].u.operand;

    emitLoad(result, regT1, regT0);
    Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
    Jump notObject = emitJumpIfCellNotObject(regT0);

    // The result is a cell and an object: return it as-is.
    checkStackPointerAlignment();
    emitFunctionEpilogue();
    ret();

    // Otherwise return 'this' instead.
    notJSCell.link(this);
    notObject.link(this);
    emitLoad(thisReg, regT1, regT0);

    checkStackPointerAlignment();
    emitFunctionEpilogue();
    ret();
}
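// The per-opcode emitters below are thin wrappers that funnel into
// compileOpCall / compileOpCallSlowCase. Note that op_call_eval passes
// m_callLinkInfoIndex without incrementing it: eval call sites are dispatched
// through operationCallEval rather than being linked, so they do not consume
// a CallLinkInfo slot of their own.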
void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
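// compileLoadVarargs materializes the callee frame for the *_varargs opcodes:
// on exit regT3 points at the new call frame and the stack pointer is left at
// newCallFrame + sizeof(CallerFrameAndPC), matching the non-varargs frame
// setup in compileOpCall.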
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();
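    // Fast path: the call forwards the function's own 'arguments' and the
    // Arguments object was never materialized (the register still holds the
    // empty value), so the arguments can be copied straight out of this frame.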
    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        if (firstVarArgOffset) {
            Jump sufficientArguments = branch32(GreaterThan, regT2, TrustedImm32(firstVarArgOffset + 1));
            move(TrustedImm32(1), regT2);
            Jump endVarArgs = jump();
            sufficientArguments.link(this);
            sub32(TrustedImm32(firstVarArgOffset), regT2);
            endVarArgs.link(this);
        }
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis
        move(regT2, regT3);
        addPtr(TrustedImm32(-firstFreeRegister + JSStack::CallFrameHeaderSize), regT3);
        // regT3 now has the required frame size in Register units.
        // Round regT3 up to the next multiple of stackAlignmentRegisters().
        addPtr(TrustedImm32(stackAlignmentRegisters() - 1), regT3);
        andPtr(TrustedImm32(~(stackAlignmentRegisters() - 1)), regT3);
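        // For example, assuming stackAlignmentRegisters() == 2 (16-byte stack
        // alignment with 8-byte Registers): a frame needing 7 Register slots
        // rounds up to (7 + 1) & ~1 == 8 slots, keeping the new frame aligned.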
        neg32(regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + ((CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + ((CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }
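    // Slow path: call out to the runtime to size the frame and copy the
    // arguments (also taken when any of the fast-path checks above fail).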
    if (canOptimize)
        slowCase.link(this);

    emitLoad(arguments, regT1, regT0);
    callOperation(operationSizeFrameForVarargs, regT1, regT0, firstFreeRegister, firstVarArgOffset);
    addPtr(TrustedImm32(-sizeof(CallerFrameAndPC)), returnValueGPR, stackPointerRegister);
    emitLoad(thisValue, regT1, regT4);
    emitLoad(arguments, regT3, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT4, regT3, regT2, firstVarArgOffset);
    move(returnValueGPR, regT3);

    if (canOptimize)
        end.link(this);

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT3, stackPointerRegister);
}
void JIT::compileCallEval(Instruction* instruction)
{
    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);

    callOperationNoExceptionCheck(operationCallEval, regT1);

    Jump noException = emitExceptionCheck(InvertedExceptionCheck);
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    exceptionCheck(jump());

    noException.link(this);
    addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
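// Slow case for op_call_eval: operationCallEval returned the empty value, so
// this was not actually an eval call; dispatch it as a normal virtual call.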
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT1);
    move(TrustedImmPtr(&CallLinkInfo::dummy()), regT2);

    emitLoad(JSStack::Callee, regT1, regT0);
    emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
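    // Until the call is actually made, the nascent callee frame sits just
    // below the current stack pointer, so its header slots (ArgumentCount,
    // Callee, ScopeChain) are addressed relative to stackPointerRegister minus
    // sizeof(CallerFrameAndPC).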
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
            storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));
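    // op_call_eval is handled separately below; every other call site takes
    // the linked-call fast path: check that the callee is a cell and still
    // matches the function this site was last linked to (the expected pointer
    // is patched in via addressOfLinkedFunctionCheck), otherwise go to the
    // slow path to (re)link the call.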
    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));

    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT2);
    store32(regT2, Address(MacroAssembler::stackPointerRegister, JSStack::ScopeChain * sizeof(Register) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(TrustedImm32(JSValue::CellTag), Address(stackPointerRegister, JSStack::ScopeChain * sizeof(Register) + TagOffset - sizeof(CallerFrameAndPC)));

    checkStackPointerAlignment();
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

    linkSlowCase(iter);
    linkSlowCase(iter);

    ThunkGenerator generator = linkThunkGeneratorFor(
        (opcodeID == op_construct || opcodeID == op_construct_varargs) ? CodeForConstruct : CodeForCall,
        RegisterPreservationNotRequired);

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(generator).code());

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
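// Invoked from the repatching machinery: builds a closure-call stub
// specialized for one (Structure, ExecutableBase) pair, then patches the
// original call site to jump to it.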
void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
    JumpList slowCases;

    slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(expectedStructure)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));

    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);

    Call call = nearCall();
    Jump done = jump();

    slowCases.link(this);
    move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
    restoreReturnAddressBeforeReturn(regT2);
    Jump slow = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);

    patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
    patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
    patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));

    RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
        FINALIZE_CODE_FOR(
            m_codeBlock, patchBuffer,
            ("Baseline closure call stub for %s, return point %p, target %p (%s)",
                toCString(*m_codeBlock).data(),
                callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
                codePtr.executableAddress(),
                toCString(pointerDump(calleeCodeBlock)).data())),
        *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
        callLinkInfo->codeOrigin));

    RepatchBuffer repatchBuffer(m_codeBlock);

    repatchBuffer.replaceWithJump(
        RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
        CodeLocationLabel(stubRoutine->code().code()));
    repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());

    callLinkInfo->stub = stubRoutine.release();
}
} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)