/*
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef SpecializedThunkJIT_h
#define SpecializedThunkJIT_h

#if ENABLE(JIT)

#include "Executable.h"
#include "JIT.h"
#include "JITInlines.h"
#include "JSInterfaceJIT.h"
#include "JSStack.h"
#include "LinkBuffer.h"

namespace JSC {

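// SpecializedThunkJIT is a small helper for emitting "specialized thunks":
// hand-written fast paths for built-in functions (Math.sqrt, charCodeAt, and
// friends). Type checks performed while loading arguments append jumps to
// m_failures; finalize() links all of those jumps to a generic fallback, so a
// thunk that bails out simply falls through to the slow native call.
//
// A minimal sketch of a generator, following the pattern used by
// ThunkGenerators.cpp (the VM/JITThunks accessor details are illustrative):
//
//     MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
//     {
//         SpecializedThunkJIT jit(vm, 1); // fail unless called with exactly one argument
//         jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
//         jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
//         jit.returnDouble(SpecializedThunkJIT::fpRegT0);
//         return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "sqrt");
//     }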
class SpecializedThunkJIT : public JSInterfaceJIT {
public:
    static const int ThisArgument = -1; // Pass to the load*Argument() helpers to fetch |this|.
    SpecializedThunkJIT(VM* vm, int expectedArgCount)
        : JSInterfaceJIT(vm)
    {
        emitFunctionPrologue();
        // Check that we have the expected number of arguments. The call
        // frame's ArgumentCount includes |this|, hence the + 1.
        m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
    }

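    // This variant skips the arity check, for thunks that handle any number
    // of arguments themselves.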
    explicit SpecializedThunkJIT(VM* vm)
        : JSInterfaceJIT(vm)
    {
        emitFunctionPrologue();
    }

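    // The load*Argument() helpers below read an argument slot from the calling
    // frame and type-check it, reporting a mismatch either by appending a jump
    // to m_failures or by handing the jump back to the caller.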
    void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
    {
        unsigned src = CallFrame::argumentOffset(argument);
        m_failures.append(emitLoadDouble(src, dst, scratch));
    }

    void loadCellArgument(int argument, RegisterID dst)
    {
        unsigned src = CallFrame::argumentOffset(argument);
        m_failures.append(emitLoadJSCell(src, dst));
    }

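    // Loads a cell argument and fails unless its structure is the VM's string
    // structure, i.e. unless the argument is a JSString.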
    void loadJSStringArgument(VM& vm, int argument, RegisterID dst)
    {
        loadCellArgument(argument, dst);
        m_failures.append(branchStructure(*this, NotEqual,
            Address(dst, JSCell::structureIDOffset()),
            vm.stringStructure.get()));
    }

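    // Loads a cell argument and fails unless its class matches classInfo. The
    // scratch register is needed because the check loads the cell's Structure.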
    void loadArgumentWithSpecificClass(const ClassInfo* classInfo, int argument, RegisterID dst, RegisterID scratch)
    {
        loadCellArgument(argument, dst);
        emitLoadStructure(dst, scratch, dst);
        appendFailure(branchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfo)));
        // We have to reload the argument since emitLoadStructure clobbered it.
        loadCellArgument(argument, dst);
    }

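    // The two loadInt32Argument() overloads differ only in how failure is
    // reported: the first hands the conversion-failure jump back to the
    // caller, the second appends it to m_failures.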
    void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
    {
        unsigned src = CallFrame::argumentOffset(argument);
        failTarget = emitLoadInt32(src, dst);
    }

    void loadInt32Argument(int argument, RegisterID dst)
    {
        Jump conversionFailed;
        loadInt32Argument(argument, dst, conversionFailed);
        m_failures.append(conversionFailed);
    }

    void appendFailure(const Jump& failure)
    {
        m_failures.append(failure);
    }
#if USE(JSVALUE64)
    void returnJSValue(RegisterID src)
    {
        if (src != regT0)
            move(src, regT0);
        emitFunctionEpilogue();
        ret();
    }
#else
    void returnJSValue(RegisterID payload, RegisterID tag)
    {
        ASSERT_UNUSED(payload, payload == regT0);
        ASSERT_UNUSED(tag, tag == regT1);
        emitFunctionEpilogue();
        ret();
    }
#endif

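    // Boxes a double return value. On JSVALUE64 this adds the double encode
    // offset (done below as a subtraction of TagTypeNumber, which is the same
    // thing modulo 2^64); on JSVALUE32_64 the raw bits become the tag/payload
    // pair. In both cases an all-zero bit pattern (+0.0) is turned into the
    // integer JSValue 0 instead.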
    void returnDouble(FPRegisterID src)
    {
#if USE(JSVALUE64)
        moveDoubleTo64(src, regT0);
        Jump zero = branchTest64(Zero, regT0);
        sub64(tagTypeNumberRegister, regT0);
        Jump done = jump();
        zero.link(this);
        // +0.0 becomes the immediate integer 0 (TagTypeNumber | 0).
        move(tagTypeNumberRegister, regT0);
        done.link(this);
#else
#if !CPU(X86)
        // The src register is not clobbered by moveDoubleToInts with ARM, MIPS and SH4 macro assemblers, so let's use it.
        moveDoubleToInts(src, regT0, regT1);
#else
        // On X86 moveDoubleToInts would clobber src, so bounce through the stack instead.
        storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
        loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
        loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
#endif
        // If both halves are zero (+0.0), return the integer 0 instead.
        Jump lowNonZero = branchTestPtr(NonZero, regT1);
        Jump highNonZero = branchTestPtr(NonZero, regT0);
        move(TrustedImm32(0), regT0);
        move(TrustedImm32(Int32Tag), regT1);
        lowNonZero.link(this);
        highNonZero.link(this);
#endif
        emitFunctionEpilogue();
        ret();
    }

    void returnInt32(RegisterID src)
    {
        if (src != regT0)
            move(src, regT0);
        tagReturnAsInt32();
        emitFunctionEpilogue();
        ret();
    }

    void returnJSCell(RegisterID src)
    {
        if (src != regT0)
            move(src, regT0);
        tagReturnAsJSCell();
        emitFunctionEpilogue();
        ret();
    }

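    // Links every recorded failure jump to the generic fallback (typically
    // the VM's native call stub) and every recorded call to its target, then
    // emits the finished thunk.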
    MacroAssemblerCodeRef finalize(MacroAssemblerCodePtr fallback, const char* thunkKind)
    {
        LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
        patchBuffer.link(m_failures, CodeLocationLabel(fallback));
        for (unsigned i = 0; i < m_calls.size(); i++)
            patchBuffer.link(m_calls[i].first, m_calls[i].second);
        return FINALIZE_CODE(patchBuffer, ("Specialized thunk for %s", thunkKind));
    }

    // Assumes that the target function uses fpRegister0 as the first argument
    // and return value. Like any sensible architecture would.
    void callDoubleToDouble(FunctionPtr function)
    {
        m_calls.append(std::make_pair(call(), function));
    }

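    // On architectures that keep the return address in a link register, the
    // nested call would clobber it, so stash it in regT3 around the call. On
    // X86 the return address already lives on the stack and needs no help.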
    void callDoubleToDoublePreservingReturn(FunctionPtr function)
    {
        if (!isX86())
            preserveReturnAddressAfterCall(regT3);
        callDoubleToDouble(function);
        if (!isX86())
            restoreReturnAddressBeforeReturn(regT3);
    }

private:

    void tagReturnAsInt32()
    {
#if USE(JSVALUE64)
        or64(tagTypeNumberRegister, regT0);
#else
        move(TrustedImm32(JSValue::Int32Tag), regT1);
#endif
    }

    void tagReturnAsJSCell()
    {
#if USE(JSVALUE32_64)
        move(TrustedImm32(JSValue::CellTag), regT1);
#endif
    }

    MacroAssembler::JumpList m_failures;
    Vector<std::pair<Call, FunctionPtr>> m_calls;
};

} // namespace JSC

#endif // ENABLE(JIT)

#endif // SpecializedThunkJIT_h