]> git.saurik.com Git - apple/javascriptcore.git/blob - assembler/MacroAssemblerCodeRef.h
JavaScriptCore-1097.3.3.tar.gz
[apple/javascriptcore.git] / assembler / MacroAssemblerCodeRef.h
1 /*
2 * Copyright (C) 2009 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef MacroAssemblerCodeRef_h
27 #define MacroAssemblerCodeRef_h
28
29 #include "ExecutableAllocator.h"
30 #include <wtf/PassRefPtr.h>
31 #include <wtf/RefPtr.h>
32 #include <wtf/UnusedParam.h>
33
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
#if CPU(ARM_THUMB2)
// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
// into the processor are decorated with the bottom bit set, indicating that this is
// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
// decorated and undecorated null, and the second test ensures that the pointer is
// decorated.
#define ASSERT_VALID_CODE_POINTER(ptr) \
    ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
    ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
#define ASSERT_VALID_CODE_OFFSET(offset) \
    ASSERT(!(offset & 1)) // Must be multiple of 2.
#else
#define ASSERT_VALID_CODE_POINTER(ptr) \
    ASSERT(ptr)
#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
#endif
52
// On 32-bit x86 Windows the default stub calling convention is stdcall; CDECL
// provides the compiler-specific spelling used to declare explicitly-cdecl
// functions on that configuration.
#if CPU(X86) && OS(WINDOWS)
#define CALLING_CONVENTION_IS_STDCALL 1
#ifndef CDECL
#if COMPILER(MSVC)
#define CDECL __cdecl
#else
#define CDECL __attribute__ ((__cdecl))
#endif // COMPILER(MSVC)
#endif // CDECL
#else
#define CALLING_CONVENTION_IS_STDCALL 0
#endif

// The fastcall convention is only available on 32-bit x86; FASTCALL expands to
// the compiler-specific spelling of that attribute.
#if CPU(X86)
#define HAS_FASTCALL_CALLING_CONVENTION 1
#ifndef FASTCALL
#if COMPILER(MSVC)
#define FASTCALL __fastcall
#else
#define FASTCALL __attribute__ ((fastcall))
#endif // COMPILER(MSVC)
#endif // FASTCALL
#else
#define HAS_FASTCALL_CALLING_CONVENTION 0
#endif // CPU(X86)
78
79 namespace JSC {
80
// FunctionPtr:
//
// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
// (particularly, the stub functions). The templated constructors form an
// overload set covering function pointers of arity 0-4 (and, where the
// platform defines them, the cdecl and fastcall variants), so callers can
// pass a typed function pointer without an explicit cast; the pointer is
// stored untyped as a void*.
class FunctionPtr {
public:
    // Null function pointer.
    FunctionPtr()
        : m_value(0)
    {
    }

    template<typename returnType>
    FunctionPtr(returnType(*value)())
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1>
    FunctionPtr(returnType(*value)(argType1))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2>
    FunctionPtr(returnType(*value)(argType1, argType2))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2, typename argType3>
    FunctionPtr(returnType(*value)(argType1, argType2, argType3))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
    FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    // MSVC doesn't seem to treat functions with different calling conventions as
    // different types; these methods already defined for fastcall, below.
#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)

    template<typename returnType>
    FunctionPtr(returnType (CDECL *value)())
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1>
    FunctionPtr(returnType (CDECL *value)(argType1))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2>
    FunctionPtr(returnType (CDECL *value)(argType1, argType2))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2, typename argType3>
    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
    FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }
#endif

#if HAS_FASTCALL_CALLING_CONVENTION

    template<typename returnType>
    FunctionPtr(returnType (FASTCALL *value)())
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1>
    FunctionPtr(returnType (FASTCALL *value)(argType1))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2>
    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2, typename argType3>
    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
    FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4))
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }
#endif

    // Catch-all for any remaining function pointer type.
    template<typename FunctionType>
    explicit FunctionPtr(FunctionType* value)
        // Using a C-style cast here to avoid compiler error on RVTC:
        // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
        // (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
        : m_value((void*)value)
    {
        ASSERT_VALID_CODE_POINTER(m_value);
    }

    void* value() const { return m_value; }
    void* executableAddress() const { return m_value; }


private:
    void* m_value;
};
222
223 // ReturnAddressPtr:
224 //
225 // ReturnAddressPtr should be used to wrap return addresses generated by processor
226 // 'call' instructions exectued in JIT code. We use return addresses to look up
227 // exception and optimization information, and to repatch the call instruction
228 // that is the source of the return address.
229 class ReturnAddressPtr {
230 public:
231 ReturnAddressPtr()
232 : m_value(0)
233 {
234 }
235
236 explicit ReturnAddressPtr(void* value)
237 : m_value(value)
238 {
239 ASSERT_VALID_CODE_POINTER(m_value);
240 }
241
242 explicit ReturnAddressPtr(FunctionPtr function)
243 : m_value(function.value())
244 {
245 ASSERT_VALID_CODE_POINTER(m_value);
246 }
247
248 void* value() const { return m_value; }
249
250 private:
251 void* m_value;
252 };
253
254 // MacroAssemblerCodePtr:
255 //
256 // MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
257 class MacroAssemblerCodePtr {
258 public:
259 MacroAssemblerCodePtr()
260 : m_value(0)
261 {
262 }
263
264 explicit MacroAssemblerCodePtr(void* value)
265 #if CPU(ARM_THUMB2)
266 // Decorate the pointer as a thumb code pointer.
267 : m_value(reinterpret_cast<char*>(value) + 1)
268 #else
269 : m_value(value)
270 #endif
271 {
272 ASSERT_VALID_CODE_POINTER(m_value);
273 }
274
275 static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
276 {
277 ASSERT_VALID_CODE_POINTER(value);
278 MacroAssemblerCodePtr result;
279 result.m_value = value;
280 return result;
281 }
282
283 static MacroAssemblerCodePtr createLLIntCodePtr(void (*function)())
284 {
285 return createFromExecutableAddress(bitwise_cast<void*>(function));
286 }
287 explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
288 : m_value(ra.value())
289 {
290 ASSERT_VALID_CODE_POINTER(m_value);
291 }
292
293 void* executableAddress() const { return m_value; }
294 #if CPU(ARM_THUMB2)
295 // To use this pointer as a data address remove the decoration.
296 void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
297 #else
298 void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
299 #endif
300
301 bool operator!() const
302 {
303 return !m_value;
304 }
305
306 private:
307 void* m_value;
308 };
309
310 // MacroAssemblerCodeRef:
311 //
312 // A reference to a section of JIT generated code. A CodeRef consists of a
313 // pointer to the code, and a ref pointer to the pool from within which it
314 // was allocated.
315 class MacroAssemblerCodeRef {
316 private:
317 // This is private because it's dangerous enough that we want uses of it
318 // to be easy to find - hence the static create method below.
319 explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
320 : m_codePtr(codePtr)
321 {
322 ASSERT(m_codePtr);
323 }
324
325 public:
326 MacroAssemblerCodeRef()
327 {
328 }
329
330 MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
331 : m_codePtr(executableMemory->start())
332 , m_executableMemory(executableMemory)
333 {
334 ASSERT(m_executableMemory->isManaged());
335 ASSERT(m_executableMemory->start());
336 ASSERT(m_codePtr);
337 }
338
339 // Use this only when you know that the codePtr refers to code that is
340 // already being kept alive through some other means. Typically this means
341 // that codePtr is immortal.
342 static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
343 {
344 return MacroAssemblerCodeRef(codePtr);
345 }
346
347 // Helper for creating self-managed code refs from LLInt.
348 static MacroAssemblerCodeRef createLLIntCodeRef(void (*function)())
349 {
350 return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(bitwise_cast<void*>(function)));
351 }
352
353 ExecutableMemoryHandle* executableMemory() const
354 {
355 return m_executableMemory.get();
356 }
357
358 MacroAssemblerCodePtr code() const
359 {
360 return m_codePtr;
361 }
362
363 size_t size() const
364 {
365 if (!m_executableMemory)
366 return 0;
367 return m_executableMemory->sizeInBytes();
368 }
369
370 bool operator!() const { return !m_codePtr; }
371
372 private:
373 MacroAssemblerCodePtr m_codePtr;
374 RefPtr<ExecutableMemoryHandle> m_executableMemory;
375 };
376
377 } // namespace JSC
378
379 #endif // MacroAssemblerCodeRef_h