]> git.saurik.com Git - apple/javascriptcore.git/blob - llint/LowLevelInterpreter.cpp
JavaScriptCore-7601.1.46.3.tar.gz
[apple/javascriptcore.git] / llint / LowLevelInterpreter.cpp
1 /*
2 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
#include "config.h"
#include "LowLevelInterpreter.h"
#include "LLIntOfflineAsmConfig.h"
#include <cstring>
#include <wtf/InlineASM.h>
30
31 #if !ENABLE(JIT)
32 #include "CodeBlock.h"
33 #include "CommonSlowPaths.h"
34 #include "LLIntCLoop.h"
35 #include "LLIntSlowPaths.h"
36 #include "JSCInlines.h"
37 #include <wtf/Assertions.h>
38 #include <wtf/MathExtras.h>
39
40 using namespace JSC::LLInt;
41
42 // LLInt C Loop opcodes
43 // ====================
44 // In the implementation of the C loop, the LLint trampoline glue functions
45 // (e.g. llint_program_prologue, llint_eval_prologue, etc) are addressed as
46 // if they are bytecode handlers. That means the names of the trampoline
47 // functions will be added to the OpcodeID list via the
48 // FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
49 // includes.
50 //
51 // In addition, some JIT trampoline functions which are needed by LLInt
52 // (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
53 // bytecodes, and the CLoop will provide bytecode handlers for them.
54 //
55 // In the CLoop, we can only dispatch indirectly to these bytecodes
56 // (including the LLInt and JIT extensions). All other dispatches
57 // (i.e. goto's) must be to a known label (i.e. local / global labels).
58
59
60 // How are the opcodes named?
61 // ==========================
62 // Here is a table to show examples of how each of the manifestation of the
63 // opcodes are named:
64 //
65 // Type: Opcode Trampoline Glue
66 // ====== ===============
67 // [In the llint .asm files]
68 // llint labels: llint_op_enter llint_program_prologue
69 //
70 // OpcodeID: op_enter llint_program
71 // [in Opcode.h] [in LLIntOpcode.h]
72 //
73 // When using a switch statement dispatch in the CLoop, each "opcode" is
74 // a case statement:
75 // Opcode: case op_enter: case llint_program_prologue:
76 //
77 // When using a computed goto dispatch in the CLoop, each opcode is a label:
78 // Opcode: op_enter: llint_program_prologue:
79
80
81 //============================================================================
82 // Define the opcode dispatch mechanism when using the C loop:
83 //
84
85 // These are for building a C Loop interpreter:
86 #define OFFLINE_ASM_BEGIN
87 #define OFFLINE_ASM_END
88
89 #if ENABLE(OPCODE_TRACING)
90 #define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
91 #else
92 #define TRACE_OPCODE(opcode)
93 #endif
94
95 // To keep compilers happy in case of unused labels, force usage of the label:
96 #define USE_LABEL(label) \
97 do { \
98 if (false) \
99 goto label; \
100 } while (false)
101
102 #define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
103
104 #define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)
105
106 #if ENABLE(COMPUTED_GOTO_OPCODES)
107 #define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
108 #else
109 #define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
110 #endif
111
112 #define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);
113
114
115 //============================================================================
116 // Some utilities:
117 //
118
119 namespace JSC {
120 namespace LLInt {
121
122 #if USE(JSVALUE32_64)
123 static double Ints2Double(uint32_t lo, uint32_t hi)
124 {
125 union {
126 double dval;
127 uint64_t ival64;
128 } u;
129 u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
130 return u.dval;
131 }
132
133 static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
134 {
135 union {
136 double dval;
137 uint64_t ival64;
138 } u;
139 u.dval = val;
140 hi = static_cast<uint32_t>(u.ival64 >> 32);
141 lo = static_cast<uint32_t>(u.ival64);
142 }
143 #endif // USE(JSVALUE32_64)
144
} // namespace LLInt
146
147
148 //============================================================================
149 // CLoopRegister is the storage for an emulated CPU register.
150 // It defines the policy of how ints smaller than intptr_t are packed into the
151 // pseudo register, as well as hides endianness differences.
152
153 struct CLoopRegister {
154 CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
155 union {
156 intptr_t i;
157 uintptr_t u;
158 #if USE(JSVALUE64)
159 #if CPU(BIG_ENDIAN)
160 struct {
161 int32_t i32padding;
162 int32_t i32;
163 };
164 struct {
165 uint32_t u32padding;
166 uint32_t u32;
167 };
168 struct {
169 int8_t i8padding[7];
170 int8_t i8;
171 };
172 struct {
173 uint8_t u8padding[7];
174 uint8_t u8;
175 };
176 #else // !CPU(BIG_ENDIAN)
177 struct {
178 int32_t i32;
179 int32_t i32padding;
180 };
181 struct {
182 uint32_t u32;
183 uint32_t u32padding;
184 };
185 struct {
186 int8_t i8;
187 int8_t i8padding[7];
188 };
189 struct {
190 uint8_t u8;
191 uint8_t u8padding[7];
192 };
193 #endif // !CPU(BIG_ENDIAN)
194 #else // !USE(JSVALUE64)
195 int32_t i32;
196 uint32_t u32;
197
198 #if CPU(BIG_ENDIAN)
199 struct {
200 int8_t i8padding[3];
201 int8_t i8;
202 };
203 struct {
204 uint8_t u8padding[3];
205 uint8_t u8;
206 };
207
208 #else // !CPU(BIG_ENDIAN)
209 struct {
210 int8_t i8;
211 int8_t i8padding[3];
212 };
213 struct {
214 uint8_t u8;
215 uint8_t u8padding[3];
216 };
217 #endif // !CPU(BIG_ENDIAN)
218 #endif // !USE(JSVALUE64)
219
220 intptr_t* ip;
221 int8_t* i8p;
222 void* vp;
223 CallFrame* callFrame;
224 ExecState* execState;
225 void* instruction;
226 VM* vm;
227 JSCell* cell;
228 ProtoCallFrame* protoCallFrame;
229 NativeFunction nativeFunc;
230 #if USE(JSVALUE64)
231 int64_t i64;
232 uint64_t u64;
233 EncodedJSValue encodedJSValue;
234 double castToDouble;
235 #endif
236 Opcode opcode;
237 };
238
239 operator ExecState*() { return execState; }
240 operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
241 operator VM*() { return vm; }
242 operator ProtoCallFrame*() { return protoCallFrame; }
243 operator Register*() { return reinterpret_cast<Register*>(vp); }
244 operator JSCell*() { return cell; }
245
246 #if USE(JSVALUE64)
247 inline void clearHighWord() { i32padding = 0; }
248 #else
249 inline void clearHighWord() { }
250 #endif
251 };
252
253 //============================================================================
254 // The llint C++ interpreter loop:
255 //
256
257 JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
258 {
259 #define CAST reinterpret_cast
260 #define SIGN_BIT32(x) ((x) & 0x80000000)
261
262 // One-time initialization of our address tables. We have to put this code
263 // here because our labels are only in scope inside this function. The
264 // caller (or one of its ancestors) is responsible for ensuring that this
265 // is only called once during the initialization of the VM before threads
266 // are at play.
267 if (UNLIKELY(isInitializationPass)) {
268 #if ENABLE(COMPUTED_GOTO_OPCODES)
269 Opcode* opcodeMap = LLInt::opcodeMap();
270 #define OPCODE_ENTRY(__opcode, length) \
271 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
272 FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
273 #undef OPCODE_ENTRY
274
275 #define LLINT_OPCODE_ENTRY(__opcode, length) \
276 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
277
278 FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
279 #undef LLINT_OPCODE_ENTRY
280 #endif
281 // Note: we can only set the exceptionInstructions after we have
282 // initialized the opcodeMap above. This is because getCodePtr()
283 // can depend on the opcodeMap.
284 Instruction* exceptionInstructions = LLInt::exceptionInstructions();
285 for (int i = 0; i < maxOpcodeLength + 1; ++i)
286 exceptionInstructions[i].u.pointer =
287 LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
288
289 return JSValue();
290 }
291
292 // Define the pseudo registers used by the LLINT C Loop backend:
293 ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
294
295 union CLoopDoubleRegister {
296 double d;
297 #if USE(JSVALUE64)
298 int64_t castToInt64;
299 #endif
300 };
301
302 // The CLoop llint backend is initially based on the ARMv7 backend, and
303 // then further enhanced with a few instructions from the x86 backend to
304 // support building for X64 targets. Hence, the shape of the generated
305 // code and the usage convention of registers will look a lot like the
306 // ARMv7 backend's.
307 //
308 // For example, on a 32-bit build:
309 // 1. Outgoing args will be set up as follows:
310 // arg1 in t0 (r0 on ARM)
311 // arg2 in t1 (r1 on ARM)
312 // 2. 32 bit return values will be in t0 (r0 on ARM).
313 // 3. 64 bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
314 //
315 // But instead of naming these simulator registers based on their ARM
316 // counterparts, we'll name them based on their original llint asm names.
317 // This will make it easier to correlate the generated code with the
318 // original llint asm code.
319 //
320 // On a 64-bit build, it more like x64 in that the registers are 64 bit.
321 // Hence:
322 // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
323 // 2. 32 bit result values will be in the low 32-bit of t0.
324 // 3. 64 bit result values will be in t0.
325
326 CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
327 #if USE(JSVALUE64)
328 CLoopRegister pcBase, tagTypeNumber, tagMask;
329 #endif
330 CLoopDoubleRegister d0, d1;
331
332 lr.opcode = getOpcode(llint_return_to_host);
333 sp.vp = vm->interpreter->stack().topOfStack() + 1;
334 cfr.callFrame = vm->topCallFrame;
335 #ifndef NDEBUG
336 void* startSP = sp.vp;
337 CallFrame* startCFR = cfr.callFrame;
338 #endif
339
340 // Initialize the incoming args for doVMEntryToJavaScript:
341 t0.vp = executableAddress;
342 t1.vm = vm;
343 t2.protoCallFrame = protoCallFrame;
344
345 #if USE(JSVALUE64)
346 // For the ASM llint, JITStubs takes care of this initialization. We do
347 // it explicitly here for the C loop:
348 tagTypeNumber.i = 0xFFFF000000000000;
349 tagMask.i = 0xFFFF000000000002;
350 #endif // USE(JSVALUE64)
351
352 // Interpreter variables for value passing between opcodes and/or helpers:
353 NativeFunction nativeFunc = 0;
354 JSValue functionReturnValue;
355 Opcode opcode = getOpcode(entryOpcodeID);
356
357 #define PUSH(cloopReg) \
358 do { \
359 sp.ip--; \
360 *sp.ip = cloopReg.i; \
361 } while (false)
362
363 #define POP(cloopReg) \
364 do { \
365 cloopReg.i = *sp.ip; \
366 sp.ip++; \
367 } while (false)
368
369 #if ENABLE(OPCODE_STATS)
370 #define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
371 #else
372 #define RECORD_OPCODE_STATS(__opcode)
373 #endif
374
375 #if USE(JSVALUE32_64)
376 #define FETCH_OPCODE() pc.opcode
377 #else // USE(JSVALUE64)
378 #define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
379 #endif // USE(JSVALUE64)
380
381 #define NEXT_INSTRUCTION() \
382 do { \
383 opcode = FETCH_OPCODE(); \
384 DISPATCH_OPCODE(); \
385 } while (false)
386
387 #if ENABLE(COMPUTED_GOTO_OPCODES)
388
389 //========================================================================
390 // Loop dispatch mechanism using computed goto statements:
391
392 #define DISPATCH_OPCODE() goto *opcode
393
394 #define DEFINE_OPCODE(__opcode) \
395 __opcode: \
396 RECORD_OPCODE_STATS(__opcode);
397
398 // Dispatch to the current PC's bytecode:
399 DISPATCH_OPCODE();
400
401 #else // !ENABLE(COMPUTED_GOTO_OPCODES)
402 //========================================================================
403 // Loop dispatch mechanism using a C switch statement:
404
405 #define DISPATCH_OPCODE() goto dispatchOpcode
406
407 #define DEFINE_OPCODE(__opcode) \
408 case __opcode: \
409 __opcode: \
410 RECORD_OPCODE_STATS(__opcode);
411
412 // Dispatch to the current PC's bytecode:
413 dispatchOpcode:
414 switch (opcode)
415
416 #endif // !ENABLE(COMPUTED_GOTO_OPCODES)
417
418 //========================================================================
419 // Bytecode handlers:
420 {
421 // This is the file generated by offlineasm, which contains all of the
422 // bytecode handlers for the interpreter, as compiled from
423 // LowLevelInterpreter.asm and its peers.
424
425 #include "LLIntAssembly.h"
426
427 OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
428 {
429 ASSERT(startSP == sp.vp);
430 ASSERT(startCFR == cfr.callFrame);
431 #if USE(JSVALUE32_64)
432 return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
433 #else
434 return JSValue::decode(t0.encodedJSValue);
435 #endif
436 }
437
438 // In the ASM llint, getHostCallReturnValue() is a piece of glue
439 // function provided by the JIT (see jit/JITOperations.cpp).
440 // We simulate it here with a pseduo-opcode handler.
441 OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
442 {
443 // The part in getHostCallReturnValueWithExecState():
444 JSValue result = vm->hostCallReturnValue;
445 #if USE(JSVALUE32_64)
446 t1.i = result.tag();
447 t0.i = result.payload();
448 #else
449 t0.encodedJSValue = JSValue::encode(result);
450 #endif
451 opcode = lr.opcode;
452 DISPATCH_OPCODE();
453 }
454
455 #if !ENABLE(COMPUTED_GOTO_OPCODES)
456 default:
457 ASSERT(false);
458 #endif
459
460 } // END bytecode handler cases.
461
462 #if ENABLE(COMPUTED_GOTO_OPCODES)
463 // Keep the compiler happy so that it doesn't complain about unused
464 // labels for the LLInt trampoline glue. The labels are automatically
465 // emitted by label macros above, and some of them are referenced by
466 // the llint generated code. Since we can't tell ahead of time which
467 // will be referenced and which will be not, we'll just passify the
468 // compiler on all such labels:
469 #define LLINT_OPCODE_ENTRY(__opcode, length) \
470 UNUSED_LABEL(__opcode);
471 FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
472 #undef LLINT_OPCODE_ENTRY
473 #endif
474
475 #undef NEXT_INSTRUCTION
476 #undef DEFINE_OPCODE
477 #undef CHECK_FOR_TIMEOUT
478 #undef CAST
479 #undef SIGN_BIT32
480
481 return JSValue(); // to suppress a compiler warning.
482 } // Interpreter::llintCLoopExecute()
483
484 } // namespace JSC
485
486 #elif !OS(WINDOWS)
487
488 //============================================================================
489 // Define the opcode dispatch mechanism when using an ASM loop:
490 //
491
492 // These are for building an interpreter from generated assembly code:
493 #define OFFLINE_ASM_BEGIN asm (
494 #define OFFLINE_ASM_END );
495
496 #define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
497 #define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
498
499 #if CPU(ARM_THUMB2)
500 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
501 ".text\n" \
502 ".align 4\n" \
503 ".globl " SYMBOL_STRING(label) "\n" \
504 HIDE_SYMBOL(label) "\n" \
505 ".thumb\n" \
506 ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
507 SYMBOL_STRING(label) ":\n"
508 #elif CPU(ARM64)
509 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
510 ".text\n" \
511 ".align 4\n" \
512 ".globl " SYMBOL_STRING(label) "\n" \
513 HIDE_SYMBOL(label) "\n" \
514 SYMBOL_STRING(label) ":\n"
515 #else
516 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
517 ".text\n" \
518 ".globl " SYMBOL_STRING(label) "\n" \
519 HIDE_SYMBOL(label) "\n" \
520 SYMBOL_STRING(label) ":\n"
521 #endif
522
523 #define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
524
525 // This is a file generated by offlineasm, which contains all of the assembly code
526 // for the interpreter, as compiled from LowLevelInterpreter.asm.
527 #include "LLIntAssembly.h"
528
529 #endif // ENABLE(JIT)