/*
 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "LowLevelInterpreter.h"
#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>

#if !ENABLE(JIT)
#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "LLIntCLoop.h"
#include "LLIntSlowPaths.h"
#include "JSCInlines.h"
#include "VMInspector.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>

using namespace JSC::LLInt;

// LLInt C Loop opcodes
// ====================
// In the implementation of the C loop, the LLInt trampoline glue functions
// (e.g. llint_program_prologue, llint_eval_prologue, etc.) are addressed as
// if they are bytecode handlers. That means the names of the trampoline
// functions will be added to the OpcodeID list via the
// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
// includes.
//
// In addition, some JIT trampoline functions which are needed by the LLInt
// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
// bytecodes, and the CLoop will provide bytecode handlers for them.
//
// In the CLoop, we can only dispatch indirectly to these bytecodes
// (including the LLInt and JIT extensions). All other dispatches
// (i.e. gotos) must be to a known label (i.e. local / global labels).

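// Illustrative sketch (not code from this file; the real opcode lists live in
// Opcode.h and LLIntOpcode.h): because the glue entry points share the same
// OpcodeID enum as ordinary bytecodes, the C loop "calls" a trampoline by
// dispatching to it like any other bytecode, conceptually:
//
//     Opcode opcode = getOpcode(llint_program_prologue);
//     DISPATCH_OPCODE(); // i.e. "goto *opcode" under computed goto
//
// rather than by a direct function call or a goto to a local label.
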

// How are the opcodes named?
// ==========================
// Here is a table to show examples of how each manifestation of the
// opcodes is named:
//
//   Type:             Opcode                Trampoline Glue
//                     ======                ===============
//   [In the llint .asm files]
//   llint labels:     llint_op_enter        llint_program_prologue
//
//   OpcodeID:         op_enter              llint_program
//                     [in Opcode.h]         [in LLIntOpcode.h]
//
//   When using a switch statement dispatch in the CLoop, each "opcode" is
//   a case statement:
//   Opcode:           case op_enter:        case llint_program_prologue:
//
//   When using a computed goto dispatch in the CLoop, each opcode is a label:
//   Opcode:           op_enter:             llint_program_prologue:


//============================================================================
// Define the opcode dispatch mechanism when using the C loop:
//

// These are for building a C Loop interpreter:
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END

#if ENABLE(OPCODE_TRACING)
#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
#else
#define TRACE_OPCODE(opcode)
#endif

// To keep compilers happy in case of unused labels, force usage of the label:
#define USE_LABEL(label) \
    do { \
        if (false) \
            goto label; \
    } while (false)

#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);

#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)

#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
#else
#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);
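
// Rough illustration (not generated code): a handler label emitted by
// offlineasm, say OFFLINE_ASM_OPCODE_LABEL(op_enter), expands under the
// computed goto configuration to approximately:
//
//     op_enter:                                            // from DEFINE_OPCODE
//         RECORD_OPCODE_STATS(op_enter);                   // (defined below in CLoop::execute)
//         do { if (false) goto op_enter; } while (false);  // from USE_LABEL
//         TRACE_OPCODE(op_enter);
//
// and under the switch configuration DEFINE_OPCODE additionally emits
// "case op_enter:" so the dispatch switch below can reach the same handler.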


//============================================================================
// Some utilities:
//

namespace JSC {
namespace LLInt {

#if USE(JSVALUE32_64)
static double Ints2Double(uint32_t lo, uint32_t hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
    return u.dval;
}

static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.dval = val;
    hi = static_cast<uint32_t>(u.ival64 >> 32);
    lo = static_cast<uint32_t>(u.ival64);
}
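
// Worked example for orientation (IEEE 754): the double 1.0 has the bit
// pattern 0x3FF0000000000000, so Double2Ints(1.0, lo, hi) yields
// hi == 0x3FF00000 and lo == 0, and Ints2Double(0, 0x3FF00000) recovers 1.0.
// These helpers are what let the 32-bit (JSVALUE32_64) C loop shuttle doubles
// through a pair of 32-bit pseudo registers.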
#endif // USE(JSVALUE32_64)

} // namespace LLInt


//============================================================================
// CLoopRegister is the storage for an emulated CPU register.
// It defines the policy of how ints smaller than intptr_t are packed into the
// pseudo register, as well as hides endianness differences.

struct CLoopRegister {
    CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
    union {
        intptr_t i;
        uintptr_t u;
#if USE(JSVALUE64)
#if CPU(BIG_ENDIAN)
        struct {
            int32_t i32padding;
            int32_t i32;
        };
        struct {
            uint32_t u32padding;
            uint32_t u32;
        };
        struct {
            int8_t i8padding[7];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[7];
            uint8_t u8;
        };
#else // !CPU(BIG_ENDIAN)
        struct {
            int32_t i32;
            int32_t i32padding;
        };
        struct {
            uint32_t u32;
            uint32_t u32padding;
        };
        struct {
            int8_t i8;
            int8_t i8padding[7];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[7];
        };
#endif // !CPU(BIG_ENDIAN)
#else // !USE(JSVALUE64)
        int32_t i32;
        uint32_t u32;

#if CPU(BIG_ENDIAN)
        struct {
            int8_t i8padding[3];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[3];
            uint8_t u8;
        };

#else // !CPU(BIG_ENDIAN)
        struct {
            int8_t i8;
            int8_t i8padding[3];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[3];
        };
#endif // !CPU(BIG_ENDIAN)
#endif // !USE(JSVALUE64)

        intptr_t* ip;
        int8_t* i8p;
        void* vp;
        CallFrame* callFrame;
        ExecState* execState;
        void* instruction;
        VM* vm;
        JSCell* cell;
        ProtoCallFrame* protoCallFrame;
        NativeFunction nativeFunc;
#if USE(JSVALUE64)
        int64_t i64;
        uint64_t u64;
        EncodedJSValue encodedJSValue;
        double castToDouble;
#endif
        Opcode opcode;
    };

    operator ExecState*() { return execState; }
    operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
    operator VM*() { return vm; }
    operator ProtoCallFrame*() { return protoCallFrame; }
    operator Register*() { return reinterpret_cast<Register*>(vp); }
    operator JSCell*() { return cell; }

#if USE(JSVALUE64)
    inline void clearHighWord() { i32padding = 0; }
#else
    inline void clearHighWord() { }
#endif
};
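
// To illustrate the packing policy (assuming the little-endian JSVALUE64
// layout): writing r.u8 through the union touches only the low byte of the
// full-width register r.i, and r.clearHighWord() zeroes only bits 32..63 via
// i32padding. Narrow accesses therefore alias the low-order bytes of the
// pseudo register, just as they would on a real little-endian CPU.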

//============================================================================
// The llint C++ interpreter loop:
//

JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
    #define CAST reinterpret_cast
    #define SIGN_BIT32(x) ((x) & 0x80000000)

    // One-time initialization of our address tables. We have to put this code
    // here because our labels are only in scope inside this function. The
    // caller (or one of its ancestors) is responsible for ensuring that this
    // is only called once during the initialization of the VM before threads
    // are at play.
    if (UNLIKELY(isInitializationPass)) {
#if ENABLE(COMPUTED_GOTO_OPCODES)
        Opcode* opcodeMap = LLInt::opcodeMap();
        #define OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
        FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
        #undef OPCODE_ENTRY

        #define LLINT_OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);

        FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
        #undef LLINT_OPCODE_ENTRY
#endif
        // Note: we can only set the exceptionInstructions after we have
        // initialized the opcodeMap above. This is because getCodePtr()
        // can depend on the opcodeMap.
        Instruction* exceptionInstructions = LLInt::exceptionInstructions();
        for (int i = 0; i < maxOpcodeLength + 1; ++i)
            exceptionInstructions[i].u.pointer =
                LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);

        return JSValue();
    }

    // Define the pseudo registers used by the LLINT C Loop backend:
    ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));

    union CLoopDoubleRegister {
        double d;
#if USE(JSVALUE64)
        int64_t castToInt64;
#endif
    };

    // The CLoop llint backend is initially based on the ARMv7 backend, and
    // then further enhanced with a few instructions from the x86 backend to
    // support building for X64 targets. Hence, the shape of the generated
    // code and the usage convention of registers will look a lot like the
    // ARMv7 backend's.
    //
    // For example, on a 32-bit build:
    // 1. Outgoing args will be set up as follows:
    //    arg1 in t0 (r0 on ARM)
    //    arg2 in t1 (r1 on ARM)
    // 2. 32-bit return values will be in t0 (r0 on ARM).
    // 3. 64-bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
    //
    // But instead of naming these simulator registers based on their ARM
    // counterparts, we'll name them based on their original llint asm names.
    // This will make it easier to correlate the generated code with the
    // original llint asm code.
    //
    // On a 64-bit build, it is more like x64 in that the registers are 64 bits
    // wide. Hence:
    // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
    // 2. 32-bit result values will be in the low 32 bits of t0.
    // 3. 64-bit result values will be in t0.

    CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
#if USE(JSVALUE64)
    CLoopRegister pcBase, tagTypeNumber, tagMask;
#endif
    CLoopDoubleRegister d0, d1;

    lr.opcode = getOpcode(llint_return_to_host);
    sp.vp = vm->interpreter->stack().topOfStack() + 1;
    cfr.callFrame = vm->topCallFrame;
#ifndef NDEBUG
    void* startSP = sp.vp;
    CallFrame* startCFR = cfr.callFrame;
#endif

    // Initialize the incoming args for doCallToJavaScript:
    t0.vp = executableAddress;
    t1.vm = vm;
    t2.protoCallFrame = protoCallFrame;

#if USE(JSVALUE64)
    // For the ASM llint, JITStubs takes care of this initialization. We do
    // it explicitly here for the C loop:
    tagTypeNumber.i = 0xFFFF000000000000;
    tagMask.i = 0xFFFF000000000002;
#endif // USE(JSVALUE64)
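
    // A quick orientation on those constants (see runtime/JSCJSValue.h for
    // the authoritative encoding): with the 64-bit value representation, an
    // encoded value v is an int32 when (v & tagTypeNumber) == tagTypeNumber,
    // some other number (a double) when (v & tagTypeNumber) is non-zero, and
    // a cell pointer when (v & tagMask) == 0. The asm llint keeps these two
    // constants pinned in registers; the C loop mirrors that here.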

    // Interpreter variables for value passing between opcodes and/or helpers:
    NativeFunction nativeFunc = 0;
    JSValue functionReturnValue;
    Opcode opcode = getOpcode(entryOpcodeID);

    #define PUSH(cloopReg) \
        do { \
            sp.ip--; \
            *sp.ip = cloopReg.i; \
        } while (false)

    #define POP(cloopReg) \
        do { \
            cloopReg.i = *sp.ip; \
            sp.ip++; \
        } while (false)
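
    // Illustration only (not live code): "PUSH(lr); POP(t3);" pre-decrements
    // sp.ip, stores lr.i into the emulated stack slot, and then reloads that
    // word into t3.i, i.e. a full-descending stack in the style of the ARM
    // push/pop the asm backend assumes.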

#if ENABLE(OPCODE_STATS)
    #define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
#else
    #define RECORD_OPCODE_STATS(__opcode)
#endif

#if USE(JSVALUE32_64)
    #define FETCH_OPCODE() pc.opcode
#else // USE(JSVALUE64)
    #define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
#endif // USE(JSVALUE64)

    #define NEXT_INSTRUCTION() \
        do { \
            opcode = FETCH_OPCODE(); \
            DISPATCH_OPCODE(); \
        } while (false)
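
    // In other words: on 32-bit (JSVALUE32_64) builds the next opcode is taken
    // straight from the pc pseudo register, while on 64-bit builds pc is an
    // instruction index and the opcode is loaded from pcBase + pc * 8 (each
    // Instruction slot being pointer-sized, i.e. 8 bytes). NEXT_INSTRUCTION()
    // simply refetches and redispatches under whichever scheme is in play.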

#if ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using computed goto statements:

    #define DISPATCH_OPCODE() goto *opcode

    #define DEFINE_OPCODE(__opcode) \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    DISPATCH_OPCODE();

#else // !ENABLE(COMPUTED_GOTO_OPCODES)
    //========================================================================
    // Loop dispatch mechanism using a C switch statement:

    #define DISPATCH_OPCODE() goto dispatchOpcode

    #define DEFINE_OPCODE(__opcode) \
        case __opcode: \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    dispatchOpcode:
    switch (opcode)

#endif // !ENABLE(COMPUTED_GOTO_OPCODES)
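
    // Either way, the control flow is the same: a handler finishes by invoking
    // NEXT_INSTRUCTION(), which refetches the opcode and either jumps straight
    // to its label (computed goto) or loops back to the switch above to select
    // the matching case (plain C switch). The braces that follow form the body
    // of that switch in the latter configuration.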

    //========================================================================
    // Bytecode handlers:
    {
        // This is the file generated by offlineasm, which contains all of the
        // bytecode handlers for the interpreter, as compiled from
        // LowLevelInterpreter.asm and its peers.

        #include "LLIntAssembly.h"

        OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
        {
            ASSERT(startSP == sp.vp);
            ASSERT(startCFR == cfr.callFrame);
#if USE(JSVALUE32_64)
            return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
#else
            return JSValue::decode(t0.encodedJSValue);
#endif
        }

        // In the ASM llint, getHostCallReturnValue() is a piece of glue
        // code provided by the JIT (see jit/JITOperations.cpp).
        // We simulate it here with a pseudo-opcode handler.
        OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
        {
            // The part in getHostCallReturnValueWithExecState():
            JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
            t1.i = result.tag();
            t0.i = result.payload();
#else
            t0.encodedJSValue = JSValue::encode(result);
#endif
            opcode = lr.opcode;
            DISPATCH_OPCODE();
        }

#if !ENABLE(COMPUTED_GOTO_OPCODES)
    default:
        ASSERT(false);
#endif

    } // END bytecode handler cases.

#if ENABLE(COMPUTED_GOTO_OPCODES)
    // Keep the compiler happy so that it doesn't complain about unused
    // labels for the LLInt trampoline glue. The labels are automatically
    // emitted by the label macros above, and some of them are referenced by
    // the llint generated code. Since we can't tell ahead of time which
    // will be referenced and which will not, we'll just pacify the
    // compiler on all such labels:
    #define LLINT_OPCODE_ENTRY(__opcode, length) \
        UNUSED_LABEL(__opcode);
    FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
    #undef LLINT_OPCODE_ENTRY
#endif

    #undef NEXT_INSTRUCTION
    #undef DEFINE_OPCODE
    #undef CHECK_FOR_TIMEOUT
    #undef CAST
    #undef SIGN_BIT32

    return JSValue(); // to suppress a compiler warning.
} // CLoop::execute()

} // namespace JSC

#elif !OS(WINDOWS)

//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
//

// These are for building an interpreter from generated assembly code:
#define OFFLINE_ASM_BEGIN   asm (
#define OFFLINE_ASM_END     );

#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
#define OFFLINE_ASM_GLUE_LABEL(__opcode)   OFFLINE_ASM_LOCAL_LABEL(__opcode)

#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".text\n"                                    \
    ".align 4\n"                                 \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    ".thumb\n"                                   \
    ".thumb_func " THUMB_FUNC_PARAM(label) "\n"  \
    SYMBOL_STRING(label) ":\n"
#elif CPU(ARM64)
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".text\n"                                    \
    ".align 4\n"                                 \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".text\n"                                    \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    SYMBOL_STRING(label) ":\n"
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
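
// For orientation: SYMBOL_STRING, HIDE_SYMBOL, and LOCAL_LABEL_STRING come
// from wtf/InlineASM.h and vary by platform (Mach-O symbol names gain a
// leading underscore, local labels get an "L"-style prefix, etc.). On a
// typical ELF target, OFFLINE_ASM_GLOBAL_LABEL(some_label) — a hypothetical
// label name used purely for illustration — would emit roughly:
//
//     .text
//     .globl some_label
//     .hidden some_label
//     some_label:
//
// with the exact directives depending on the toolchain.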

// This is a file generated by offlineasm, which contains all of the assembly code
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"

#endif // ENABLE(JIT)