/*
 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "LowLevelInterpreter.h"
#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>
#if !ENABLE(JIT)

#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "LLIntCLoop.h"
#include "LLIntSlowPaths.h"
#include "JSCInlines.h"
#include "VMInspector.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>
using namespace JSC::LLInt;
// LLInt C Loop opcodes
// ====================
// In the implementation of the C loop, the LLInt trampoline glue functions
// (e.g. llint_program_prologue, llint_eval_prologue, etc) are addressed as
// if they are bytecode handlers. That means the names of the trampoline
// functions will be added to the OpcodeID list via the
// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
// includes.
//
// In addition, some JIT trampoline functions which are needed by LLInt
// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
// bytecodes, and the CLoop will provide bytecode handlers for them.
//
// In the CLoop, we can only dispatch indirectly to these bytecodes
// (including the LLInt and JIT extensions). All other dispatches
// (i.e. goto's) must be to a known label (i.e. local / global labels).
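//
// For illustration (a sketch using the dispatch macros defined later in this
// file, not code from this file): dispatching to a trampoline glue "bytecode"
// looks just like dispatching to a regular bytecode handler:
//
//     opcode = getOpcode(llint_program_prologue); // glue function as an OpcodeID
//     DISPATCH_OPCODE();                          // indirect dispatch only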
// How are the opcodes named?
// ==========================
// Here is a table to show examples of how each manifestation of the
// opcodes is named:
//
//   Type:                             Opcode            Trampoline Glue
//                                     ======            ===============
//   [In the llint .asm files]
//   llint labels:                     llint_op_enter    llint_program_prologue
//
//   OpcodeID:                         op_enter          llint_program
//                                     [in Opcode.h]     [in LLIntOpcode.h]
//
//   When using a switch statement dispatch in the CLoop, each "opcode" is
//   a case statement:
//   Opcode:                           case op_enter:    case llint_program_prologue:
//
//   When using a computed goto dispatch in the CLoop, each opcode is a label:
//   Opcode:                           op_enter:         llint_program_prologue:
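//
// For example, walking one opcode through the table above: the .asm handler
// for "enter" is labeled llint_op_enter, its OpcodeID enumerator is op_enter,
// and the CLoop emits its handler as "case op_enter:" under switch dispatch,
// or as the label "op_enter:" under computed goto dispatch.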
//============================================================================
// Define the opcode dispatch mechanism when using the C loop:
//

// These are for building a C Loop interpreter:
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END
#if ENABLE(OPCODE_TRACING)
#define TRACE_OPCODE(opcode) dataLogF("   op %s\n", #opcode)
#else
#define TRACE_OPCODE(opcode)
#endif
// To keep compilers happy in case of unused labels, force usage of the label:
#define USE_LABEL(label) \
    do { \
        if (false) \
            goto label; \
    } while (false)
#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);

#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)

#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
#else // !ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
#endif // !ENABLE(COMPUTED_GOTO_OPCODES)

#define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);
//============================================================================
// Some utilities:
//

#if USE(JSVALUE32_64)
static double Ints2Double(uint32_t lo, uint32_t hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
    return u.dval;
}

static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.dval = val;
    hi = static_cast<uint32_t>(u.ival64 >> 32);
    lo = static_cast<uint32_t>(u.ival64);
}
#endif // USE(JSVALUE32_64)
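
// For illustration (a sketch, not code from this file): Double2Ints() and
// Ints2Double() are inverses that split and rejoin the raw 64-bit image of a
// double, e.g.:
//
//     uint32_t lo, hi;
//     Double2Ints(3.5, lo, hi);        // split into 32-bit halves
//     double d = Ints2Double(lo, hi);  // reassemble: d == 3.5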
namespace JSC {

//============================================================================
// CLoopRegister is the storage for an emulated CPU register.
// It defines the policy of how ints smaller than intptr_t are packed into the
// pseudo register, as well as hides endianness differences.
//
struct CLoopRegister {
    CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
    union {
        intptr_t i;
        uintptr_t u;
#if USE(JSVALUE64)
#if CPU(BIG_ENDIAN)
        struct {
            int32_t i32padding;
            int32_t i32;
        };
        struct {
            uint32_t u32padding;
            uint32_t u32;
        };
        struct {
            int8_t i8padding[7];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[7];
            uint8_t u8;
        };
#else // !CPU(BIG_ENDIAN)
        struct {
            int32_t i32;
            int32_t i32padding;
        };
        struct {
            uint32_t u32;
            uint32_t u32padding;
        };
        struct {
            int8_t i8;
            int8_t i8padding[7];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[7];
        };
#endif // !CPU(BIG_ENDIAN)
#else // !USE(JSVALUE64)
        int32_t i32;
        uint32_t u32;

#if CPU(BIG_ENDIAN)
        struct {
            int8_t i8padding[3];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[3];
            uint8_t u8;
        };
#else // !CPU(BIG_ENDIAN)
        struct {
            int8_t i8;
            int8_t i8padding[3];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[3];
        };
#endif // !CPU(BIG_ENDIAN)
#endif // !USE(JSVALUE64)

        intptr_t* ip;
        int8_t* i8p;
        void* vp;
        CallFrame* callFrame;
        ExecState* execState;
        void* instruction;
        VM* vm;
        JSCell* cell;
        ProtoCallFrame* protoCallFrame;
        NativeFunction nativeFunc;
#if USE(JSVALUE64)
        int64_t i64;
        uint64_t u64;
        EncodedJSValue encodedJSValue;
        double castToDouble;
#endif
        Opcode opcode;
    };

    operator ExecState*() { return execState; }
    operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
    operator VM*() { return vm; }
    operator ProtoCallFrame*() { return protoCallFrame; }
    operator Register*() { return reinterpret_cast<Register*>(vp); }
    operator JSCell*() { return cell; }

#if USE(JSVALUE64)
    inline void clearHighWord() { i32padding = 0; }
#else
    inline void clearHighWord() { }
#endif
};
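
// For illustration (a sketch, not code from this file): the union above lets
// a handler address the same pseudo register at several widths, with the
// padding keeping the narrow views at the endian-correct position:
//
//     CLoopRegister t0;
//     t0.i32 = -1;          // write the low 32 bits
//     t0.clearHighWord();   // zero the padding (a no-op on 32-bit builds)
//     int8_t b = t0.i8;     // read back the low byte, endian-correctly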
//============================================================================
// The llint C++ interpreter loop:
//

JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
    #define CAST reinterpret_cast
    #define SIGN_BIT32(x) ((x) & 0x80000000)
    // One-time initialization of our address tables. We have to put this code
    // here because our labels are only in scope inside this function. The
    // caller (or one of its ancestors) is responsible for ensuring that this
    // is only called once during the initialization of the VM before threads
    // are created.
    if (UNLIKELY(isInitializationPass)) {
#if ENABLE(COMPUTED_GOTO_OPCODES)
        Opcode* opcodeMap = LLInt::opcodeMap();
        #define OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
        FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
        #undef OPCODE_ENTRY

        #define LLINT_OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);

        FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
        #undef LLINT_OPCODE_ENTRY
#endif
        // Note: we can only set the exceptionInstructions after we have
        // initialized the opcodeMap above. This is because getCodePtr()
        // can depend on the opcodeMap.
        Instruction* exceptionInstructions = LLInt::exceptionInstructions();
        for (int i = 0; i < maxOpcodeLength + 1; ++i)
            exceptionInstructions[i].u.pointer =
                LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);

        return JSValue();
    }
    // Define the pseudo registers used by the LLINT C Loop backend:
    ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
    union CLoopDoubleRegister {
        double d;
#if USE(JSVALUE64)
        int64_t castToInt64;
#endif
    };
    // The CLoop llint backend is initially based on the ARMv7 backend, and
    // then further enhanced with a few instructions from the x86 backend to
    // support building for X64 targets. Hence, the shape of the generated
    // code and the usage convention of registers will look a lot like the
    // ARMv7 backend's.
    //
    // For example, on a 32-bit build:
    // 1. Outgoing args will be set up as follows:
    //    arg1 in t0 (r0 on ARM)
    //    arg2 in t1 (r1 on ARM)
    // 2. 32 bit return values will be in t0 (r0 on ARM).
    // 3. 64 bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
    //
    // But instead of naming these simulator registers based on their ARM
    // counterparts, we'll name them based on their original llint asm names.
    // This will make it easier to correlate the generated code with the
    // original llint asm code.
    //
    // On a 64-bit build, it is more like x64 in that the registers are 64
    // bits wide. Also:
    // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
    // 2. 32 bit result values will be in the low 32-bit of t0.
    // 3. 64 bit result values will be in t0.
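    //
    // For illustration (a sketch, not generated code; firstArg/secondArg are
    // hypothetical): under this convention, calling a two-argument helper and
    // consuming its 32-bit result looks roughly like:
    //
    //     t0.vp = firstArg;        // arg1 in t0
    //     t1.vp = secondArg;       // arg2 in t1
    //     /* ... dispatch to the helper ... */
    //     int32_t result = t0.i32; // 32-bit results come back in t0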
    CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
#if USE(JSVALUE64)
    CLoopRegister pcBase, tagTypeNumber, tagMask;
#endif
    CLoopDoubleRegister d0, d1;
    lr.opcode = getOpcode(llint_return_to_host);
    sp.vp = vm->interpreter->stack().topOfStack() + 1;
    cfr.callFrame = vm->topCallFrame;
#ifndef NDEBUG
    void* startSP = sp.vp;
    CallFrame* startCFR = cfr.callFrame;
#endif
    // Initialize the incoming args for doCallToJavaScript:
    t0.vp = executableAddress;
    t1.vm = vm;
    t2.protoCallFrame = protoCallFrame;
#if USE(JSVALUE64)
    // For the ASM llint, JITStubs takes care of this initialization. We do
    // it explicitly here for the C loop:
    tagTypeNumber.i = 0xFFFF000000000000;
    tagMask.i = 0xFFFF000000000002;
#endif // USE(JSVALUE64)
    // Interpreter variables for value passing between opcodes and/or helpers:
    NativeFunction nativeFunc = 0;
    JSValue functionReturnValue;
    Opcode opcode = getOpcode(entryOpcodeID);
    #define PUSH(cloopReg) \
        do { \
            sp.ip--; \
            *sp.ip = cloopReg.i; \
        } while (false)

    #define POP(cloopReg) \
        do { \
            cloopReg.i = *sp.ip; \
            sp.ip++; \
        } while (false)
#if ENABLE(OPCODE_STATS)
    #define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
#else
    #define RECORD_OPCODE_STATS(__opcode)
#endif
#if USE(JSVALUE32_64)
    #define FETCH_OPCODE() pc.opcode
#else // USE(JSVALUE64)
    #define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
#endif // USE(JSVALUE64)
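
    // In other words: on JSVALUE64 builds the bytecode PC is kept as an index
    // in pc relative to pcBase, so the fetch computes pcBase + pc * 8 (8 being
    // sizeof(Instruction) on 64-bit targets); on JSVALUE32_64 builds the pc
    // pseudo register itself carries the value to dispatch on.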
    #define NEXT_INSTRUCTION() \
        do { \
            opcode = FETCH_OPCODE(); \
            DISPATCH_OPCODE(); \
        } while (false)
#if ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using computed goto statements:

    #define DISPATCH_OPCODE() goto *opcode

    #define DEFINE_OPCODE(__opcode) \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    dispatchOpcode:
    NEXT_INSTRUCTION();
#else // !ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using a C switch statement:

    #define DISPATCH_OPCODE() goto dispatchOpcode

    #define DEFINE_OPCODE(__opcode) \
        case __opcode: \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    dispatchOpcode:
    switch (opcode)

#endif // !ENABLE(COMPUTED_GOTO_OPCODES)
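
    // For illustration (a sketch, assuming the macros above): every handler
    // ends in NEXT_INSTRUCTION(), which expands to roughly
    //
    //     opcode = FETCH_OPCODE();
    //     goto *opcode;            // computed goto mode
    //
    // or, in switch mode, to a "goto dispatchOpcode", which re-enters the
    // switch (opcode) statement with the freshly fetched opcode.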
    //========================================================================
    // Bytecode handlers:
    {
        // This is the file generated by offlineasm, which contains all of the
        // bytecode handlers for the interpreter, as compiled from
        // LowLevelInterpreter.asm and its peers.
        #include "LLIntAssembly.h"
        OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
        {
            ASSERT(startSP == sp.vp);
            ASSERT(startCFR == cfr.callFrame);
#if USE(JSVALUE32_64)
            return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
#else
            return JSValue::decode(t0.encodedJSValue);
#endif
        }
        // In the ASM llint, getHostCallReturnValue() is a piece of glue
        // code provided by the JIT (see jit/JITOperations.cpp).
        // We simulate it here with a pseudo-opcode handler.
        OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
        {
            // The part in getHostCallReturnValueWithExecState():
            JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
            t1.i = result.tag();
            t0.i = result.payload();
#else
            t0.encodedJSValue = JSValue::encode(result);
#endif
            // Return to the caller via the lr pseudo register (the CLoop can
            // only dispatch indirectly):
            opcode = lr.opcode;
            DISPATCH_OPCODE();
        }
#if !ENABLE(COMPUTED_GOTO_OPCODES)
    default:
        ASSERT(false);
#endif

    } // END bytecode handler cases.
#if ENABLE(COMPUTED_GOTO_OPCODES)
    // Keep the compiler happy so that it doesn't complain about unused
    // labels for the LLInt trampoline glue. The labels are automatically
    // emitted by label macros above, and some of them are referenced by
    // the llint generated code. Since we can't tell ahead of time which
    // will be referenced and which will not, we'll just pacify the
    // compiler on all such labels:
    #define LLINT_OPCODE_ENTRY(__opcode, length) \
        UNUSED_LABEL(__opcode);
    FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
    #undef LLINT_OPCODE_ENTRY
#endif
    #undef NEXT_INSTRUCTION
    #undef DEFINE_OPCODE
    #undef CHECK_FOR_TIMEOUT
    #undef CAST
    #undef SIGN_BIT32

    return JSValue(); // to suppress a compiler warning.
} // Interpreter::llintCLoopExecute()

} // namespace JSC

#else // ENABLE(JIT)

//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
//

// These are for building an interpreter from generated assembly code:
#define OFFLINE_ASM_BEGIN   asm (
#define OFFLINE_ASM_END     );
#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
#define OFFLINE_ASM_GLUE_LABEL(__opcode)   OFFLINE_ASM_LOCAL_LABEL(__opcode)
#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".text\n"                                    \
    ".align 4\n"                                 \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    ".thumb\n"                                   \
    ".thumb_func " THUMB_FUNC_PARAM(label) "\n"  \
    SYMBOL_STRING(label) ":\n"
#elif CPU(ARM64)
#define OFFLINE_ASM_GLOBAL_LABEL(label)         \
    ".text\n"                                   \
    ".align 4\n"                                \
    ".globl " SYMBOL_STRING(label) "\n"         \
    HIDE_SYMBOL(label) "\n"                     \
    SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label)         \
    ".text\n"                                   \
    ".globl " SYMBOL_STRING(label) "\n"         \
    HIDE_SYMBOL(label) "\n"                     \
    SYMBOL_STRING(label) ":\n"
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label)   LOCAL_LABEL_STRING(label) ":\n"
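
// For illustration (a sketch assuming ELF-style definitions of SYMBOL_STRING
// and HIDE_SYMBOL from wtf/InlineASM.h, and a hypothetical label "myGlue"):
// OFFLINE_ASM_GLOBAL_LABEL(myGlue) on a non-ARM target pastes roughly this
// into the asm() block:
//
//     ".text\n"
//     ".globl myGlue\n"
//     ".hidden myGlue\n"
//     "myGlue:\n"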
// This is a file generated by offlineasm, which contains all of the assembly code
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"

#endif // ENABLE(JIT)