2 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "LowLevelInterpreter.h"
28 #include "LLIntOfflineAsmConfig.h"
29 #include <wtf/InlineASM.h>
32 #include "CodeBlock.h"
33 #include "CommonSlowPaths.h"
34 #include "LLIntCLoop.h"
35 #include "LLIntSlowPaths.h"
36 #include "JSCInlines.h"
37 #include <wtf/Assertions.h>
38 #include <wtf/MathExtras.h>
40 using namespace JSC::LLInt
;
42 // LLInt C Loop opcodes
43 // ====================
44 // In the implementation of the C loop, the LLint trampoline glue functions
45 // (e.g. llint_program_prologue, llint_eval_prologue, etc) are addressed as
46 // if they are bytecode handlers. That means the names of the trampoline
47 // functions will be added to the OpcodeID list via the
48 // FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
51 // In addition, some JIT trampoline functions which are needed by LLInt
52 // (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
53 // bytecodes, and the CLoop will provide bytecode handlers for them.
55 // In the CLoop, we can only dispatch indirectly to these bytecodes
56 // (including the LLInt and JIT extensions). All other dispatches
57 // (i.e. goto's) must be to a known label (i.e. local / global labels).
60 // How are the opcodes named?
61 // ==========================
62 // Here is a table to show examples of how each of the manifestations of the
65 // Type: Opcode Trampoline Glue
66 // ====== ===============
67 // [In the llint .asm files]
68 // llint labels: llint_op_enter llint_program_prologue
70 // OpcodeID: op_enter llint_program
71 // [in Opcode.h] [in LLIntOpcode.h]
73 // When using a switch statement dispatch in the CLoop, each "opcode" is
75 // Opcode: case op_enter: case llint_program_prologue:
77 // When using a computed goto dispatch in the CLoop, each opcode is a label:
78 // Opcode: op_enter: llint_program_prologue:
81 //============================================================================
82 // Define the opcode dispatch mechanism when using the C loop:
85 // These are for building a C Loop interpreter:
86 #define OFFLINE_ASM_BEGIN
87 #define OFFLINE_ASM_END
89 #if ENABLE(OPCODE_TRACING)
90 #define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
92 #define TRACE_OPCODE(opcode)
95 // To keep compilers happy in case of unused labels, force usage of the label:
96 #define USE_LABEL(label) \
102 #define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
104 #define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)
106 #if ENABLE(COMPUTED_GOTO_OPCODES)
107 #define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
109 #define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
112 #define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);
115 //============================================================================
122 #if USE(JSVALUE32_64)
123 static double Ints2Double(uint32_t lo
, uint32_t hi
)
129 u
.ival64
= (static_cast<uint64_t>(hi
) << 32) | lo
;
133 static void Double2Ints(double val
, uint32_t& lo
, uint32_t& hi
)
140 hi
= static_cast<uint32_t>(u
.ival64
>> 32);
141 lo
= static_cast<uint32_t>(u
.ival64
);
143 #endif // USE(JSVALUE32_64)
148 //============================================================================
149 // CLoopRegister is the storage for an emulated CPU register.
150 // It defines the policy of how ints smaller than intptr_t are packed into the
151 // pseudo register, as well as hides endianness differences.
// NOTE(review): this copy is damaged by extraction — the struct's anonymous
// union of register views (the members referenced below: i, vp, instruction,
// vm, cell, opcode, i32padding, ...) and several #if USE(JSVALUE64) /
// CPU(BIG_ENDIAN) guards were dropped (note the jumps in the fused leading
// numbers, which are extraction artifacts, not source text). Restore the
// full definition from the upstream file before attempting to build.
153 struct CLoopRegister
{
// Poison the register at construction so reads of an uninitialized pseudo
// register are recognizable in a debugger.
154 CLoopRegister() { i
= static_cast<intptr_t>(0xbadbeef0baddbeef); }
// Padding fragments: presumably these pad sub-pointer-sized union views out
// to the full register width (7 bytes on 64-bit, 3 bytes on 32-bit); the
// #if structure around them is only partially visible here.
173 uint8_t u8padding
[7];
176 #else // !CPU(BIG_ENDIAN)
191 uint8_t u8padding
[7];
193 #endif // !CPU(BIG_ENDIAN)
194 #else // !USE(JSVALUE64)
204 uint8_t u8padding
[3];
208 #else // !CPU(BIG_ENDIAN)
215 uint8_t u8padding
[3];
217 #endif // !CPU(BIG_ENDIAN)
218 #endif // !USE(JSVALUE64)
// Typed views of the register value (members of the dropped union):
223 CallFrame
* callFrame
;
224 ExecState
* execState
;
228 ProtoCallFrame
* protoCallFrame
;
229 NativeFunction nativeFunc
;
233 EncodedJSValue encodedJSValue
;
// Implicit conversions so a CLoopRegister can be passed directly where
// these pointer types are expected:
239 operator ExecState
*() { return execState
; }
240 operator Instruction
*() { return reinterpret_cast<Instruction
*>(instruction
); }
241 operator VM
*() { return vm
; }
242 operator ProtoCallFrame
*() { return protoCallFrame
; }
243 operator Register
*() { return reinterpret_cast<Register
*>(vp
); }
244 operator JSCell
*() { return cell
; }
// clearHighWord(): the first variant zeroes the i32padding half of the
// register (presumably the high 32 bits after a 32-bit op on 64-bit
// builds — confirm against upstream); the second is a no-op. The #if/#else
// guard between the two variants was dropped by the extraction.
247 inline void clearHighWord() { i32padding
= 0; }
249 inline void clearHighWord() { }
253 //============================================================================
254 // The llint C++ interpreter loop:
// CLoop::execute(): the C-loop LLInt interpreter. The bytecode handlers
// themselves are generated by offlineasm and spliced into the body below
// via #include "LLIntAssembly.h".
//
// NOTE(review): this copy is damaged by extraction — many original lines
// are missing (note the jumps in the fused leading numbers, e.g. 287 ->
// 292; the numbers themselves are artifacts, not source text), and single
// statements are split across lines. Restore from the upstream file before
// building; the comments added here only annotate what is visible.
257 JSValue
CLoop::execute(OpcodeID entryOpcodeID
, void* executableAddress
, VM
* vm
, ProtoCallFrame
* protoCallFrame
, bool isInitializationPass
)
259 #define CAST reinterpret_cast
260 #define SIGN_BIT32(x) ((x) & 0x80000000)
262 // One-time initialization of our address tables. We have to put this code
263 // here because our labels are only in scope inside this function. The
264 // caller (or one of its ancestors) is responsible for ensuring that this
265 // is only called once during the initialization of the VM before threads
267 if (UNLIKELY(isInitializationPass
)) {
268 #if ENABLE(COMPUTED_GOTO_OPCODES)
269 Opcode
* opcodeMap
= LLInt::opcodeMap();
// Record the address of each handler label (GCC/Clang "labels as values",
// &&label) so opcodes can later be dispatched with computed gotos.
270 #define OPCODE_ENTRY(__opcode, length) \
271 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
272 FOR_EACH_OPCODE_ID(OPCODE_ENTRY
)
275 #define LLINT_OPCODE_ENTRY(__opcode, length) \
276 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
278 FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY
)
279 #undef LLINT_OPCODE_ENTRY
281 // Note: we can only set the exceptionInstructions after we have
282 // initialized the opcodeMap above. This is because getCodePtr()
283 // can depend on the opcodeMap.
284 Instruction
* exceptionInstructions
= LLInt::exceptionInstructions();
285 for (int i
= 0; i
< maxOpcodeLength
+ 1; ++i
)
286 exceptionInstructions
[i
].u
.pointer
=
287 LLInt::getCodePtr(llint_throw_from_slow_path_trampoline
);
292 // Define the pseudo registers used by the LLINT C Loop backend:
293 ASSERT(sizeof(CLoopRegister
) == sizeof(intptr_t));
295 union CLoopDoubleRegister
{
302 // The CLoop llint backend is initially based on the ARMv7 backend, and
303 // then further enhanced with a few instructions from the x86 backend to
304 // support building for X64 targets. Hence, the shape of the generated
305 // code and the usage convention of registers will look a lot like the
308 // For example, on a 32-bit build:
309 // 1. Outgoing args will be set up as follows:
310 // arg1 in t0 (r0 on ARM)
311 // arg2 in t1 (r1 on ARM)
312 // 2. 32 bit return values will be in t0 (r0 on ARM).
313 // 3. 64 bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
315 // But instead of naming these simulator registers based on their ARM
316 // counterparts, we'll name them based on their original llint asm names.
317 // This will make it easier to correlate the generated code with the
318 // original llint asm code.
320 // On a 64-bit build, it is more like x64 in that the registers are 64 bit.
322 // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
323 // 2. 32 bit result values will be in the low 32-bit of t0.
324 // 3. 64 bit result values will be in t0.
326 CLoopRegister t0
, t1
, t2
, t3
, t5
, t7
, sp
, cfr
, lr
, pc
;
328 CLoopRegister pcBase
, tagTypeNumber
, tagMask
;
330 CLoopDoubleRegister d0
, d1
;
// Preload lr with the llint_return_to_host opcode so that when the
// interpreted frame "returns", dispatch lands on the return-to-host
// handler below.
332 lr
.opcode
= getOpcode(llint_return_to_host
);
333 sp
.vp
= vm
->interpreter
->stack().topOfStack() + 1;
334 cfr
.callFrame
= vm
->topCallFrame
;
// Remember the entry stack pointer / call frame so we can assert the
// emulated stack is balanced when control returns to the host.
336 void* startSP
= sp
.vp
;
337 CallFrame
* startCFR
= cfr
.callFrame
;
340 // Initialize the incoming args for doVMEntryToJavaScript:
341 t0
.vp
= executableAddress
;
343 t2
.protoCallFrame
= protoCallFrame
;
346 // For the ASM llint, JITStubs takes care of this initialization. We do
347 // it explicitly here for the C loop:
// NOTE(review): the matching #if USE(JSVALUE64) for the #endif below was
// dropped by the extraction.
348 tagTypeNumber
.i
= 0xFFFF000000000000;
349 tagMask
.i
= 0xFFFF000000000002;
350 #endif // USE(JSVALUE64)
352 // Interpreter variables for value passing between opcodes and/or helpers:
353 NativeFunction nativeFunc
= 0;
354 JSValue functionReturnValue
;
355 Opcode opcode
= getOpcode(entryOpcodeID
);
// PUSH/POP move one pseudo register to/from the emulated stack. Note the
// continuation lines of these macros, and the #else/#endif around the
// OPCODE_STATS conditional below, were dropped by the extraction.
357 #define PUSH(cloopReg) \
360 *sp.ip = cloopReg.i; \
363 #define POP(cloopReg) \
365 cloopReg.i = *sp.ip; \
369 #if ENABLE(OPCODE_STATS)
370 #define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
372 #define RECORD_OPCODE_STATS(__opcode)
// Fetch the next opcode: read directly from pc on 32-bit builds; on 64-bit
// builds, load the Opcode stored at pcBase + pc * 8.
375 #if USE(JSVALUE32_64)
376 #define FETCH_OPCODE() pc.opcode
377 #else // USE(JSVALUE64)
378 #define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
379 #endif // USE(JSVALUE64)
381 #define NEXT_INSTRUCTION() \
383 opcode = FETCH_OPCODE(); \
387 #if ENABLE(COMPUTED_GOTO_OPCODES)
389 //========================================================================
390 // Loop dispatch mechanism using computed goto statements:
392 #define DISPATCH_OPCODE() goto *opcode
394 #define DEFINE_OPCODE(__opcode) \
396 RECORD_OPCODE_STATS(__opcode);
398 // Dispatch to the current PC's bytecode:
401 #else // !ENABLE(COMPUTED_GOTO_OPCODES)
402 //========================================================================
403 // Loop dispatch mechanism using a C switch statement:
405 #define DISPATCH_OPCODE() goto dispatchOpcode
407 #define DEFINE_OPCODE(__opcode) \
410 RECORD_OPCODE_STATS(__opcode);
412 // Dispatch to the current PC's bytecode:
416 #endif // !ENABLE(COMPUTED_GOTO_OPCODES)
418 //========================================================================
419 // Bytecode handlers:
421 // This is the file generated by offlineasm, which contains all of the
422 // bytecode handlers for the interpreter, as compiled from
423 // LowLevelInterpreter.asm and its peers.
425 #include "LLIntAssembly.h"
// Control arrives here when interpreted code returns to the host: check
// that the emulated stack and call frame are balanced, then hand the
// result registers back as a JSValue.
427 OFFLINE_ASM_GLUE_LABEL(llint_return_to_host
)
429 ASSERT(startSP
== sp
.vp
);
430 ASSERT(startCFR
== cfr
.callFrame
);
431 #if USE(JSVALUE32_64)
432 return JSValue(t1
.i
, t0
.i
); // returning JSValue(tag, payload);
434 return JSValue::decode(t0
.encodedJSValue
);
438 // In the ASM llint, getHostCallReturnValue() is a piece of glue
439 // function provided by the JIT (see jit/JITOperations.cpp).
440 // We simulate it here with a pseudo-opcode handler.
441 OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue
)
443 // The part in getHostCallReturnValueWithExecState():
444 JSValue result
= vm
->hostCallReturnValue
;
445 #if USE(JSVALUE32_64)
447 t0
.i
= result
.payload();
449 t0
.encodedJSValue
= JSValue::encode(result
);
455 #if !ENABLE(COMPUTED_GOTO_OPCODES)
460 } // END bytecode handler cases.
462 #if ENABLE(COMPUTED_GOTO_OPCODES)
463 // Keep the compiler happy so that it doesn't complain about unused
464 // labels for the LLInt trampoline glue. The labels are automatically
465 // emitted by label macros above, and some of them are referenced by
466 // the llint generated code. Since we can't tell ahead of time which
467 // will be referenced and which will be not, we'll just pacify the
468 // compiler on all such labels:
469 #define LLINT_OPCODE_ENTRY(__opcode, length) \
470 UNUSED_LABEL(__opcode);
471 FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY
);
472 #undef LLINT_OPCODE_ENTRY
475 #undef NEXT_INSTRUCTION
477 #undef CHECK_FOR_TIMEOUT
481 return JSValue(); // to suppress a compiler warning.
482 } // Interpreter::llintCLoopExecute()
488 //============================================================================
489 // Define the opcode dispatch mechanism when using an ASM loop:
// NOTE(review): the extraction dropped lines in this section. The three
// definitions of OFFLINE_ASM_GLOBAL_LABEL below were originally guarded by
// #if CPU(...) / #elif / #else / #endif directives, and some asm directive
// strings (e.g. ".text\n" / alignment lines) appear to be missing. As
// written, the repeated #defines would be redefinitions; restore the guards
// from the upstream file. The fused leading numbers are extraction
// artifacts, not source text.
492 // These are for building an interpreter from generated assembly code:
493 #define OFFLINE_ASM_BEGIN asm (
494 #define OFFLINE_ASM_END );
// Each offlineasm opcode handler becomes a local asm label llint_<opcode>;
// glue labels get a local label with their own name.
496 #define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
497 #define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
// Variant presumably for ARM Thumb-2 (note the .thumb_func directive):
500 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
503 ".globl " SYMBOL_STRING(label) "\n" \
504 HIDE_SYMBOL(label) "\n" \
506 ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
507 SYMBOL_STRING(label) ":\n"
// Two further variants, presumably for other CPU targets:
509 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
512 ".globl " SYMBOL_STRING(label) "\n" \
513 HIDE_SYMBOL(label) "\n" \
514 SYMBOL_STRING(label) ":\n"
516 #define OFFLINE_ASM_GLOBAL_LABEL(label) \
518 ".globl " SYMBOL_STRING(label) "\n" \
519 HIDE_SYMBOL(label) "\n" \
520 SYMBOL_STRING(label) ":\n"
523 #define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
525 // This is a file generated by offlineasm, which contains all of the assembly code
526 // for the interpreter, as compiled from LowLevelInterpreter.asm.
527 #include "LLIntAssembly.h"
529 #endif // ENABLE(JIT)