/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "LowLevelInterpreter.h"

#if ENABLE(LLINT)

#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>

#if ENABLE(LLINT_C_LOOP)
#include "CodeBlock.h"
#include "LLIntCLoop.h"
#include "LLIntSlowPaths.h"
#include "Operations.h"
#include "VMInspector.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>

using namespace JSC::LLInt;

// LLInt C Loop opcodes
// ====================
// In the implementation of the C loop, the LLInt trampoline glue functions
// (e.g. llint_program_prologue, llint_eval_prologue, etc.) are addressed as
// if they were bytecode handlers. That means the names of the trampoline
// functions will be added to the OpcodeID list via the
// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
// includes.
//
// In addition, some JIT trampoline functions which are needed by the LLInt
// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
// bytecodes, and the CLoop will provide bytecode handlers for them.
//
// In the CLoop, we can only dispatch indirectly to these bytecodes
// (including the LLInt and JIT extensions). All other dispatches
// (i.e. gotos) must be to a known label (i.e. a local or global label).


// How are the opcodes named?
// ==========================
// Here is a table showing examples of how each manifestation of the
// opcodes is named:
//
//   Type:           Opcode               Trampoline Glue
//                   ======               ===============
//   [In the llint .asm files]
//   llint labels:   llint_op_enter       llint_program_prologue
//
//   OpcodeID:       op_enter             llint_program
//                   [in Opcode.h]        [in LLIntOpcode.h]
//
//   When using a switch statement dispatch in the CLoop, each "opcode" is
//   a case statement:
//   Opcode:         case op_enter:       case llint_program_prologue:
//
//   When using a computed goto dispatch in the CLoop, each opcode is a label:
//   Opcode:         op_enter:            llint_program_prologue:


//============================================================================
// Define the opcode dispatch mechanism when using the C loop:
//

// These are for building a C Loop interpreter:
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END


#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode)
#if ENABLE(COMPUTED_GOTO_OPCODES)
    #define OFFLINE_ASM_GLUE_LABEL(label) label:
#else
    #define OFFLINE_ASM_GLUE_LABEL(label) case label: label:
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label) label:

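// As a rough illustration (hypothetical expansion; the exact result depends on
// DEFINE_OPCODE, which is defined later inside CLoop::execute()):
//
//   OFFLINE_ASM_OPCODE_LABEL(op_enter)
//       => DEFINE_OPCODE(op_enter), i.e. an "op_enter:" label and, for the
//          switch dispatch, a "case op_enter:" statement as well
//   OFFLINE_ASM_GLUE_LABEL(llint_program_prologue)
//       => "llint_program_prologue:" (plus a case statement for switch dispatch)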

//============================================================================
// Some utilities:
//

namespace JSC {
namespace LLInt {

#if USE(JSVALUE32_64)
static double Ints2Double(uint32_t lo, uint32_t hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
    return u.dval;
}

static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
{
    union {
        double dval;
        uint64_t ival64;
    } u;
    u.dval = val;
    hi = static_cast<uint32_t>(u.ival64 >> 32);
    lo = static_cast<uint32_t>(u.ival64);
}
#endif // USE(JSVALUE32_64)

} // namespace LLInt

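// A minimal round-trip sketch (illustrative comment only): splitting a double
// into its low/high 32-bit halves and reassembling them recovers the same bits.
//
//     uint32_t lo, hi;
//     Double2Ints(2.5, lo, hi);       // hi == 0x40040000, lo == 0x00000000
//     double d = Ints2Double(lo, hi); // d == 2.5 again
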
//============================================================================
// CLoopRegister is the storage for an emulated CPU register.
// It defines the policy for how ints smaller than intptr_t are packed into
// the pseudo register, and it hides endianness differences.

struct CLoopRegister {
    union {
        intptr_t i;
        uintptr_t u;
#if USE(JSVALUE64)
#if CPU(BIG_ENDIAN)
        struct {
            int32_t i32padding;
            int32_t i32;
        };
        struct {
            uint32_t u32padding;
            uint32_t u32;
        };
        struct {
            int8_t i8padding[7];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[7];
            uint8_t u8;
        };
#else // !CPU(BIG_ENDIAN)
        struct {
            int32_t i32;
            int32_t i32padding;
        };
        struct {
            uint32_t u32;
            uint32_t u32padding;
        };
        struct {
            int8_t i8;
            int8_t i8padding[7];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[7];
        };
#endif // !CPU(BIG_ENDIAN)
#else // !USE(JSVALUE64)
        int32_t i32;
        uint32_t u32;

#if CPU(BIG_ENDIAN)
        struct {
            int8_t i8padding[3];
            int8_t i8;
        };
        struct {
            uint8_t u8padding[3];
            uint8_t u8;
        };

#else // !CPU(BIG_ENDIAN)
        struct {
            int8_t i8;
            int8_t i8padding[3];
        };
        struct {
            uint8_t u8;
            uint8_t u8padding[3];
        };
#endif // !CPU(BIG_ENDIAN)
#endif // !USE(JSVALUE64)

        int8_t* i8p;
        void* vp;
        ExecState* execState;
        void* instruction;
        NativeFunction nativeFunc;
#if USE(JSVALUE64)
        int64_t i64;
        uint64_t u64;
        EncodedJSValue encodedJSValue;
        double castToDouble;
#endif
        Opcode opcode;
    };

#if USE(JSVALUE64)
    inline void clearHighWord() { i32padding = 0; }
#else
    inline void clearHighWord() { }
#endif
};

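// For example (illustrative comment only): on a little-endian JSVALUE64 build,
// setting reg.u64 = 0xFFFF000000001234ull makes reg.u32 read back 0x1234 and
// reg.u8 read back 0x34, because the narrower members alias the low-order
// bytes of the register; the padding fields keep sizeof(CLoopRegister) equal
// to sizeof(intptr_t).
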
//============================================================================
// The llint C++ interpreter loop:
//

JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
                       bool isInitializationPass)
{
    #define CAST reinterpret_cast
    #define SIGN_BIT32(x) ((x) & 0x80000000)

    // One-time initialization of our address tables. We have to put this code
    // here because our labels are only in scope inside this function. The
    // caller (or one of its ancestors) is responsible for ensuring that this
    // is only called once during the initialization of the VM, before threads
    // are in play.
    if (UNLIKELY(isInitializationPass)) {
#if ENABLE(COMPUTED_GOTO_OPCODES)
        Opcode* opcodeMap = LLInt::opcodeMap();
        #define OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
        FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
        #undef OPCODE_ENTRY

        #define LLINT_OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);

        FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
        #undef LLINT_OPCODE_ENTRY
#endif
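        // Descriptive note: "&&__opcode" uses the GCC/Clang labels-as-values
        // extension. Each handler label's address is stored in opcodeMap so
        // that the computed goto dispatch below ("goto *opcode") can jump
        // directly to the handler.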
        // Note: we can only set the exceptionInstructions after we have
        // initialized the opcodeMap above. This is because getCodePtr()
        // can depend on the opcodeMap.
        Instruction* exceptionInstructions = LLInt::exceptionInstructions();
        for (int i = 0; i < maxOpcodeLength + 1; ++i)
            exceptionInstructions[i].u.pointer =
                LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);

        return JSValue();
    }

    ASSERT(callFrame->vm().topCallFrame == callFrame);

    // Define the pseudo registers used by the LLINT C Loop backend:
    ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));

    union CLoopDoubleRegister {
        double d;
#if USE(JSVALUE64)
        int64_t castToInt64;
#endif
    };

    // The CLoop llint backend is initially based on the ARMv7 backend, and
    // then further enhanced with a few instructions from the x86 backend to
    // support building for X64 targets. Hence, the shape of the generated
    // code and the usage convention of registers will look a lot like the
    // ARMv7 backend's.
    //
    // For example, on a 32-bit build:
    // 1. Outgoing args will be set up as follows:
    //    arg1 in t0 (r0 on ARM)
    //    arg2 in t1 (r1 on ARM)
    // 2. 32-bit return values will be in t0 (r0 on ARM).
    // 3. 64-bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
    //
    // But instead of naming these simulator registers based on their ARM
    // counterparts, we'll name them based on their original llint asm names.
    // This will make it easier to correlate the generated code with the
    // original llint asm code.
    //
    // On a 64-bit build, it is more like x64 in that the registers are 64
    // bits wide. Hence:
    // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
    // 2. 32-bit result values will be in the low 32 bits of t0.
    // 3. 64-bit result values will be in t0.
    CLoopRegister t0, t1, t2, t3;
#if USE(JSVALUE64)
    CLoopRegister rBasePC, tagTypeNumber, tagMask;
#endif
    CLoopRegister rRetVPC;
    CLoopDoubleRegister d0, d1;

    // Keep the compiler happy. We don't really need this, but the compiler
    // will complain. This makes the warning go away.
    t0.i = 0;
    t1.i = 0;

    // Instantiate the pseudo JIT stack frame used by the LLINT C Loop backend:
    JITStackFrame jitStackFrame;

    // The llint expects the native stack pointer, sp, to be pointing to the
    // jitStackFrame (which is the simulation of the native stack frame):
    JITStackFrame* const sp = &jitStackFrame;
    sp->vm = &callFrame->vm();

    // Set up an alias for the vm ptr in the JITStackFrame:
    VM*& vm = sp->vm;

    CodeBlock* codeBlock = callFrame->codeBlock();
    Instruction* vPC;

    // rPC is an alias for vPC. Set up the alias:
    CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC);

#if USE(JSVALUE32_64)
    vPC = codeBlock->instructions().begin();
#else // USE(JSVALUE64)
    vPC = 0;
    rBasePC.vp = codeBlock->instructions().begin();

    // For the ASM llint, JITStubs takes care of this initialization. We do
    // it explicitly here for the C loop:
    tagTypeNumber.i = 0xFFFF000000000000;
    tagMask.i = 0xFFFF000000000002;
#endif // USE(JSVALUE64)
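    // Descriptive note: these are the TagTypeNumber and TagMask constants of
    // the 64-bit NaN-boxed JSValue encoding; the values must stay in sync
    // with JSValue::encode()/decode().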

    // cfr is an alias for callFrame. Set up this alias:
    CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame);

    // Simulate a native return PC which should never be used:
    rRetVPC.i = 0xbbadbeef;

    // Interpreter variables for value passing between opcodes and/or helpers:
    NativeFunction nativeFunc = 0;
    JSValue functionReturnValue;
    Opcode opcode;

    opcode = LLInt::getOpcode(bootstrapOpcodeId);

    #if ENABLE(OPCODE_STATS)
        #define RECORD_OPCODE_STATS(__opcode) \
            OpcodeStats::recordInstruction(__opcode)
    #else
        #define RECORD_OPCODE_STATS(__opcode)
    #endif

    #if USE(JSVALUE32_64)
        #define FETCH_OPCODE() vPC->u.opcode
    #else // USE(JSVALUE64)
        #define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8)
    #endif // USE(JSVALUE64)

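    // Descriptive note: in the 64-bit case, rPC holds a bytecode *index* and
    // rBasePC holds the base address of the instruction stream; each
    // Instruction slot is 8 bytes, hence the "* 8" scaling here and the
    // matching ">> 3" in doReturnHelper below.
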
    #define NEXT_INSTRUCTION()           \
        do {                             \
            opcode = FETCH_OPCODE();     \
            DISPATCH_OPCODE();           \
        } while (false)

#if ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using computed goto statements:

    #define DISPATCH_OPCODE() goto *opcode

    #define DEFINE_OPCODE(__opcode) \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    DISPATCH_OPCODE();

#else // !ENABLE(COMPUTED_GOTO_OPCODES)
    //========================================================================
    // Loop dispatch mechanism using a C switch statement:

    #define DISPATCH_OPCODE() goto dispatchOpcode

    #define DEFINE_OPCODE(__opcode) \
        case __opcode: \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    dispatchOpcode:
    switch (opcode)

#endif // !ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Bytecode handlers:
    {
        // This is the file generated by offlineasm, which contains all of the
        // bytecode handlers for the interpreter, as compiled from
        // LowLevelInterpreter.asm and its peers.

        #include "LLIntAssembly.h"

        // In the ASM llint, getHostCallReturnValue() is a piece of glue
        // code provided by the JIT (see dfg/DFGOperations.cpp).
        // We simulate it here with a pseudo-opcode handler.
        OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
        {
            // The ASM part pops the frame:
            callFrame = callFrame->callerFrame();

            // The part in getHostCallReturnValueWithExecState():
            JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
            t1.i = result.tag();
            t0.i = result.payload();
#else
            t0.encodedJSValue = JSValue::encode(result);
#endif
            goto doReturnHelper;
        }

        OFFLINE_ASM_GLUE_LABEL(ctiOpThrowNotCaught)
        {
            return vm->exception;
        }

#if !ENABLE(COMPUTED_GOTO_OPCODES)
    default:
        ASSERT(false);
#endif

    } // END bytecode handler cases.

    //========================================================================
    // Bytecode helpers:

    doReturnHelper: {
        ASSERT(!!callFrame);
        if (callFrame->hasHostCallFrameFlag()) {
#if USE(JSVALUE32_64)
            return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
#else
            return JSValue::decode(t0.encodedJSValue);
#endif
        }

        // The normal ASM llint call implementation returns to the caller as
        // recorded in rRetVPC, and the caller would fetch the return address
        // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in
        // the callTargetFunction() macro in the llint asm files).
        //
        // For the C loop, we don't have the JIT stub to do this work for us.
        // So, we need to implement the equivalent of dispatchAfterCall() here
        // before dispatching to the PC.

        vPC = callFrame->currentVPC();

#if USE(JSVALUE64)
        // Based on LowLevelInterpreter64.asm's dispatchAfterCall():

        // When returning from a native trampoline call, unlike the assembly
        // LLInt, we can't simply return to the caller. In our case, we grab
        // the caller's VPC and resume execution there. However, the caller's
        // VPC returned by callFrame->currentVPC() is in the form of the real
        // address of the target bytecode, but the 64-bit llint expects the
        // VPC to be a bytecode offset. Hence, we need to map it back to a
        // bytecode offset before we dispatch via the usual dispatch mechanism
        // i.e. NEXT_INSTRUCTION():

        codeBlock = callFrame->codeBlock();
        ASSERT(codeBlock);
        rPC.vp = callFrame->currentVPC();
        rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin());
        rPC.i >>= 3;

        rBasePC.vp = codeBlock->instructions().begin();
#endif // USE(JSVALUE64)

        NEXT_INSTRUCTION();

    } // END doReturnHelper.


    // Keep the compiler happy so that it doesn't complain about unused
    // labels for the LLInt trampoline glue. The labels are automatically
    // emitted by the label macros above, and some of them are referenced by
    // the llint generated code. Since we can't tell ahead of time which
    // will be referenced and which will not, we'll just pacify the
    // compiler on all such labels:
    #define LLINT_OPCODE_ENTRY(__opcode, length) \
        UNUSED_LABEL(__opcode);
    FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
    #undef LLINT_OPCODE_ENTRY


    #undef NEXT_INSTRUCTION
    #undef DEFINE_OPCODE
    #undef CHECK_FOR_TIMEOUT
    #undef CAST
    #undef SIGN_BIT32

} // CLoop::execute()

} // namespace JSC

#else // !ENABLE(LLINT_C_LOOP)

//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
//

// These are for building an interpreter from generated assembly code:
#define OFFLINE_ASM_BEGIN   asm (
#define OFFLINE_ASM_END     );

#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode)
#define OFFLINE_ASM_GLUE_LABEL(__opcode)   OFFLINE_ASM_GLOBAL_LABEL(__opcode)

#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    ".thumb\n"                                   \
    ".thumb_func " THUMB_FUNC_PARAM(label) "\n"  \
    SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    SYMBOL_STRING(label) ":\n"
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"

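// Rough illustration (assumes a typical ELF target, where SYMBOL_STRING(label)
// is just "label" and HIDE_SYMBOL emits ".hidden"; Mach-O targets differ):
//
//   OFFLINE_ASM_GLOBAL_LABEL(llint_op_enter) expands, inside the asm() block, to
//       ".globl llint_op_enter\n"
//       ".hidden llint_op_enter\n"
//       "llint_op_enter:\n"
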
// This is a file generated by offlineasm, which contains all of the assembly code
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"

#endif // !ENABLE(LLINT_C_LOOP)

#endif // ENABLE(LLINT)