# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

# Work-around for the fact that the toolchain's awareness of armv7s results in
# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
# it.
if ARMv7s
end

# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp

# These declarations must match interpreter/RegisterFile.h.
const CallFrameHeaderSize = 48
const ArgumentCount = -48
const CallerFrame = -40
const Callee = -32
const ScopeChain = -24
const ReturnPC = -16
const CodeBlock = -8

const ThisArgumentOffset = -CallFrameHeaderSize - 8
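# ThisArgumentOffset works out to -48 - 8 = -56: 'this' is the first slot
# below the call frame header, with the remaining arguments at successively
# more negative offsets (see the argument profiling loop in
# functionInitialization below).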
43 | ||
44 | # Some register conventions. | |
45 | if JSVALUE64 | |
46 | # - Use a pair of registers to represent the PC: one register for the | |
47 | # base of the register file, and one register for the index. | |
48 | # - The PC base (or PB for short) should be stored in the csr. It will | |
49 | # get clobbered on calls to other JS code, but will get saved on calls | |
50 | # to C functions. | |
51 | # - C calls are still given the Instruction* rather than the PC index. | |
52 | # This requires an add before the call, and a sub after. | |
53 | const PC = t4 | |
54 | const PB = t6 | |
55 | const tagTypeNumber = csr1 | |
56 | const tagMask = csr2 | |
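    # (csr1 and csr2 are expected to be pre-loaded with JSC's 64-bit tagging
    # constants, presumably TagTypeNumber = 0xffff000000000000 and
    # TagMask = 0xffff000000000002 in this vintage of the code.)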
else
    const PC = t4
end

# Constants for reasoning about value representation.
if BIG_ENDIAN
    const TagOffset = 0
    const PayloadOffset = 4
else
    const TagOffset = 4
    const PayloadOffset = 0
end
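
# For example, on a little-endian target a boxed value at address p keeps its
# 32-bit payload at p + 0 and its 32-bit tag at p + 4.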
69 | ||
70 | # Type constants. | |
71 | const StringType = 5 | |
72 | const ObjectType = 13 | |
73 | ||
74 | # Type flags constants. | |
75 | const MasqueradesAsUndefined = 1 | |
76 | const ImplementsHasInstance = 2 | |
77 | const ImplementsDefaultHasInstance = 8 | |
78 | ||
79 | # Bytecode operand constants. | |
80 | const FirstConstantRegisterIndex = 0x40000000 | |
81 | ||
82 | # Code type constants. | |
83 | const GlobalCode = 0 | |
84 | const EvalCode = 1 | |
85 | const FunctionCode = 2 | |
86 | ||
87 | # The interpreter steals the tag word of the argument count. | |
88 | const LLIntReturnPC = ArgumentCount + TagOffset | |
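# (On a little-endian target this works out to -48 + 4 = -44.)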
89 | ||
90 | # String flags. | |
91 | const HashFlags8BitBuffer = 64 | |
92 | ||
93 | # Allocation constants | |
94 | if JSVALUE64 | |
95 | const JSFinalObjectSizeClassIndex = 1 | |
96 | else | |
97 | const JSFinalObjectSizeClassIndex = 3 | |
98 | end | |
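# (Presumably the index differs because a JSFinalObject occupies a different
# size class under the two value representations.)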
99 | ||
100 | # This must match wtf/Vector.h | |
101 | if JSVALUE64 | |
102 | const VectorSizeOffset = 0 | |
103 | const VectorBufferOffset = 8 | |
104 | else | |
105 | const VectorSizeOffset = 0 | |
106 | const VectorBufferOffset = 4 | |
107 | end | |
108 | ||
109 | ||
110 | # Some common utilities. | |
111 | macro crash() | |
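    # Store to WebKit's canonical bad address, 0xbbadbeef, so the crash shows
    # up recognizably in crash logs, then call through a null pointer in case
    # the store somehow doesn't take us down.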
    storei 0, 0xbbadbeef[]
    move 0, t0
    call t0
end

macro assert(assertion)
    if ASSERT_ENABLED
        assertion(.ok)
        crash()
    .ok:
    end
end
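
# assert() takes a macro that is passed a label and must branch to it when the
# assertion holds; falling through reaches crash(). For example, from
# assertNotConstant below:
#
#     assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)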
124 | ||
macro preserveReturnAddressAfterCall(destinationRegister)
    if ARMv7
        move lr, destinationRegister
    elsif X86 or X86_64
        pop destinationRegister
    else
        error
    end
end

macro restoreReturnAddressBeforeReturn(sourceRegister)
    if ARMv7
        move sourceRegister, lr
    elsif X86 or X86_64
        push sourceRegister
    else
        error
    end
end

macro traceExecution()
    if EXECUTION_TRACING
        callSlowPath(_llint_trace)
    end
end

macro slowPathForCall(advance, slowPath)
    callCallSlowPath(
        advance,
        slowPath,
        macro (callee)
            call callee
            dispatchAfterCall()
        end)
end

macro checkSwitchToJIT(increment, action)
    if JIT_ENABLED
        loadp CodeBlock[cfr], t0
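        # baddis adds the increment to the execution counter and branches to
        # .continue while the result is still negative; once the counter
        # crosses zero we fall through and tier up via action().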
        baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
        action()
    .continue:
    end
end

macro checkSwitchToJITForEpilogue()
    checkSwitchToJIT(
        10,
        macro ()
            callSlowPath(_llint_replace)
        end)
end

macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end

macro functionForCallCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
end

macro functionForConstructCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
end

macro notFunctionCodeBlockGetter(targetRegister)
    loadp CodeBlock[cfr], targetRegister
end

macro functionCodeBlockSetter(sourceRegister)
    storep sourceRegister, CodeBlock[cfr]
end

macro notFunctionCodeBlockSetter(sourceRegister)
    # Nothing to do!
end

# Do the bare minimum required to execute code. Sets up the PC and leaves the
# CodeBlock* in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    preserveReturnAddressAfterCall(t2)

    # Set up the call frame and check if we should OSR.
    storep t2, ReturnPC[cfr]
    if EXECUTION_TRACING
        callSlowPath(traceSlowPath)
    end
    codeBlockGetter(t1)
    if JIT_ENABLED
        baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
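        # The counter crossed zero, so consult the OSR slow path. By
        # convention it returns the machine code entry point in t0 (0 means
        # keep interpreting) and the call frame in t1.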
        cCall2(osrSlowPath, cfr, PC)
        move t1, cfr
        btpz t0, .recover
        loadp ReturnPC[cfr], t2
        restoreReturnAddressBeforeReturn(t2)
        jmp t0
    .recover:
        codeBlockGetter(t1)
    .continue:
    end
    codeBlockSetter(t1)

    # Set up the PC.
    if JSVALUE64
        loadp CodeBlock::m_instructions[t1], PB
        move 0, PC
    else
        loadp CodeBlock::m_instructions[t1], PC
    end
end

# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    if VALUE_PROFILER
        # Profile the arguments. Unfortunately, we have no choice but to do this. This
        # code is pretty horrendous because of the difference in ordering between
        # arguments and value profiles, the desire to have a simple loop-down-to-zero
        # loop, and the desire to use only three registers so as to preserve the PC and
        # the code block. It is likely that this code should be rewritten in a more
        # optimal way for architectures that have more than five registers available
        # for arbitrary use in the interpreter.
        loadi CodeBlock::m_numParameters[t1], t0
        addp -profileArgSkip, t0 # Use addi because that's what has the peephole
        assert(macro (ok) bpgteq t0, 0, ok end)
        btpz t0, .argumentProfileDone
        loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
        mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
        negp t0
        lshiftp 3, t0
        addp t2, t3
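        # t0 is now -8 * (number of arguments to profile), a byte offset that
        # counts up toward zero, and t3 is positioned so that the loop below
        # fills in the value profiles from the last argument down to the
        # first unskipped one.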
    .argumentProfileLoop:
        if JSVALUE64
            loadp ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2
            subp sizeof ValueProfile, t3
            storep t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
        else
            loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
            subp sizeof ValueProfile, t3
            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
            loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
            storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
        end
        baddpnz 8, t0, .argumentProfileLoop
    .argumentProfileDone:
    end

    # Check stack height.
    loadi CodeBlock::m_numCalleeRegisters[t1], t0
    loadp CodeBlock::m_globalData[t1], t2
    loadp JSGlobalData::interpreter[t2], t2 # FIXME: Can get to the RegisterFile from the JITStackFrame
    lshifti 3, t0
    addp t0, cfr, t0
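    # t0 is now the prospective new top of frame: cfr plus 8 bytes for each
    # callee register. It must not run past the register file's end.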
    bpaeq Interpreter::m_registerFile + RegisterFile::m_end[t2], t0, .stackHeightOK

    # Stack height check failed - need to call a slow_path.
    callSlowPath(_llint_register_file_check)
.stackHeightOK:
end

macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, scratch1, scratch2, slowCase)
    if ALWAYS_ALLOCATE_SLOW
        jmp slowCase
    else
        const offsetOfMySizeClass =
            JSGlobalData::heap +
            Heap::m_objectSpace +
            MarkedSpace::m_normalSpace +
            MarkedSpace::Subspace::preciseAllocators +
            sizeClassIndex * sizeof MarkedAllocator

        const offsetOfFirstFreeCell =
            MarkedAllocator::m_freeList +
            MarkedBlock::FreeList::head

        # FIXME: we can get the global data in one load from the stack.
        loadp CodeBlock[cfr], scratch1
        loadp CodeBlock::m_globalData[scratch1], scratch1

        # Get the object from the free list.
        loadp offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1], result
        btpz result, slowCase
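
        # Free cells are threaded into a singly linked list through their
        # first word, so loading [result] below yields the next free cell.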
        # Remove the object from the free list.
        loadp [result], scratch2
        storep scratch2, offsetOfMySizeClass + offsetOfFirstFreeCell[scratch1]

        # Initialize the object.
        loadp classInfoOffset[scratch1], scratch2
        storep scratch2, [result]
        storep structure, JSCell::m_structure[result]
        storep 0, JSObject::m_inheritorID[result]
        addp sizeof JSObject, result, scratch1
        storep scratch1, JSObject::m_propertyStorage[result]
    end
end

macro doReturn()
    loadp ReturnPC[cfr], t2
    loadp CallerFrame[cfr], cfr
    restoreReturnAddressBeforeReturn(t2)
    ret
end


# Indicate the beginning of LLInt.
_llint_begin:
    crash()
338 | ||
339 | _llint_program_prologue: | |
340 | prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) | |
341 | dispatch(0) | |
342 | ||
343 | ||
344 | _llint_eval_prologue: | |
345 | prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) | |
346 | dispatch(0) | |
347 | ||
348 | ||
349 | _llint_function_for_call_prologue: | |
350 | prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call) | |
351 | .functionForCallBegin: | |
352 | functionInitialization(0) | |
353 | dispatch(0) | |
354 | ||
355 | ||
356 | _llint_function_for_construct_prologue: | |
357 | prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct) | |
358 | .functionForConstructBegin: | |
359 | functionInitialization(1) | |
360 | dispatch(0) | |
361 | ||
362 | ||
363 | _llint_function_for_call_arity_check: | |
364 | prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call) | |
365 | functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck) | |
366 | ||
367 | ||
368 | _llint_function_for_construct_arity_check: | |
369 | prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct) | |
370 | functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck) | |
371 | ||
372 | ||
373 | # Value-representation-specific code. | |
374 | if JSVALUE64 | |
375 | include LowLevelInterpreter64 | |
376 | else | |
377 | include LowLevelInterpreter32_64 | |
378 | end | |
379 | ||
380 | ||
381 | # Value-representation-agnostic code. | |
382 | _llint_op_new_array: | |
383 | traceExecution() | |
384 | callSlowPath(_llint_slow_path_new_array) | |
385 | dispatch(4) | |
386 | ||
387 | ||
388 | _llint_op_new_array_buffer: | |
389 | traceExecution() | |
390 | callSlowPath(_llint_slow_path_new_array_buffer) | |
391 | dispatch(4) | |
392 | ||
393 | ||
394 | _llint_op_new_regexp: | |
395 | traceExecution() | |
396 | callSlowPath(_llint_slow_path_new_regexp) | |
397 | dispatch(3) | |
398 | ||
399 | ||
400 | _llint_op_less: | |
401 | traceExecution() | |
402 | callSlowPath(_llint_slow_path_less) | |
403 | dispatch(4) | |
404 | ||
405 | ||
406 | _llint_op_lesseq: | |
407 | traceExecution() | |
408 | callSlowPath(_llint_slow_path_lesseq) | |
409 | dispatch(4) | |
410 | ||
411 | ||
412 | _llint_op_greater: | |
413 | traceExecution() | |
414 | callSlowPath(_llint_slow_path_greater) | |
415 | dispatch(4) | |
416 | ||
417 | ||
418 | _llint_op_greatereq: | |
419 | traceExecution() | |
420 | callSlowPath(_llint_slow_path_greatereq) | |
421 | dispatch(4) | |
422 | ||
423 | ||
424 | _llint_op_mod: | |
425 | traceExecution() | |
426 | callSlowPath(_llint_slow_path_mod) | |
427 | dispatch(4) | |
428 | ||
429 | ||
430 | _llint_op_typeof: | |
431 | traceExecution() | |
432 | callSlowPath(_llint_slow_path_typeof) | |
433 | dispatch(3) | |
434 | ||
435 | ||
436 | _llint_op_is_object: | |
437 | traceExecution() | |
438 | callSlowPath(_llint_slow_path_is_object) | |
439 | dispatch(3) | |
440 | ||
441 | ||
442 | _llint_op_is_function: | |
443 | traceExecution() | |
444 | callSlowPath(_llint_slow_path_is_function) | |
445 | dispatch(3) | |
446 | ||
447 | ||
448 | _llint_op_in: | |
449 | traceExecution() | |
450 | callSlowPath(_llint_slow_path_in) | |
451 | dispatch(4) | |
452 | ||
453 | ||
454 | _llint_op_resolve: | |
455 | traceExecution() | |
456 | callSlowPath(_llint_slow_path_resolve) | |
457 | dispatch(4) | |
458 | ||
459 | ||
460 | _llint_op_resolve_skip: | |
461 | traceExecution() | |
462 | callSlowPath(_llint_slow_path_resolve_skip) | |
463 | dispatch(5) | |
464 | ||
465 | ||
466 | _llint_op_resolve_base: | |
467 | traceExecution() | |
468 | callSlowPath(_llint_slow_path_resolve_base) | |
469 | dispatch(5) | |
470 | ||
471 | ||
472 | _llint_op_ensure_property_exists: | |
473 | traceExecution() | |
474 | callSlowPath(_llint_slow_path_ensure_property_exists) | |
475 | dispatch(3) | |
476 | ||
477 | ||
478 | _llint_op_resolve_with_base: | |
479 | traceExecution() | |
480 | callSlowPath(_llint_slow_path_resolve_with_base) | |
481 | dispatch(5) | |
482 | ||
483 | ||
484 | _llint_op_resolve_with_this: | |
485 | traceExecution() | |
486 | callSlowPath(_llint_slow_path_resolve_with_this) | |
487 | dispatch(5) | |
488 | ||
489 | ||
490 | _llint_op_del_by_id: | |
491 | traceExecution() | |
492 | callSlowPath(_llint_slow_path_del_by_id) | |
493 | dispatch(4) | |
494 | ||
495 | ||
496 | _llint_op_del_by_val: | |
497 | traceExecution() | |
498 | callSlowPath(_llint_slow_path_del_by_val) | |
499 | dispatch(4) | |
500 | ||
501 | ||
502 | _llint_op_put_by_index: | |
503 | traceExecution() | |
504 | callSlowPath(_llint_slow_path_put_by_index) | |
505 | dispatch(4) | |
506 | ||
507 | ||
508 | _llint_op_put_getter_setter: | |
509 | traceExecution() | |
510 | callSlowPath(_llint_slow_path_put_getter_setter) | |
511 | dispatch(5) | |
512 | ||
513 | ||
514 | _llint_op_jmp_scopes: | |
515 | traceExecution() | |
516 | callSlowPath(_llint_slow_path_jmp_scopes) | |
517 | dispatch(0) | |
518 | ||
519 | ||
520 | _llint_op_loop_if_true: | |
521 | jmp _llint_op_jtrue | |
522 | _llint_op_jtrue: | |
523 | traceExecution() | |
524 | jumpTrueOrFalse( | |
525 | macro (value, target) btinz value, target end, | |
526 | _llint_slow_path_jtrue) | |
527 | ||
528 | ||
529 | _llint_op_loop_if_false: | |
530 | jmp _llint_op_jfalse | |
531 | _llint_op_jfalse: | |
532 | traceExecution() | |
533 | jumpTrueOrFalse( | |
534 | macro (value, target) btiz value, target end, | |
535 | _llint_slow_path_jfalse) | |
536 | ||
537 | ||
538 | _llint_op_loop_if_less: | |
539 | jmp _llint_op_jless | |
540 | _llint_op_jless: | |
541 | traceExecution() | |
542 | compare( | |
543 | macro (left, right, target) bilt left, right, target end, | |
544 | macro (left, right, target) bdlt left, right, target end, | |
545 | _llint_slow_path_jless) | |
546 | ||
547 | ||
548 | _llint_op_jnless: | |
549 | traceExecution() | |
550 | compare( | |
551 | macro (left, right, target) bigteq left, right, target end, | |
552 | macro (left, right, target) bdgtequn left, right, target end, | |
553 | _llint_slow_path_jnless) | |
554 | ||
555 | ||
556 | _llint_op_loop_if_greater: | |
557 | jmp _llint_op_jgreater | |
558 | _llint_op_jgreater: | |
559 | traceExecution() | |
560 | compare( | |
561 | macro (left, right, target) bigt left, right, target end, | |
562 | macro (left, right, target) bdgt left, right, target end, | |
563 | _llint_slow_path_jgreater) | |
564 | ||
565 | ||
566 | _llint_op_jngreater: | |
567 | traceExecution() | |
568 | compare( | |
569 | macro (left, right, target) bilteq left, right, target end, | |
570 | macro (left, right, target) bdltequn left, right, target end, | |
571 | _llint_slow_path_jngreater) | |
572 | ||
573 | ||
574 | _llint_op_loop_if_lesseq: | |
575 | jmp _llint_op_jlesseq | |
576 | _llint_op_jlesseq: | |
577 | traceExecution() | |
578 | compare( | |
579 | macro (left, right, target) bilteq left, right, target end, | |
580 | macro (left, right, target) bdlteq left, right, target end, | |
581 | _llint_slow_path_jlesseq) | |
582 | ||
583 | ||
584 | _llint_op_jnlesseq: | |
585 | traceExecution() | |
586 | compare( | |
587 | macro (left, right, target) bigt left, right, target end, | |
588 | macro (left, right, target) bdgtun left, right, target end, | |
589 | _llint_slow_path_jnlesseq) | |
590 | ||
591 | ||
592 | _llint_op_loop_if_greatereq: | |
593 | jmp _llint_op_jgreatereq | |
594 | _llint_op_jgreatereq: | |
595 | traceExecution() | |
596 | compare( | |
597 | macro (left, right, target) bigteq left, right, target end, | |
598 | macro (left, right, target) bdgteq left, right, target end, | |
599 | _llint_slow_path_jgreatereq) | |
600 | ||
601 | ||
602 | _llint_op_jngreatereq: | |
603 | traceExecution() | |
604 | compare( | |
605 | macro (left, right, target) bilt left, right, target end, | |
606 | macro (left, right, target) bdltun left, right, target end, | |
607 | _llint_slow_path_jngreatereq) | |
608 | ||
609 | ||
610 | _llint_op_loop_hint: | |
611 | traceExecution() | |
612 | checkSwitchToJITForLoop() | |
613 | dispatch(1) | |
614 | ||
615 | ||
616 | _llint_op_switch_string: | |
617 | traceExecution() | |
618 | callSlowPath(_llint_slow_path_switch_string) | |
619 | dispatch(0) | |
620 | ||
621 | ||
622 | _llint_op_new_func_exp: | |
623 | traceExecution() | |
624 | callSlowPath(_llint_slow_path_new_func_exp) | |
625 | dispatch(3) | |
626 | ||
627 | ||
628 | _llint_op_call: | |
629 | traceExecution() | |
630 | doCall(_llint_slow_path_call) | |
631 | ||
632 | ||
633 | _llint_op_construct: | |
634 | traceExecution() | |
635 | doCall(_llint_slow_path_construct) | |
636 | ||
637 | ||
638 | _llint_op_call_varargs: | |
639 | traceExecution() | |
640 | slowPathForCall(6, _llint_slow_path_call_varargs) | |
641 | ||
642 | ||
643 | _llint_op_call_eval: | |
644 | traceExecution() | |
645 | ||
    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval() in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would require a total of
    # up to four pieces of state that cannot be easily packed into two
    # registers (C functions can return up to two registers easily):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #   to call.
    #
    # - Either:
    #   - The JS return value (two registers), or
    #   - The PC to call.
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.

    slowPathForCall(4, _llint_slow_path_call_eval)
680 | ||
681 | ||
682 | _llint_generic_return_point: | |
683 | dispatchAfterCall() | |
684 | ||
685 | ||
686 | _llint_op_strcat: | |
687 | traceExecution() | |
688 | callSlowPath(_llint_slow_path_strcat) | |
689 | dispatch(4) | |
690 | ||
691 | ||
692 | _llint_op_method_check: | |
693 | traceExecution() | |
694 | # We ignore method checks and use normal get_by_id optimizations. | |
695 | dispatch(1) | |
696 | ||
697 | ||
698 | _llint_op_get_pnames: | |
699 | traceExecution() | |
700 | callSlowPath(_llint_slow_path_get_pnames) | |
701 | dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else. | |
702 | ||
703 | ||
704 | _llint_op_push_scope: | |
705 | traceExecution() | |
706 | callSlowPath(_llint_slow_path_push_scope) | |
707 | dispatch(2) | |
708 | ||
709 | ||
710 | _llint_op_pop_scope: | |
711 | traceExecution() | |
712 | callSlowPath(_llint_slow_path_pop_scope) | |
713 | dispatch(1) | |
714 | ||
715 | ||
716 | _llint_op_push_new_scope: | |
717 | traceExecution() | |
718 | callSlowPath(_llint_slow_path_push_new_scope) | |
719 | dispatch(4) | |
720 | ||
721 | ||
722 | _llint_op_throw: | |
723 | traceExecution() | |
724 | callSlowPath(_llint_slow_path_throw) | |
725 | dispatch(2) | |
726 | ||
727 | ||
728 | _llint_op_throw_reference_error: | |
729 | traceExecution() | |
730 | callSlowPath(_llint_slow_path_throw_reference_error) | |
731 | dispatch(2) | |
732 | ||
733 | ||
734 | _llint_op_profile_will_call: | |
735 | traceExecution() | |
736 | loadp JITStackFrame::enabledProfilerReference[sp], t0 | |
737 | btpz [t0], .opProfileWillCallDone | |
738 | callSlowPath(_llint_slow_path_profile_will_call) | |
739 | .opProfileWillCallDone: | |
740 | dispatch(2) | |
741 | ||
742 | ||
743 | _llint_op_profile_did_call: | |
744 | traceExecution() | |
745 | loadp JITStackFrame::enabledProfilerReference[sp], t0 | |
746 | btpz [t0], .opProfileWillCallDone | |
747 | callSlowPath(_llint_slow_path_profile_did_call) | |
748 | .opProfileDidCallDone: | |
749 | dispatch(2) | |
750 | ||
751 | ||
752 | _llint_op_debug: | |
753 | traceExecution() | |
754 | callSlowPath(_llint_slow_path_debug) | |
755 | dispatch(4) | |
756 | ||
757 | ||
758 | _llint_native_call_trampoline: | |
759 | nativeCallTrampoline(NativeExecutable::m_function) | |
760 | ||
761 | ||
762 | _llint_native_construct_trampoline: | |
763 | nativeCallTrampoline(NativeExecutable::m_constructor) | |
764 | ||
765 | ||
766 | # Lastly, make sure that we can link even though we don't support all opcodes. | |
767 | # These opcodes should never arise when using LLInt or either JIT. We assert | |
768 | # as much. | |
769 | ||
770 | macro notSupported() | |
771 | if ASSERT_ENABLED | |
772 | crash() | |
773 | else | |
774 | # We should use whatever the smallest possible instruction is, just to | |
775 | # ensure that there is a gap between instruction labels. If multiple | |
776 | # smallest instructions exist, we should pick the one that is most | |
777 | # likely result in execution being halted. Currently that is the break | |
778 | # instruction on all architectures we're interested in. (Break is int3 | |
779 | # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.) | |
780 | break | |
781 | end | |
782 | end | |
783 | ||
784 | _llint_op_get_array_length: | |
785 | notSupported() | |
786 | ||
787 | _llint_op_get_by_id_chain: | |
788 | notSupported() | |
789 | ||
790 | _llint_op_get_by_id_custom_chain: | |
791 | notSupported() | |
792 | ||
793 | _llint_op_get_by_id_custom_proto: | |
794 | notSupported() | |
795 | ||
796 | _llint_op_get_by_id_custom_self: | |
797 | notSupported() | |
798 | ||
799 | _llint_op_get_by_id_generic: | |
800 | notSupported() | |
801 | ||
802 | _llint_op_get_by_id_getter_chain: | |
803 | notSupported() | |
804 | ||
805 | _llint_op_get_by_id_getter_proto: | |
806 | notSupported() | |
807 | ||
808 | _llint_op_get_by_id_getter_self: | |
809 | notSupported() | |
810 | ||
811 | _llint_op_get_by_id_proto: | |
812 | notSupported() | |
813 | ||
814 | _llint_op_get_by_id_self: | |
815 | notSupported() | |
816 | ||
817 | _llint_op_get_string_length: | |
818 | notSupported() | |
819 | ||
820 | _llint_op_put_by_id_generic: | |
821 | notSupported() | |
822 | ||
823 | _llint_op_put_by_id_replace: | |
824 | notSupported() | |
825 | ||
826 | _llint_op_put_by_id_transition: | |
827 | notSupported() | |
828 | ||
829 | ||
830 | # Indicate the end of LLInt. | |
831 | _llint_end: | |
832 | crash() | |
833 |