]> git.saurik.com Git - apple/javascriptcore.git/blob - llint/LowLevelInterpreter32_64.asm
JavaScriptCore-1218.34.tar.gz
[apple/javascriptcore.git] / llint / LowLevelInterpreter32_64.asm
1 # Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
2 #
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
5 # are met:
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
11 #
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
23
24
25 # Crash course on the language that this is written in (which I just call
26 # "assembly" even though it's more than that):
27 #
28 # - Mostly gas-style operand ordering. The last operand tends to be the
29 # destination. So "a := b" is written as "mov b, a". But unlike gas,
30 # comparisons are in-order, so "if (a < b)" is written as
31 # "bilt a, b, ...".
32 #
33 # - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
34 # Currently this is just 32-bit so "i" and "p" are interchangeable
35 # except when an op supports one but not the other.
36 #
37 # - In general, valid operands for macro invocations and instructions are
38 # registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
39 # (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
40 # (eg "_foo" or ".foo"). Macro invocations can also take anonymous
41 # macros as operands. Instructions cannot take anonymous macros.
42 #
43 # - Labels must have names that begin with either "_" or ".". A "." label
44 # is local and gets renamed before code gen to minimize namespace
45 # pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
46 # may or may not be removed during code gen depending on whether the asm
47 # conventions for C name mangling on the target platform mandate a "_"
48 # prefix.
49 #
50 # - A "macro" is a lambda expression, which may be either anonymous or
51 # named. But this has caveats. "macro" can take zero or more arguments,
52 # which may be macros or any valid operands, but it can only return
53 # code. But you can do Turing-complete things via continuation passing
54 # style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
55 # that, since you'll just crash the assembler.
56 #
57 # - An "if" is a conditional on settings. Any identifier supplied in the
58 # predicate of an "if" is assumed to be a #define that is available
59 # during code gen. So you can't use "if" for computation in a macro, but
60 # you can use it to select different pieces of code for different
61 # platforms.
62 #
63 # - Arguments to macros follow lexical scoping rather than dynamic scoping.
64 # Const's also follow lexical scoping and may override (hide) arguments
65 # or other consts. All variables (arguments and constants) can be bound
66 # to operands. Additionally, arguments (but not constants) can be bound
67 # to macros.
68
69
70 # Below we have a bunch of constant declarations. Each constant must have
71 # a corresponding ASSERT() in LLIntData.cpp.
72
73
74 # Value representation constants.
# 32-bit JSValue boxing: a value is a {tag, payload} pair.  Tags are small
# negative ints; any tag numerically below LowestTag is the high word of a
# double.  Each constant must match an ASSERT() in LLIntData.cpp (see above).
75 const Int32Tag = -1
76 const BooleanTag = -2
77 const NullTag = -3
78 const UndefinedTag = -4
79 const CellTag = -5
80 const EmptyValueTag = -6
81 const DeletedValueTag = -7
82 const LowestTag = DeletedValueTag
83
84
85 # Utilities
# Advance PC by `advance` 4-byte instruction slots, then jump to the opcode
# handler whose address is stored at the new PC.
86 macro dispatch(advance)
87 addp advance * 4, PC
88 jmp [PC]
89 end
90
# Branch dispatch: pcOffset is an instruction count in a register; scale it
# to bytes (<< 2), add to PC, and jump.  Clobbers pcOffset.
91 macro dispatchBranchWithOffset(pcOffset)
92 lshifti 2, pcOffset
93 addp pcOffset, PC
94 jmp [PC]
95 end
96
# Branch dispatch where pcOffset is an operand address in the instruction
# stream; loads the offset into t0 first.  Clobbers t0.
97 macro dispatchBranch(pcOffset)
98 loadi pcOffset, t0
99 dispatchBranchWithOffset(t0)
100 end
101
# Resume after a call opcode: the return PC was stashed in the call frame's
# ArgumentCount tag slot before the call (see callCallSlowPath).
102 macro dispatchAfterCall()
103 loadi ArgumentCount + TagOffset[cfr], PC
104 jmp [PC]
105 end
106
# Call a C function with two arguments, per target calling convention:
# registers on ARM/MIPS/SH4, stack pokes on 32-bit X86, and the cloop
# helper when running as the C loop interpreter.
107 macro cCall2(function, arg1, arg2)
108 if ARM or ARMv7 or ARMv7_TRADITIONAL
109 move arg1, t0
110 move arg2, t1
111 call function
112 elsif X86
# NOTE(review): resetX87Stack presumably clears x87 FP state before the
# call — confirm against the offlineasm backend.
113 resetX87Stack
114 poke arg1, 0
115 poke arg2, 1
116 call function
117 elsif MIPS or SH4
118 move arg1, a0
119 move arg2, a1
120 call function
121 elsif C_LOOP
122 cloopCallSlowPath function, arg1, arg2
123 else
124 error
125 end
126 end
127
128 # This barely works. arg3 and arg4 should probably be immediates.
# Four-argument variant of cCall2.  Not supported under C_LOOP (errors out).
129 macro cCall4(function, arg1, arg2, arg3, arg4)
130 if ARM or ARMv7 or ARMv7_TRADITIONAL
131 move arg1, t0
132 move arg2, t1
133 move arg3, t2
134 move arg4, t3
135 call function
136 elsif X86
137 resetX87Stack
138 poke arg1, 0
139 poke arg2, 1
140 poke arg3, 2
141 poke arg4, 3
142 call function
143 elsif MIPS or SH4
144 move arg1, a0
145 move arg2, a1
146 move arg3, a2
147 move arg4, a3
148 call function
149 elsif C_LOOP
150 error
151 else
152 error
153 end
154 end
155
# Standard slow-path protocol: the C function receives (cfr, PC) and
# returns the next PC in t0 and the (possibly updated) call frame in t1.
156 macro callSlowPath(slowPath)
157 cCall2(slowPath, cfr, PC)
158 move t0, PC
159 move t1, cfr
160 end
161
162 # Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
163 # should be an immediate integer - any integer you like; use it to identify the place you're
164 # debugging from. operand should likewise be an immediate, and should identify the operand
165 # in the instruction stream you'd like to print out.
166 macro traceOperand(fromWhere, operand)
167 cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
168 move t0, PC
169 move t1, cfr
170 end
171
172 # Debugging operation if you'd like to print the value of an operand in the instruction
173 # stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
174 # value.
175 macro traceValue(fromWhere, operand)
176 cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
177 move t0, PC
178 move t1, cfr
179 end
180
181 # Call a slowPath for call opcodes.
# The return PC (current PC advanced past this opcode) is stashed in the
# ArgumentCount tag slot so dispatchAfterCall() can restore it; `action`
# then decides what to do with the slow path's result left in t0.
182 macro callCallSlowPath(advance, slowPath, action)
183 addp advance * 4, PC, t0
184 storep t0, ArgumentCount + TagOffset[cfr]
185 cCall2(slowPath, cfr, PC)
186 move t1, cfr
187 action(t0)
188 end
189
# Run the watchdog-timer slow path.  A non-zero t0 means an exception is
# pending: jump to throwHandler.  Otherwise restore PC from the slot where
# it was saved and fall through.
190 macro callWatchdogTimerHandler(throwHandler)
191 storei PC, ArgumentCount + TagOffset[cfr]
192 cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
193 move t1, cfr
194 btpnz t0, throwHandler
195 loadi ArgumentCount + TagOffset[cfr], PC
196 end
197
# Loop-edge OSR check: when the execution counter trips, call the loop OSR
# slow path.  A non-zero t0 is JIT machine code to jump straight into; zero
# means stay in the interpreter and reload the saved PC.
198 macro checkSwitchToJITForLoop()
199 checkSwitchToJIT(
200 1,
201 macro ()
202 storei PC, ArgumentCount + TagOffset[cfr]
203 cCall2(_llint_loop_osr, cfr, PC)
204 move t1, cfr
205 btpz t0, .recover
206 jmp t0
207 .recover:
208 loadi ArgumentCount + TagOffset[cfr], PC
209 end)
210 end
211
212 # Index, tag, and payload must be different registers. Index is not
213 # changed.
# Load the {tag, payload} of virtual register `index`: a frame local when
# index < FirstConstantRegisterIndex, otherwise an entry of the CodeBlock's
# constant-register pool.
214 macro loadConstantOrVariable(index, tag, payload)
215 bigteq index, FirstConstantRegisterIndex, .constant
216 loadi TagOffset[cfr, index, 8], tag
217 loadi PayloadOffset[cfr, index, 8], payload
218 jmp .done
219 .constant:
220 loadp CodeBlock[cfr], payload
221 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
222 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
223 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
224 loadp TagOffset[payload, index, 8], tag
225 loadp PayloadOffset[payload, index, 8], payload
226 .done:
227 end
228
# Tag-only variant of loadConstantOrVariable.
229 macro loadConstantOrVariableTag(index, tag)
230 bigteq index, FirstConstantRegisterIndex, .constant
231 loadi TagOffset[cfr, index, 8], tag
232 jmp .done
233 .constant:
234 loadp CodeBlock[cfr], tag
235 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
236 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
237 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
238 loadp TagOffset[tag, index, 8], tag
239 .done:
240 end
241
242 # Index and payload may be the same register. Index may be clobbered.
# Register-economical variant: in the constant case it shifts the index in
# place and adds it to the base, so the index register is destroyed.
243 macro loadConstantOrVariable2Reg(index, tag, payload)
244 bigteq index, FirstConstantRegisterIndex, .constant
245 loadi TagOffset[cfr, index, 8], tag
246 loadi PayloadOffset[cfr, index, 8], payload
247 jmp .done
248 .constant:
249 loadp CodeBlock[cfr], tag
250 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
251 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
252 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
253 lshifti 3, index
254 addp index, tag
255 loadp PayloadOffset[tag], payload
256 loadp TagOffset[tag], tag
257 .done:
258 end
259
# Payload load with a caller-supplied `tagCheck` macro applied to the tag
# slot in both the frame-local and constant-pool cases.
260 macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
261 bigteq index, FirstConstantRegisterIndex, .constant
262 tagCheck(TagOffset[cfr, index, 8])
263 loadi PayloadOffset[cfr, index, 8], payload
264 jmp .done
265 .constant:
266 loadp CodeBlock[cfr], payload
267 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
268 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
269 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
270 tagCheck(TagOffset[payload, index, 8])
271 loadp PayloadOffset[payload, index, 8], payload
272 .done:
273 end
274
275 # Index and payload must be different registers. Index is not mutated. Use
276 # this if you know what the tag of the variable should be. Doing the tag
277 # test as part of loading the variable reduces register use, but may not
278 # be faster than doing loadConstantOrVariable followed by a branch on the
279 # tag.
280 macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
281 loadConstantOrVariablePayloadTagCustom(
282 index,
283 macro (actualTag) bineq actualTag, expectedTag, slow end,
284 payload)
285 end
286
# Payload load with no tag check at all (empty tagCheck macro).
287 macro loadConstantOrVariablePayloadUnchecked(index, payload)
288 loadConstantOrVariablePayloadTagCustom(
289 index,
290 macro (actualTag) end,
291 payload)
292 end
293
294 macro writeBarrier(tag, payload)
295 # Nothing to do, since we don't have a generational or incremental collector.
296 end
297
# Record {tag, payload} into a ValueProfile bucket when the value profiler
# is compiled in; a no-op otherwise.
298 macro valueProfile(tag, payload, profile)
299 if VALUE_PROFILER
300 storei tag, ValueProfile::m_buckets + TagOffset[profile]
301 storei payload, ValueProfile::m_buckets + PayloadOffset[profile]
302 end
303 end
304
305
306 # Entrypoints into the interpreter
307
308 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Verify the caller passed at least m_numParameters arguments.  Otherwise
# run slow_path, which either fixes up the frame (t0 == 0) or signals an
# error (t0 != 0), in which case we unwind via the VM's throw target.
309 macro functionArityCheck(doneLabel, slow_path)
310 loadi PayloadOffset + ArgumentCount[cfr], t0
311 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
312 cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
313 move t1, cfr
314 btiz t0, .continue
# Error path: fetch the VM from the JIT stack frame and jump to its
# machine-code throw target with the throw call frame.
315 loadp JITStackFrame::vm[sp], t1
316 loadp VM::callFrameForThrow[t1], t0
317 jmp VM::targetMachinePCForThrow[t1]
318 .continue:
319 # Reload CodeBlock and PC, since the slow_path clobbered it.
320 loadp CodeBlock[cfr], t1
321 loadp CodeBlock::m_instructions[t1], PC
322 jmp doneLabel
323 end
324
325
326 # Instruction implementations
327
# op_enter: initialize all m_numVars frame locals to undefined
# (UndefinedTag / 0 payload), counting down from the last local.
328 _llint_op_enter:
329 traceExecution()
330 loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
331 loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
332 btiz t2, .opEnterDone
333 move UndefinedTag, t0
334 move 0, t1
335 .opEnterLoop:
336 subi 1, t2
337 storei t0, TagOffset[cfr, t2, 8]
338 storei t1, PayloadOffset[cfr, t2, 8]
339 btinz t2, .opEnterLoop
340 .opEnterDone:
341 dispatch(1)
342
343
# op_create_activation: operand 4[PC] is the activation register; create
# the activation lazily, only if the register still holds the empty value.
344 _llint_op_create_activation:
345 traceExecution()
346 loadi 4[PC], t0
347 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
348 callSlowPath(_llint_slow_path_create_activation)
349 .opCreateActivationDone:
350 dispatch(2)
351
352
# op_init_lazy_reg: mark register 4[PC] as empty so later opcodes can tell
# it has not been materialized yet.
353 _llint_op_init_lazy_reg:
354 traceExecution()
355 loadi 4[PC], t0
356 storei EmptyValueTag, TagOffset[cfr, t0, 8]
357 storei 0, PayloadOffset[cfr, t0, 8]
358 dispatch(2)
359
360
# op_create_arguments: like create_activation, materialize the arguments
# object via the slow path only if register 4[PC] is still empty.
361 _llint_op_create_arguments:
362 traceExecution()
363 loadi 4[PC], t0
364 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
365 callSlowPath(_llint_slow_path_create_arguments)
366 .opCreateArgumentsDone:
367 dispatch(2)
368
369
# op_create_this: dst 4[PC], callee 8[PC].  Fast path allocates the `this`
# object from the callee's object-allocation profile; a null allocator (or
# a failed allocation) falls back to the slow path.
370 _llint_op_create_this:
371 traceExecution()
372 loadi 8[PC], t0
373 loadp PayloadOffset[cfr, t0, 8], t0
374 loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
375 loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
376 btpz t1, .opCreateThisSlow
377 allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
378 loadi 4[PC], t1
379 storei CellTag, TagOffset[cfr, t1, 8]
380 storei t0, PayloadOffset[cfr, t1, 8]
381 dispatch(4)
382
383 .opCreateThisSlow:
384 callSlowPath(_llint_slow_path_create_this)
385 dispatch(4)
386
387
# op_get_callee: store this frame's Callee cell into dst 4[PC], profiling
# the value via the ValueProfile pointer at 8[PC].
388 _llint_op_get_callee:
389 traceExecution()
390 loadi 4[PC], t0
391 loadp PayloadOffset + Callee[cfr], t1
392 loadp 8[PC], t2
393 valueProfile(CellTag, t1, t2)
394 storei CellTag, TagOffset[cfr, t0, 8]
395 storei t1, PayloadOffset[cfr, t0, 8]
396 dispatch(3)
397
398
# op_convert_this: `this` in 4[PC] stays as-is on the fast path if it is a
# cell of at-least-Object type; primitives and exotic callees take the slow
# path, which performs the ToObject-style conversion.
399 _llint_op_convert_this:
400 traceExecution()
401 loadi 4[PC], t0
402 bineq TagOffset[cfr, t0, 8], CellTag, .opConvertThisSlow
403 loadi PayloadOffset[cfr, t0, 8], t0
404 loadp JSCell::m_structure[t0], t0
405 bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
406 loadi 8[PC], t1
407 valueProfile(CellTag, t0, t1)
408 dispatch(3)
409
410 .opConvertThisSlow:
411 callSlowPath(_llint_slow_path_convert_this)
412 dispatch(3)
413
414
# op_new_object: allocate an empty object from the ObjectAllocationProfile
# stored in the instruction stream (operand 3); dst is 4[PC].
415 _llint_op_new_object:
416 traceExecution()
417 loadpFromInstruction(3, t0)
418 loadp ObjectAllocationProfile::m_allocator[t0], t1
419 loadp ObjectAllocationProfile::m_structure[t0], t2
420 allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
421 loadi 4[PC], t1
422 storei CellTag, TagOffset[cfr, t1, 8]
423 storei t0, PayloadOffset[cfr, t1, 8]
424 dispatch(4)
425
426 .opNewObjectSlow:
427 callSlowPath(_llint_slow_path_new_object)
428 dispatch(4)
429
430
# op_mov: dst 4[PC] = src 8[PC] (src may be a constant-pool entry).
431 _llint_op_mov:
432 traceExecution()
433 loadi 8[PC], t1
434 loadi 4[PC], t0
435 loadConstantOrVariable(t1, t2, t3)
436 storei t2, TagOffset[cfr, t0, 8]
437 storei t3, PayloadOffset[cfr, t0, 8]
438 dispatch(3)
439
440
# op_not: dst 4[PC] = !src 8[PC].  Fast path only for boolean-tagged
# values: flip the payload's low bit; everything else goes slow.
441 _llint_op_not:
442 traceExecution()
443 loadi 8[PC], t0
444 loadi 4[PC], t1
445 loadConstantOrVariable(t0, t2, t3)
446 bineq t2, BooleanTag, .opNotSlow
447 xori 1, t3
448 storei t2, TagOffset[cfr, t1, 8]
449 storei t3, PayloadOffset[cfr, t1, 8]
450 dispatch(3)
451
452 .opNotSlow:
453 callSlowPath(_llint_slow_path_not)
454 dispatch(3)
455
456
# op_eq: dst 4[PC] = (8[PC] == 12[PC]).  Fast path requires identical tags
# that are neither cells nor doubles (tag < LowestTag); then loose equality
# reduces to integer payload comparison.
457 _llint_op_eq:
458 traceExecution()
459 loadi 12[PC], t2
460 loadi 8[PC], t0
461 loadConstantOrVariable(t2, t3, t1)
462 loadConstantOrVariable2Reg(t0, t2, t0)
463 bineq t2, t3, .opEqSlow
464 bieq t2, CellTag, .opEqSlow
465 bib t2, LowestTag, .opEqSlow
466 loadi 4[PC], t2
467 cieq t0, t1, t0
468 storei BooleanTag, TagOffset[cfr, t2, 8]
469 storei t0, PayloadOffset[cfr, t2, 8]
470 dispatch(4)
471
472 .opEqSlow:
473 callSlowPath(_llint_slow_path_eq)
474 dispatch(4)
475
476
# op_eq_null: dst 4[PC] = (src 8[PC] == null), loose.  A cell is equal to
# null only if it masquerades as undefined in this frame's global object;
# an immediate is equal if its tag is NullTag or UndefinedTag.
477 _llint_op_eq_null:
478 traceExecution()
479 loadi 8[PC], t0
480 loadi 4[PC], t3
481 assertNotConstant(t0)
482 loadi TagOffset[cfr, t0, 8], t1
483 loadi PayloadOffset[cfr, t0, 8], t0
484 bineq t1, CellTag, .opEqNullImmediate
485 loadp JSCell::m_structure[t0], t1
486 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
487 move 0, t1
488 jmp .opEqNullNotImmediate
489 .opEqNullMasqueradesAsUndefined:
490 loadp CodeBlock[cfr], t0
491 loadp CodeBlock::m_globalObject[t0], t0
492 cpeq Structure::m_globalObject[t1], t0, t1
493 jmp .opEqNullNotImmediate
494 .opEqNullImmediate:
495 cieq t1, NullTag, t2
496 cieq t1, UndefinedTag, t1
497 ori t2, t1
498 .opEqNullNotImmediate:
499 storei BooleanTag, TagOffset[cfr, t3, 8]
500 storei t1, PayloadOffset[cfr, t3, 8]
501 dispatch(3)
502
503
# op_neq: exact mirror of op_eq with the comparison inverted (cineq) and
# its own slow path.
504 _llint_op_neq:
505 traceExecution()
506 loadi 12[PC], t2
507 loadi 8[PC], t0
508 loadConstantOrVariable(t2, t3, t1)
509 loadConstantOrVariable2Reg(t0, t2, t0)
510 bineq t2, t3, .opNeqSlow
511 bieq t2, CellTag, .opNeqSlow
512 bib t2, LowestTag, .opNeqSlow
513 loadi 4[PC], t2
514 cineq t0, t1, t0
515 storei BooleanTag, TagOffset[cfr, t2, 8]
516 storei t0, PayloadOffset[cfr, t2, 8]
517 dispatch(4)
518
519 .opNeqSlow:
520 callSlowPath(_llint_slow_path_neq)
521 dispatch(4)
522
523
# op_neq_null: mirror of op_eq_null with inverted comparisons; note the
# immediate case combines with `andi` (not-null AND not-undefined).
524 _llint_op_neq_null:
525 traceExecution()
526 loadi 8[PC], t0
527 loadi 4[PC], t3
528 assertNotConstant(t0)
529 loadi TagOffset[cfr, t0, 8], t1
530 loadi PayloadOffset[cfr, t0, 8], t0
531 bineq t1, CellTag, .opNeqNullImmediate
532 loadp JSCell::m_structure[t0], t1
533 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
534 move 1, t1
535 jmp .opNeqNullNotImmediate
536 .opNeqNullMasqueradesAsUndefined:
537 loadp CodeBlock[cfr], t0
538 loadp CodeBlock::m_globalObject[t0], t0
539 cpneq Structure::m_globalObject[t1], t0, t1
540 jmp .opNeqNullNotImmediate
541 .opNeqNullImmediate:
542 cineq t1, NullTag, t2
543 cineq t1, UndefinedTag, t1
544 andi t2, t1
545 .opNeqNullNotImmediate:
546 storei BooleanTag, TagOffset[cfr, t3, 8]
547 storei t1, PayloadOffset[cfr, t3, 8]
548 dispatch(3)
549
550
# Shared body for op_stricteq/op_nstricteq.  Fast path: same tag, not a
# double; cells are allowed only when neither operand is a string (string
# strict-equality needs content comparison, so it goes slow).
551 macro strictEq(equalityOperation, slowPath)
552 loadi 12[PC], t2
553 loadi 8[PC], t0
554 loadConstantOrVariable(t2, t3, t1)
555 loadConstantOrVariable2Reg(t0, t2, t0)
556 bineq t2, t3, .slow
557 bib t2, LowestTag, .slow
558 bineq t2, CellTag, .notString
559 loadp JSCell::m_structure[t0], t2
560 loadp JSCell::m_structure[t1], t3
561 bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
562 bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
563 .notString:
564 loadi 4[PC], t2
565 equalityOperation(t0, t1, t0)
566 storei BooleanTag, TagOffset[cfr, t2, 8]
567 storei t0, PayloadOffset[cfr, t2, 8]
568 dispatch(4)
569
570 .slow:
571 callSlowPath(slowPath)
572 dispatch(4)
573 end
574
575 _llint_op_stricteq:
576 traceExecution()
577 strictEq(macro (left, right, result) cieq left, right, result end, _llint_slow_path_stricteq)
578
579
580 _llint_op_nstricteq:
581 traceExecution()
582 strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
583
584
# op_inc: pre-increment register 4[PC] in place.  Fast path only for
# Int32-tagged values; baddio branches to slow on signed overflow.
585 _llint_op_inc:
586 traceExecution()
587 loadi 4[PC], t0
588 bineq TagOffset[cfr, t0, 8], Int32Tag, .opIncSlow
589 loadi PayloadOffset[cfr, t0, 8], t1
590 baddio 1, t1, .opIncSlow
591 storei t1, PayloadOffset[cfr, t0, 8]
592 dispatch(2)
593
594 .opIncSlow:
595 callSlowPath(_llint_slow_path_pre_inc)
596 dispatch(2)
597
598
# op_dec: pre-decrement, mirror of op_inc with overflow-checked subtract.
599 _llint_op_dec:
600 traceExecution()
601 loadi 4[PC], t0
602 bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow
603 loadi PayloadOffset[cfr, t0, 8], t1
604 bsubio 1, t1, .opDecSlow
605 storei t1, PayloadOffset[cfr, t0, 8]
606 dispatch(2)
607
608 .opDecSlow:
609 callSlowPath(_llint_slow_path_pre_dec)
610 dispatch(2)
611
612
# op_to_number: dst 4[PC] = ToNumber(src 8[PC]).  Ints and doubles
# (tag >= LowestTag is non-numeric) pass through unchanged.
613 _llint_op_to_number:
614 traceExecution()
615 loadi 8[PC], t0
616 loadi 4[PC], t1
617 loadConstantOrVariable(t0, t2, t3)
618 bieq t2, Int32Tag, .opToNumberIsInt
619 biaeq t2, LowestTag, .opToNumberSlow
620 .opToNumberIsInt:
621 storei t2, TagOffset[cfr, t1, 8]
622 storei t3, PayloadOffset[cfr, t1, 8]
623 dispatch(3)
624
625 .opToNumberSlow:
626 callSlowPath(_llint_slow_path_to_number)
627 dispatch(3)
628
629
# op_negate: dst 4[PC] = -src 8[PC].  Int fast path rejects payloads with
# no bits in 0x7fffffff set (0 and INT_MIN: -0 must be a double, INT_MIN
# negation overflows).  Double fast path flips the sign bit in the high
# word (the tag half of the boxed double).
630 _llint_op_negate:
631 traceExecution()
632 loadi 8[PC], t0
633 loadi 4[PC], t3
634 loadConstantOrVariable(t0, t1, t2)
635 bineq t1, Int32Tag, .opNegateSrcNotInt
636 btiz t2, 0x7fffffff, .opNegateSlow
637 negi t2
638 storei Int32Tag, TagOffset[cfr, t3, 8]
639 storei t2, PayloadOffset[cfr, t3, 8]
640 dispatch(3)
641 .opNegateSrcNotInt:
642 bia t1, LowestTag, .opNegateSlow
643 xori 0x80000000, t1
644 storei t1, TagOffset[cfr, t3, 8]
645 storei t2, PayloadOffset[cfr, t3, 8]
646 dispatch(3)
647
648 .opNegateSlow:
649 callSlowPath(_llint_slow_path_negate)
650 dispatch(3)
651
652
# Shared body for binary arithmetic: operands 8[PC] op 12[PC] -> 4[PC].
# Int+int runs integerOperationAndStore (which also writes the result);
# any int/double mix is promoted to doubles and run through
# doubleOperation, storing a raw 8-byte double into the dst register.
653 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
654 loadi 12[PC], t2
655 loadi 8[PC], t0
656 loadConstantOrVariable(t2, t3, t1)
657 loadConstantOrVariable2Reg(t0, t2, t0)
658 bineq t2, Int32Tag, .op1NotInt
659 bineq t3, Int32Tag, .op2NotInt
660 loadi 4[PC], t2
661 integerOperationAndStore(t3, t1, t0, .slow, t2)
662 dispatch(5)
663
664 .op1NotInt:
665 # First operand is definitely not an int, the second operand could be anything.
666 bia t2, LowestTag, .slow
667 bib t3, LowestTag, .op1NotIntOp2Double
668 bineq t3, Int32Tag, .slow
669 ci2d t1, ft1
670 jmp .op1NotIntReady
671 .op1NotIntOp2Double:
672 fii2d t1, t3, ft1
673 .op1NotIntReady:
674 loadi 4[PC], t1
675 fii2d t0, t2, ft0
676 doubleOperation(ft1, ft0)
677 stored ft0, [cfr, t1, 8]
678 dispatch(5)
679
680 .op2NotInt:
681 # First operand is definitely an int, the second operand is definitely not.
682 loadi 4[PC], t2
683 bia t3, LowestTag, .slow
684 ci2d t0, ft0
685 fii2d t1, t3, ft1
686 doubleOperation(ft1, ft0)
687 stored ft0, [cfr, t2, 8]
688 dispatch(5)
689
690 .slow:
691 callSlowPath(slowPath)
692 dispatch(5)
693 end
694
# Convenience wrapper for ops whose integer case is a plain branching
# operation: it stores Int32Tag and the result for you.
695 macro binaryOp(integerOperation, doubleOperation, slowPath)
696 binaryOpCustomStore(
697 macro (int32Tag, left, right, slow, index)
698 integerOperation(left, right, slow)
699 storei int32Tag, TagOffset[cfr, index, 8]
700 storei right, PayloadOffset[cfr, index, 8]
701 end,
702 doubleOperation, slowPath)
703 end
704
# op_add: overflow-checked integer add, else double add.
705 _llint_op_add:
706 traceExecution()
707 binaryOp(
708 macro (left, right, slow) baddio left, right, slow end,
709 macro (left, right) addd left, right end,
710 _llint_slow_path_add)
711
712
# op_mul: overflow-checked multiply; a zero result with a negative operand
# must be -0, which cannot be an int, so it goes slow.
713 _llint_op_mul:
714 traceExecution()
715 binaryOpCustomStore(
716 macro (int32Tag, left, right, slow, index)
717 const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant.
718 move right, scratch
719 bmulio left, scratch, slow
720 btinz scratch, .done
721 bilt left, 0, slow
722 bilt right, 0, slow
723 .done:
724 storei Int32Tag, TagOffset[cfr, index, 8]
725 storei scratch, PayloadOffset[cfr, index, 8]
726 end,
727 macro (left, right) muld left, right end,
728 _llint_slow_path_mul)
729
730
# op_sub: overflow-checked integer subtract, else double subtract.
731 _llint_op_sub:
732 traceExecution()
733 binaryOp(
734 macro (left, right, slow) bsubio left, right, slow end,
735 macro (left, right) subd left, right end,
736 _llint_slow_path_sub)
737
738
# op_div: always divides as doubles; bcd2i stores an int result only when
# the quotient converts back to int32 exactly, else the raw double is kept.
739 _llint_op_div:
740 traceExecution()
741 binaryOpCustomStore(
742 macro (int32Tag, left, right, slow, index)
743 ci2d left, ft0
744 ci2d right, ft1
745 divd ft0, ft1
746 bcd2i ft1, right, .notInt
747 storei int32Tag, TagOffset[cfr, index, 8]
748 storei right, PayloadOffset[cfr, index, 8]
749 jmp .done
750 .notInt:
751 stored ft1, [cfr, index, 8]
752 .done:
753 end,
754 macro (left, right) divd left, right end,
755 _llint_slow_path_div)
756
757
# Shared body for bitwise/shift ops: 8[PC] op 12[PC] -> 4[PC], fast only
# when both operands are Int32.  `advance` differs per opcode because some
# of these bytecodes carry extra operands.
758 macro bitOp(operation, slowPath, advance)
759 loadi 12[PC], t2
760 loadi 8[PC], t0
761 loadConstantOrVariable(t2, t3, t1)
762 loadConstantOrVariable2Reg(t0, t2, t0)
763 bineq t3, Int32Tag, .slow
764 bineq t2, Int32Tag, .slow
765 loadi 4[PC], t2
766 operation(t1, t0, .slow)
767 storei t3, TagOffset[cfr, t2, 8]
768 storei t0, PayloadOffset[cfr, t2, 8]
769 dispatch(advance)
770
771 .slow:
772 callSlowPath(slowPath)
773 dispatch(advance)
774 end
775
776 _llint_op_lshift:
777 traceExecution()
778 bitOp(
779 macro (left, right, slow) lshifti left, right end,
780 _llint_slow_path_lshift,
781 4)
782
783
784 _llint_op_rshift:
785 traceExecution()
786 bitOp(
787 macro (left, right, slow) rshifti left, right end,
788 _llint_slow_path_rshift,
789 4)
790
791
# op_urshift: a negative int32 result cannot represent the unsigned value,
# so it bails to the slow path to box it as a double.
792 _llint_op_urshift:
793 traceExecution()
794 bitOp(
795 macro (left, right, slow)
796 urshifti left, right
797 bilt right, 0, slow
798 end,
799 _llint_slow_path_urshift,
800 4)
801
802
803 _llint_op_bitand:
804 traceExecution()
805 bitOp(
806 macro (left, right, slow) andi left, right end,
807 _llint_slow_path_bitand,
808 5)
809
810
811 _llint_op_bitxor:
812 traceExecution()
813 bitOp(
814 macro (left, right, slow) xori left, right end,
815 _llint_slow_path_bitxor,
816 5)
817
818
819 _llint_op_bitor:
820 traceExecution()
821 bitOp(
822 macro (left, right, slow) ori left, right end,
823 _llint_slow_path_bitor,
824 5)
825
826
# op_check_has_instance: fast path requires the constructor (12[PC]) to be
# a cell with default hasInstance behavior; otherwise the slow path runs
# and re-dispatches itself (dispatch(0)).
827 _llint_op_check_has_instance:
828 traceExecution()
829 loadi 12[PC], t1
830 loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
831 loadp JSCell::m_structure[t0], t0
832 btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
833 dispatch(5)
834
835 .opCheckHasInstanceSlow:
836 callSlowPath(_llint_slow_path_check_has_instance)
837 dispatch(0)
838
839
# op_instanceof: dst 4[PC] = value 8[PC] instanceof prototype 12[PC].
# Walks the value's prototype chain until it hits the prototype (true) or
# a non-cell payload terminates the chain (false).
840 _llint_op_instanceof:
841 traceExecution()
842 # Actually do the work.
843 loadi 12[PC], t0
844 loadi 4[PC], t3
845 loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
846 loadp JSCell::m_structure[t1], t2
847 bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
848 loadi 8[PC], t0
849 loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
850
851 # Register state: t1 = prototype, t2 = value
852 move 1, t0
853 .opInstanceofLoop:
854 loadp JSCell::m_structure[t2], t2
855 loadi Structure::m_prototype + PayloadOffset[t2], t2
856 bpeq t2, t1, .opInstanceofDone
857 btinz t2, .opInstanceofLoop
858
859 move 0, t0
860 .opInstanceofDone:
861 storei BooleanTag, TagOffset[cfr, t3, 8]
862 storei t0, PayloadOffset[cfr, t3, 8]
863 dispatch(4)
864
865 .opInstanceofSlow:
866 callSlowPath(_llint_slow_path_instanceof)
867 dispatch(4)
868
869
# op_is_undefined: dst 4[PC] = (src 8[PC] is undefined).  Non-cells just
# compare the tag; cells are true only when they masquerade as undefined
# in this frame's global object.
870 _llint_op_is_undefined:
871 traceExecution()
872 loadi 8[PC], t1
873 loadi 4[PC], t0
874 loadConstantOrVariable(t1, t2, t3)
875 storei BooleanTag, TagOffset[cfr, t0, 8]
876 bieq t2, CellTag, .opIsUndefinedCell
877 cieq t2, UndefinedTag, t3
878 storei t3, PayloadOffset[cfr, t0, 8]
879 dispatch(3)
880 .opIsUndefinedCell:
881 loadp JSCell::m_structure[t3], t1
882 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
883 move 0, t1
884 storei t1, PayloadOffset[cfr, t0, 8]
885 dispatch(3)
886 .opIsUndefinedMasqueradesAsUndefined:
887 loadp CodeBlock[cfr], t3
888 loadp CodeBlock::m_globalObject[t3], t3
889 cpeq Structure::m_globalObject[t1], t3, t1
890 storei t1, PayloadOffset[cfr, t0, 8]
891 dispatch(3)
892
893
# op_is_boolean: dst 4[PC] = (tag of src 8[PC] == BooleanTag).
894 _llint_op_is_boolean:
895 traceExecution()
896 loadi 8[PC], t1
897 loadi 4[PC], t2
898 loadConstantOrVariableTag(t1, t0)
899 cieq t0, BooleanTag, t0
900 storei BooleanTag, TagOffset[cfr, t2, 8]
901 storei t0, PayloadOffset[cfr, t2, 8]
902 dispatch(3)
903
904
# op_is_number: a value is a number when its tag is Int32Tag or below
# LowestTag (a double's high word).  The unsigned compare of tag+1 against
# LowestTag+1 covers both ranges in one test.
905 _llint_op_is_number:
906 traceExecution()
907 loadi 8[PC], t1
908 loadi 4[PC], t2
909 loadConstantOrVariableTag(t1, t0)
910 storei BooleanTag, TagOffset[cfr, t2, 8]
911 addi 1, t0
912 cib t0, LowestTag + 1, t1
913 storei t1, PayloadOffset[cfr, t2, 8]
914 dispatch(3)
915
916
# op_is_string: true iff src is a cell whose structure type is StringType.
917 _llint_op_is_string:
918 traceExecution()
919 loadi 8[PC], t1
920 loadi 4[PC], t2
921 loadConstantOrVariable(t1, t0, t3)
922 storei BooleanTag, TagOffset[cfr, t2, 8]
923 bineq t0, CellTag, .opIsStringNotCell
924 loadp JSCell::m_structure[t3], t0
925 cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
926 storei t1, PayloadOffset[cfr, t2, 8]
927 dispatch(3)
928 .opIsStringNotCell:
929 storep 0, PayloadOffset[cfr, t2, 8]
930 dispatch(3)
931
932
# Load a property {tag, payload} from the object's butterfly given a
# PropertyOffset known to be out-of-line (asserted >= firstOutOfLineOffset).
# Out-of-line offsets index backwards from the butterfly, hence the negate.
933 macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload)
934 assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end)
935 negi propertyOffset
936 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
937 loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
938 loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
939 end
940
# General property load: inline offsets address inside the JSObject's own
# storage, out-of-line offsets address the butterfly; both cases funnel to
# one indexed load.  Clobbers both propertyOffset and objectAndStorage.
941 macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload)
942 bilt propertyOffset, firstOutOfLineOffset, .isInline
943 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
944 negi propertyOffset
945 jmp .ready
946 .isInline:
947 addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
948 .ready:
949 loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
950 loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
951 end
952
# Inline-cached global resolve: the cached Structure at 12[PC] must still
# match the global object's structure; then load the property at the
# cached out-of-line offset 16[PC] and value-profile the result.
953 macro resolveGlobal(size, slow)
954 # Operands are as follows:
955 # 4[PC] Destination for the load.
956 # 8[PC] Property identifier index in the code block.
957 # 12[PC] Structure pointer, initialized to 0 by bytecode generator.
958 # 16[PC] Offset in global object, initialized to 0 by bytecode generator.
959 loadp CodeBlock[cfr], t0
960 loadp CodeBlock::m_globalObject[t0], t0
961 loadp JSCell::m_structure[t0], t1
962 bpneq t1, 12[PC], slow
963 loadi 16[PC], t1
964 loadPropertyAtVariableOffsetKnownNotInline(t1, t0, t2, t3)
965 loadi 4[PC], t0
966 storei t2, TagOffset[cfr, t0, 8]
967 storei t3, PayloadOffset[cfr, t0, 8]
968 loadi (size - 1) * 4[PC], t0
969 valueProfile(t2, t3, t0)
970 end
971
# op_init_global_const: store src 8[PC] through the direct variable
# pointer at 4[PC] (a register address, not a frame index).
972 _llint_op_init_global_const:
973 traceExecution()
974 loadi 8[PC], t1
975 loadi 4[PC], t0
976 loadConstantOrVariable(t1, t2, t3)
977 writeBarrier(t2, t3)
978 storei t2, TagOffset[t0]
979 storei t3, PayloadOffset[t0]
980 dispatch(5)
981
982
# op_init_global_const_check: same as above, but first tests the watch
# byte at 12[PC]; a non-zero byte means the variable is watched and the
# slow path must fire the watchpoint.
983 _llint_op_init_global_const_check:
984 traceExecution()
985 loadp 12[PC], t2
986 loadi 8[PC], t1
987 loadi 4[PC], t0
988 btbnz [t2], .opInitGlobalConstCheckSlow
989 loadConstantOrVariable(t1, t2, t3)
990 writeBarrier(t2, t3)
991 storei t2, TagOffset[t0]
992 storei t3, PayloadOffset[t0]
993 dispatch(5)
994 .opInitGlobalConstCheckSlow:
995 callSlowPath(_llint_slow_path_init_global_const_check)
996 dispatch(5)
997
998 # We only do monomorphic get_by_id caching for now, and we do not modify the
999 # opcode. We do, however, allow for the cache to change anytime if fails, since
1000 # ping-ponging is free. At best we get lucky and the get_by_id will continue
1001 # to take fast path on the new cache. At worst we take slow path, which is what
1002 # we would have been doing anyway.
1003
# Monomorphic get_by_id inline cache.  Operands: dst 4[PC], base 8[PC],
# cached Structure 16[PC], cached byte offset 20[PC], profile 32[PC].
# getPropertyStorage abstracts inline vs. out-of-line property storage.
1004 macro getById(getPropertyStorage)
1005 traceExecution()
1006 loadi 8[PC], t0
1007 loadi 16[PC], t1
1008 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
1009 loadi 20[PC], t2
1010 getPropertyStorage(
1011 t3,
1012 t0,
1013 macro (propertyStorage, scratch)
# Structure check: cache hit only if the base's structure still matches.
1014 bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
1015 loadi 4[PC], t1
1016 loadi TagOffset[propertyStorage, t2], scratch
1017 loadi PayloadOffset[propertyStorage, t2], t2
1018 storei scratch, TagOffset[cfr, t1, 8]
1019 storei t2, PayloadOffset[cfr, t1, 8]
1020 loadi 32[PC], t1
1021 valueProfile(scratch, t2, t1)
1022 dispatch(9)
1023 end)
1024
1025 .opGetByIdSlow:
1026 callSlowPath(_llint_slow_path_get_by_id)
1027 dispatch(9)
1028 end
1029
1030 _llint_op_get_by_id:
1031 getById(withInlineStorage)
1032
1033
1034 _llint_op_get_by_id_out_of_line:
1035 getById(withOutOfLineStorage)
1036
1037
# Specialized get_by_id for "length" on a JSArray with a non-slow indexing
# shape: read the public length straight out of the butterfly's IndexingHeader.
# Falls back to the generic get_by_id slow path on any mismatch.
_llint_op_get_array_length:
    traceExecution()
    loadi 8[PC], t0                     # t0 = base operand index
    loadp 16[PC], t1                    # t1 = ArrayProfile pointer
    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
    loadp JSCell::m_structure[t3], t2
    arrayProfile(t2, t1, t0)            # record the structure we saw
    btiz t2, IsArray, .opGetArrayLengthSlow          # must be a true array
    btiz t2, IndexingShapeMask, .opGetArrayLengthSlow  # must have an indexing shape
    loadi 4[PC], t1                     # t1 = destination register index
    loadp 32[PC], t2                    # t2 = value profile pointer
    loadp JSObject::m_butterfly[t3], t0
    loadi -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], t0
    bilt t0, 0, .opGetArrayLengthSlow   # length must fit in a non-negative int32
    valueProfile(Int32Tag, t0, t2)
    storep t0, PayloadOffset[cfr, t1, 8]
    storep Int32Tag, TagOffset[cfr, t1, 8]
    dispatch(9)

.opGetArrayLengthSlow:
    callSlowPath(_llint_slow_path_get_by_id)
    dispatch(9)
1060
1061
# get_arguments_length: fast path when the arguments object has not been
# materialized (its register still holds the empty value) — the length is just
# the frame's argument count minus one (excluding "this").
_llint_op_get_arguments_length:
    traceExecution()
    loadi 8[PC], t0                     # t0 = arguments register index
    loadi 4[PC], t1                     # t1 = destination register index
    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
    loadi ArgumentCount + PayloadOffset[cfr], t2
    subi 1, t2                          # exclude the "this" argument
    storei Int32Tag, TagOffset[cfr, t1, 8]
    storei t2, PayloadOffset[cfr, t1, 8]
    dispatch(4)

.opGetArgumentsLengthSlow:
    callSlowPath(_llint_slow_path_get_arguments_length)
    dispatch(4)
1076
1077
# Shared body for the non-transitioning put_by_id fast path. The slow-path
# label .opPutByIdSlow is defined once, after _llint_op_put_by_id below.
# Operands: 4[PC] base, 12[PC] source value, 16[PC] cached Structure,
# 20[PC] cached byte offset.
macro putById(getPropertyStorage)
    traceExecution()
    loadi 4[PC], t3                     # t3 = base operand index
    loadi 16[PC], t1                    # t1 = cached Structure
    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)  # t0 = base cell
    loadi 12[PC], t2                    # t2 = source value operand index
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow  # structure check
            loadi 20[PC], t1            # t1 = cached property offset (bytes)
            loadConstantOrVariable2Reg(t2, scratch, t2)  # scratch = tag, t2 = payload
            writeBarrier(scratch, t2)
            storei scratch, TagOffset[propertyStorage, t1]
            storei t2, PayloadOffset[propertyStorage, t1]
            dispatch(9)
        end)
end
1097
# put_by_id with inline property storage. The .opPutByIdSlow label below is
# shared by all putById/putByIdTransition expansions that branch to it.
_llint_op_put_by_id:
    putById(withInlineStorage)

.opPutByIdSlow:
    callSlowPath(_llint_slow_path_put_by_id)
    dispatch(9)
1104
1105
# put_by_id with out-of-line property storage.
_llint_op_put_by_id_out_of_line:
    putById(withOutOfLineStorage)
1108
1109
# Shared body for the transitioning put_by_id fast path: store the value at the
# cached offset, then install the new Structure (operand 24[PC]) on the object.
# additionalChecks optionally validates the prototype chain (see
# structureChainChecks); getPropertyStorage abstracts inline vs. out-of-line
# storage as in putById.
macro putByIdTransition(additionalChecks, getPropertyStorage)
    traceExecution()
    loadi 4[PC], t3                     # t3 = base operand index
    loadi 16[PC], t1                    # t1 = old (cached) Structure
    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)  # t0 = base cell
    loadi 12[PC], t2                    # t2 = source value operand index
    bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow  # structure check
    additionalChecks(t1, t3)
    loadi 20[PC], t1                    # t1 = cached property offset (bytes)
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            addp t1, propertyStorage, t3            # t3 = address of the slot
            loadConstantOrVariable2Reg(t2, t1, t2)  # t1 = tag, t2 = payload
            writeBarrier(t1, t2)
            storei t1, TagOffset[t3]
            loadi 24[PC], t1            # t1 = new Structure
            storei t2, PayloadOffset[t3]
            storep t1, JSCell::m_structure[t0]      # commit the transition
            dispatch(9)
        end)
end
1133
# Used for direct transitions: no prototype-chain validation is needed.
macro noAdditionalChecks(oldStructure, scratch)
end
1136
# Validate that each object on the prototype chain still has the Structure
# recorded in the cached StructureChain (operand 28[PC]); otherwise bail to
# .opPutByIdSlow. Walks until a null prototype is reached.
macro structureChainChecks(oldStructure, scratch)
    const protoCell = oldStructure # Reusing the oldStructure register for the proto

    loadp 28[PC], scratch               # scratch = cached StructureChain
    assert(macro (ok) btpnz scratch, ok end)
    loadp StructureChain::m_vector[scratch], scratch  # scratch = Structure* array
    assert(macro (ok) btpnz scratch, ok end)
    bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
.loop:
    loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
    loadp JSCell::m_structure[protoCell], oldStructure
    bpneq oldStructure, [scratch], .opPutByIdSlow  # chain entry must still match
    addp 4, scratch                     # advance to next Structure* (32-bit pointers)
    bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
.done:
end
1153
# The four put_by_id transition variants: direct transitions skip prototype
# chain validation; "normal" ones validate it via structureChainChecks.
_llint_op_put_by_id_transition_direct:
    putByIdTransition(noAdditionalChecks, withInlineStorage)


_llint_op_put_by_id_transition_direct_out_of_line:
    putByIdTransition(noAdditionalChecks, withOutOfLineStorage)


_llint_op_put_by_id_transition_normal:
    putByIdTransition(structureChainChecks, withInlineStorage)


_llint_op_put_by_id_transition_normal_out_of_line:
    putByIdTransition(structureChainChecks, withOutOfLineStorage)
1168
1169
# get_by_val fast paths, keyed on the base object's indexing shape:
# Int32/Contiguous (tagged values in the butterfly), Double (raw doubles), and
# ArrayStorage-family. Anything else, out-of-range indices, holes, or NaN-boxed
# impostors go to the slow path / out-of-bounds bookkeeping.
# Operands: 4[PC] dst, 8[PC] base, 12[PC] subscript, 16[PC] ArrayProfile,
# 20[PC] value profile.
_llint_op_get_by_val:
    traceExecution()
    loadi 8[PC], t2                     # t2 = base operand index
    loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)  # t0 = base cell
    loadp JSCell::m_structure[t0], t2
    loadp 16[PC], t3                    # t3 = ArrayProfile
    arrayProfile(t2, t3, t1)
    loadi 12[PC], t3                    # t3 = subscript operand index
    loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)  # t1 = index
    loadp JSObject::m_butterfly[t0], t3 # t3 = butterfly
    andi IndexingShapeMask, t2          # t2 = indexing shape bits
    bieq t2, Int32Shape, .opGetByValIsContiguous
    bineq t2, ContiguousShape, .opGetByValNotContiguous
.opGetByValIsContiguous:

    biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValOutOfBounds
    loadi TagOffset[t3, t1, 8], t2      # t2 = tag, t1 = payload of the element
    loadi PayloadOffset[t3, t1, 8], t1
    jmp .opGetByValDone

.opGetByValNotContiguous:
    bineq t2, DoubleShape, .opGetByValNotDouble
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValOutOfBounds
    loadd [t3, t1, 8], ft0
    bdnequn ft0, ft0, .opGetByValSlow   # NaN marks a hole in a double array
    # FIXME: This could be massively optimized.
    fd2ii ft0, t1, t2                   # split double into payload (t1) / tag (t2)
    loadi 4[PC], t0                     # t0 = destination register index
    jmp .opGetByValNotEmpty

.opGetByValNotDouble:
    subi ArrayStorageShape, t2
    bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t3], .opGetByValOutOfBounds
    loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
    loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1

.opGetByValDone:
    loadi 4[PC], t0                     # t0 = destination register index
    bieq t2, EmptyValueTag, .opGetByValOutOfBounds   # hole -> treat as OOB
.opGetByValNotEmpty:
    storei t2, TagOffset[cfr, t0, 8]
    storei t1, PayloadOffset[cfr, t0, 8]
    loadi 20[PC], t0                    # t0 = value profile pointer
    valueProfile(t2, t1, t0)
    dispatch(6)

.opGetByValOutOfBounds:
    if VALUE_PROFILER
        loadpFromInstruction(4, t0)
        storeb 1, ArrayProfile::m_outOfBounds[t0]   # remember for the DFG
    end
.opGetByValSlow:
    callSlowPath(_llint_slow_path_get_by_val)
    dispatch(6)
1225
1226
# get_argument_by_val: read arguments[i] directly off the call frame, provided
# the arguments object has not been materialized and the index is in range.
# Arguments live at negative offsets from ThisArgumentOffset, hence the negate.
_llint_op_get_argument_by_val:
    # FIXME: At some point we should array profile this. Right now it isn't necessary
    # since the DFG will never turn a get_argument_by_val into a GetByVal.
    traceExecution()
    loadi 8[PC], t0                     # t0 = arguments register index
    loadi 12[PC], t1                    # t1 = subscript operand index
    bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow  # already materialized
    loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)  # t2 = index
    addi 1, t2                          # skip "this"
    loadi ArgumentCount + PayloadOffset[cfr], t1
    biaeq t2, t1, .opGetArgumentByValSlow  # bounds check (unsigned)
    negi t2                             # arguments are below ThisArgumentOffset
    loadi 4[PC], t3                     # t3 = destination register index
    loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
    loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
    loadi 20[PC], t2                    # t2 = value profile pointer
    storei t0, TagOffset[cfr, t3, 8]
    storei t1, PayloadOffset[cfr, t3, 8]
    valueProfile(t0, t1, t2)
    dispatch(6)

.opGetArgumentByValSlow:
    callSlowPath(_llint_slow_path_get_argument_by_val)
    dispatch(6)
1251
1252
# get_by_pname: fast property read inside a for-in loop using the cached
# JSPropertyNameIterator. Validates that the property name and base still match
# the iterator's cached state, then loads the property by its enumeration
# index, compensating for the inline/out-of-line storage split.
_llint_op_get_by_pname:
    traceExecution()
    loadi 12[PC], t0                    # t0 = property-name operand index
    loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)  # t1 = name cell
    loadi 16[PC], t0                    # t0 = expected-name register index
    bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow  # name must be the loop's
    loadi 8[PC], t0                     # t0 = base operand index
    loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)  # t2 = base cell
    loadi 20[PC], t0                    # t0 = iterator register index
    loadi PayloadOffset[cfr, t0, 8], t3 # t3 = JSPropertyNameIterator
    loadp JSCell::m_structure[t2], t0
    bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
    loadi 24[PC], t0                    # t0 = enumeration-index register index
    loadi [cfr, t0, 8], t0
    subi 1, t0                          # stored index is 1-based
    biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
    bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty
    # Out-of-line property: rebase the index past the inline capacity.
    addi firstOutOfLineOffset, t0
    subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0
.opGetByPnameInlineProperty:
    loadPropertyAtVariableOffset(t0, t2, t1, t3)  # t1 = tag, t3 = payload
    loadi 4[PC], t0                     # t0 = destination register index
    storei t1, TagOffset[cfr, t0, 8]
    storei t3, PayloadOffset[cfr, t0, 8]
    dispatch(7)

.opGetByPnameSlow:
    callSlowPath(_llint_slow_path_get_by_pname)
    dispatch(7)
1282
1283
# Common put_by_val tail for Int32/Double/Contiguous shapes. On entry: t0 =
# butterfly, t1 = base cell, t3 = index. storeCallback(operand, scratch, base,
# index) performs the shape-specific store. In-bounds stores go straight
# through; stores within the vector but past the public length grow the public
# length (and note the hole-store in the ArrayProfile); stores past the vector
# go to .opPutByValOutOfBounds.
macro contiguousPutByVal(storeCallback)
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .outOfBounds
.storeResult:
    loadi 12[PC], t2                    # t2 = source value operand index
    storeCallback(t2, t1, t0, t3)
    dispatch(5)

.outOfBounds:
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValOutOfBounds
    if VALUE_PROFILER
        loadp 16[PC], t2
        storeb 1, ArrayProfile::m_mayStoreToHole[t2]
    end
    addi 1, t3, t2                      # new public length = index + 1
    storei t2, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
    jmp .storeResult
end
1301
# put_by_val fast paths, keyed on indexing shape. Int32 stores must stay int32
# (else slow path, which will convert the array); Double stores convert int32
# to double and reject NaN impostors; Contiguous stores any JSValue;
# ArrayStorage handles holes and length growth explicitly.
# Operands: 4[PC] base, 8[PC] subscript, 12[PC] value, 16[PC] ArrayProfile.
_llint_op_put_by_val:
    traceExecution()
    loadi 4[PC], t0                     # t0 = base operand index
    loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)  # t1 = base cell
    loadp JSCell::m_structure[t1], t2
    loadp 16[PC], t3                    # t3 = ArrayProfile
    arrayProfile(t2, t3, t0)
    loadi 8[PC], t0                     # t0 = subscript operand index
    loadConstantOrVariablePayload(t0, Int32Tag, t3, .opPutByValSlow)  # t3 = index
    loadp JSObject::m_butterfly[t1], t0 # t0 = butterfly
    andi IndexingShapeMask, t2          # t2 = indexing shape bits
    bineq t2, Int32Shape, .opPutByValNotInt32
    # Int32 shape: only int32 values may be stored on the fast path.
    contiguousPutByVal(
        macro (operand, scratch, base, index)
            loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow)
            storei Int32Tag, TagOffset[base, index, 8]
            storei scratch, PayloadOffset[base, index, 8]
        end)

.opPutByValNotInt32:
    bineq t2, DoubleShape, .opPutByValNotDouble
    # Double shape: store raw doubles; int32 values are converted.
    contiguousPutByVal(
        macro (operand, scratch, base, index)
            const tag = scratch
            const payload = operand
            loadConstantOrVariable2Reg(operand, tag, payload)
            bineq tag, Int32Tag, .notInt
            ci2d payload, ft0
            jmp .ready
        .notInt:
            fii2d payload, tag, ft0
            bdnequn ft0, ft0, .opPutByValSlow  # NaN-boxed non-double -> slow
        .ready:
            stored ft0, [base, index, 8]
        end)

.opPutByValNotDouble:
    bineq t2, ContiguousShape, .opPutByValNotContiguous
    # Contiguous shape: any JSValue may be stored.
    contiguousPutByVal(
        macro (operand, scratch, base, index)
            const tag = scratch
            const payload = operand
            loadConstantOrVariable2Reg(operand, tag, payload)
            writeBarrier(tag, payload)
            storei tag, TagOffset[base, index, 8]
            storei payload, PayloadOffset[base, index, 8]
        end)

.opPutByValNotContiguous:
    bineq t2, ArrayStorageShape, .opPutByValSlow
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValOutOfBounds
    bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
    loadi 12[PC], t2                    # t2 = source value operand index
    loadConstantOrVariable2Reg(t2, t1, t2)  # t1 = tag, t2 = payload
    writeBarrier(t1, t2)
    storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8]
    storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8]
    dispatch(5)

.opPutByValArrayStorageEmpty:
    # Filling a hole: bump the value count, and grow the length if needed.
    if VALUE_PROFILER
        loadp 16[PC], t1
        storeb 1, ArrayProfile::m_mayStoreToHole[t1]
    end
    addi 1, ArrayStorage::m_numValuesInVector[t0]
    bib t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult
    addi 1, t3, t1                      # new public length = index + 1
    storei t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
    jmp .opPutByValArrayStorageStoreResult

.opPutByValOutOfBounds:
    if VALUE_PROFILER
        loadpFromInstruction(4, t0)
        storeb 1, ArrayProfile::m_outOfBounds[t0]
    end
.opPutByValSlow:
    callSlowPath(_llint_slow_path_put_by_val)
    dispatch(5)
1381
1382
# Unconditional jump: operand 1 is the relative branch target.
_llint_op_jmp:
    traceExecution()
    dispatchBranch(4[PC])
1386
1387
# Shared body for jtrue/jfalse: load a boolean-tagged condition (operand 1),
# test it with conditionOp, and branch to the target offset (operand 2) on a
# hit. Non-boolean conditions fall through to the given slow path.
macro jumpTrueOrFalse(conditionOp, slow)
    loadi 4[PC], t1                     # t1 = condition operand index
    loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)  # t0 = boolean payload
    conditionOp(t0, .target)
    dispatch(3)

.target:
    dispatchBranch(8[PC])

.slow:
    callSlowPath(slow)
    dispatch(0)                         # slow path updated PC itself
end
1401
1402
# Shared body for jeq_null/jneq_null: dispatch the operand (a virtual register,
# never a constant) either to cellHandler (with its Structure and TypeInfo
# flags) or, for non-cells, to immediateHandler with the tag or'ed with 1 so
# that NullTag and UndefinedTag compare equal.
macro equalNull(cellHandler, immediateHandler)
    loadi 4[PC], t0                     # t0 = source operand index
    assertNotConstant(t0)
    loadi TagOffset[cfr, t0, 8], t1     # t1 = tag
    loadi PayloadOffset[cfr, t0, 8], t0 # t0 = payload
    bineq t1, CellTag, .immediate
    loadp JSCell::m_structure[t0], t2
    cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
    dispatch(3)

.target:
    dispatchBranch(8[PC])

.immediate:
    ori 1, t1                           # folds null and undefined tags together
    immediateHandler(t1, .target)
    dispatch(3)
end
1421
# jeq_null: branch if the value is null/undefined, or a cell that masquerades
# as undefined relative to this code block's global object.
_llint_op_jeq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target)
            btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpeq Structure::m_globalObject[structure], t0, target
        .opJeqNullNotMasqueradesAsUndefined:
        end,
        macro (value, target) bieq value, NullTag, target end)
1433
1434
# jneq_null: exact inverse of jeq_null — branch unless the value is
# null/undefined or a masquerading cell of this global object.
_llint_op_jneq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target)
            btbz value, MasqueradesAsUndefined, target
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpneq Structure::m_globalObject[structure], t0, target
        end,
        macro (value, target) bineq value, NullTag, target end)
1445
1446
# jneq_ptr: branch unless the operand is exactly the cached "special pointer"
# (indexed into JSGlobalObject::m_specialPointers by operand 2).
_llint_op_jneq_ptr:
    traceExecution()
    loadi 4[PC], t0                     # t0 = source register index
    loadi 8[PC], t1                     # t1 = special-pointer index
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_globalObject[t2], t2
    bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch  # non-cell can't match
    loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
    bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
.opJneqPtrBranch:
    dispatchBranch(12[PC])
.opJneqPtrFallThrough:
    dispatch(4)
1460
1461
# Shared body for the relational branch opcodes (jless etc.): compare operands
# 1 and 2 and branch to the target offset (operand 3) when the comparison
# holds. Int32/int32 uses integerCompare; any int32/double mix is converted to
# double and uses doubleCompare; anything non-numeric goes to slowPath.
# (Tags at or below LowestTag denote doubles in this value encoding.)
macro compare(integerCompare, doubleCompare, slowPath)
    loadi 4[PC], t2                     # t2 = lhs operand index
    loadi 8[PC], t3                     # t3 = rhs operand index
    loadConstantOrVariable(t2, t0, t1)  # lhs: t0 = tag, t1 = payload
    loadConstantOrVariable2Reg(t3, t2, t3)  # rhs: t2 = tag, t3 = payload
    bineq t0, Int32Tag, .op1NotInt
    bineq t2, Int32Tag, .op2NotInt
    integerCompare(t1, t3, .jumpTarget) # both int32
    dispatch(4)

.op1NotInt:
    bia t0, LowestTag, .slow            # lhs is not a double either -> slow
    bib t2, LowestTag, .op1NotIntOp2Double
    bineq t2, Int32Tag, .slow
    ci2d t3, ft1                        # rhs int32 -> double
    jmp .op1NotIntReady
.op1NotIntOp2Double:
    fii2d t3, t2, ft1                   # reassemble rhs double from tag/payload
.op1NotIntReady:
    fii2d t1, t0, ft0                   # reassemble lhs double
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.op2NotInt:
    ci2d t1, ft0                        # lhs int32 -> double
    bia t2, LowestTag, .slow            # rhs not a double -> slow
    fii2d t3, t2, ft1
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.jumpTarget:
    dispatchBranch(12[PC])

.slow:
    callSlowPath(slowPath)
    dispatch(0)                         # slow path updated PC itself
end
1499
1500
# switch_imm: look the int32 scrutinee up in the CodeBlock's immediate switch
# jump table (operand 1 selects the table). Doubles go to the slow path (they
# may still equal a table entry after truncation); everything else falls
# through to the default target (operand 2).
_llint_op_switch_imm:
    traceExecution()
    loadi 12[PC], t2                    # t2 = scrutinee operand index
    loadi 4[PC], t3                     # t3 = jump table index
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
    loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                         # t2 = &table[t3]
    bineq t1, Int32Tag, .opSwitchImmNotInt
    subi SimpleJumpTable::min[t2], t0   # rebase to table start
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
    loadi [t3, t0, 4], t1               # t1 = branch offset (0 means no entry)
    btiz t1, .opSwitchImmFallThrough
    dispatchBranchWithOffset(t1)

.opSwitchImmNotInt:
    bib t1, LowestTag, .opSwitchImmSlow # Go to slow path if it's a double.
.opSwitchImmFallThrough:
    dispatchBranch(8[PC])               # default target

.opSwitchImmSlow:
    callSlowPath(_llint_slow_path_switch_imm)
    dispatch(0)
1527
1528
# switch_char: like switch_imm but the scrutinee must be a single-character
# string; the character code indexes the character switch jump table. Ropes
# (strings with no resolved StringImpl) go to the slow path; any other
# mismatch takes the default target (operand 2).
_llint_op_switch_char:
    traceExecution()
    loadi 12[PC], t2                    # t2 = scrutinee operand index
    loadi 4[PC], t3                     # t3 = jump table index
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3
    loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                         # t2 = &table[t3]
    bineq t1, CellTag, .opSwitchCharFallThrough
    loadp JSCell::m_structure[t0], t1
    bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
    bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
    loadp JSString::m_value[t0], t0     # t0 = StringImpl (null for a rope)
    btpz t0, .opSwitchOnRope
    loadp StringImpl::m_data8[t0], t1
    btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
    loadh [t1], t0                      # 16-bit character
    jmp .opSwitchCharReady
.opSwitchChar8Bit:
    loadb [t1], t0                      # 8-bit character
.opSwitchCharReady:
    subi SimpleJumpTable::min[t2], t0   # rebase to table start
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
    loadi [t2, t0, 4], t1               # t1 = branch offset (0 means no entry)
    btiz t1, .opSwitchCharFallThrough
    dispatchBranchWithOffset(t1)

.opSwitchCharFallThrough:
    dispatchBranch(8[PC])               # default target

.opSwitchOnRope:
    callSlowPath(_llint_slow_path_switch_char)
    dispatch(0)
1565
1566
# new_func: materialize a function object via the slow path. If operand 3 is
# non-zero this is a "check" variant that skips creation when the destination
# register is already populated.
_llint_op_new_func:
    traceExecution()
    btiz 12[PC], .opNewFuncUnchecked    # no check requested -> always create
    loadi 4[PC], t1                     # t1 = destination register index
    bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
.opNewFuncUnchecked:
    callSlowPath(_llint_slow_path_new_func)
.opNewFuncDone:
    dispatch(4)
1576
1577
# If value profiling is enabled, record the Structure of the callee's "this"
# argument (when it is a cell) into the call's ArrayProfile (operand 20[PC]).
macro arrayProfileForCall()
    if VALUE_PROFILER
        loadi 12[PC], t3                # t3 = first-argument register offset
        bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
        loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
        loadp JSCell::m_structure[t0], t0
        loadp 20[PC], t1                # t1 = ArrayProfile
        storep t0, ArrayProfile::m_lastSeenStructure[t1]
    .done:
    end
end
1589
# Shared body for op_call/op_construct. If the callee matches the one cached in
# the LLIntCallLinkInfo (operand 16[PC]), build the new call frame in place and
# jump to the linked target; otherwise fall into the generic slow path. Note
# that PC is advanced past this instruction *before* frame setup, so the
# argument count is re-read at offset 8 - 24.
macro doCall(slowPath)
    loadi 4[PC], t0                     # t0 = callee operand index
    loadi 16[PC], t1                    # t1 = LLIntCallLinkInfo
    loadp LLIntCallLinkInfo::callee[t1], t2  # t2 = cached callee
    loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
    bineq t3, t2, .opCallSlow           # callee must match the cache
    loadi 12[PC], t3                    # t3 = new frame's register offset
    addp 24, PC                         # advance PC past the call instruction
    lshifti 3, t3                       # registers are 8 bytes each
    addp cfr, t3 # t3 contains the new value of cfr
    loadp JSFunction::m_scope[t2], t0
    storei t2, Callee + PayloadOffset[t3]
    storei t0, ScopeChain + PayloadOffset[t3]
    loadi 8 - 24[PC], t2                # argument count (PC already advanced)
    storei PC, ArgumentCount + TagOffset[cfr]  # stash return PC in caller frame
    storep cfr, CallerFrame[t3]
    storei t2, ArgumentCount + PayloadOffset[t3]
    storei CellTag, Callee + TagOffset[t3]
    storei CellTag, ScopeChain + TagOffset[t3]
    move t3, cfr                        # switch to the callee's frame
    callTargetFunction(t1)

.opCallSlow:
    slowPathForCall(6, slowPath)
end
1615
1616
# tear_off_activation: if the activation (operand 1) was actually created,
# copy the frame's locals into it via the slow path; otherwise do nothing.
_llint_op_tear_off_activation:
    traceExecution()
    loadi 4[PC], t0                     # t0 = activation register index
    bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
    callSlowPath(_llint_slow_path_tear_off_activation)
.opTearOffActivationNotCreated:
    dispatch(2)
1624
1625
# tear_off_arguments: same idea for the arguments object; the unmodified
# arguments register lives one slot below the operand's register.
_llint_op_tear_off_arguments:
    traceExecution()
    loadi 4[PC], t0
    subi 1, t0 # Get the unmodifiedArgumentsRegister
    bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
    callSlowPath(_llint_slow_path_tear_off_arguments)
.opTearOffArgumentsNotCreated:
    dispatch(3)
1634
1635
# ret: return the value in operand 1. doReturn() expects t1 = tag, t0 = payload.
_llint_op_ret:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t2
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    doReturn()
1642
1643
# call_put_result: store the just-returned value (t1 = tag, t0 = payload, as
# left by the call return protocol) into the destination register and profile it.
_llint_op_call_put_result:
    loadi 4[PC], t2                     # t2 = destination register index
    loadi 8[PC], t3                     # t3 = value profile pointer
    storei t1, TagOffset[cfr, t2, 8]
    storei t0, PayloadOffset[cfr, t2, 8]
    valueProfile(t1, t0, t3)
    traceExecution() # Needs to be here because it would clobber t1, t0
    dispatch(3)
1652
1653
# ret_object_or_this: constructor return. If operand 1 is an object, return it;
# otherwise return "this" (operand 2) instead.
_llint_op_ret_object_or_this:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t2
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    bineq t1, CellTag, .opRetObjectOrThisNotObject
    loadp JSCell::m_structure[t0], t2
    bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
    doReturn()

.opRetObjectOrThisNotObject:
    loadi 8[PC], t2                     # t2 = "this" operand index
    loadConstantOrVariable(t2, t1, t0)
    doReturn()
1668
1669
# to_primitive: values that are already primitive (non-cells) or strings pass
# through unchanged; any other cell needs the slow path's conversion.
_llint_op_to_primitive:
    traceExecution()
    loadi 8[PC], t2                     # t2 = source operand index
    loadi 4[PC], t3                     # t3 = destination register index
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    bineq t1, CellTag, .opToPrimitiveIsImm
    loadp JSCell::m_structure[t0], t2
    bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
    storei t1, TagOffset[cfr, t3, 8]
    storei t0, PayloadOffset[cfr, t3, 8]
    dispatch(3)

.opToPrimitiveSlowCase:
    callSlowPath(_llint_slow_path_to_primitive)
    dispatch(3)
1686
1687
# next_pname: advance a for-in iteration. Loads the next cached property name
# string into the destination and branches back to the loop body, provided the
# base object's structure (and its prototype chain) still match the iterator's
# cached state; otherwise the slow path re-validates. When the index reaches
# the end, fall through past the loop.
_llint_op_next_pname:
    traceExecution()
    loadi 12[PC], t1                    # t1 = index register index
    loadi 16[PC], t2                    # t2 = end-count register index
    loadi PayloadOffset[cfr, t1, 8], t0
    bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd  # iteration exhausted
    loadi 20[PC], t2                    # t2 = iterator register index
    loadi PayloadOffset[cfr, t2, 8], t2 # t2 = JSPropertyNameIterator
    loadp JSPropertyNameIterator::m_jsStrings[t2], t3
    loadi [t3, t0, 8], t3               # t3 = next name string
    addi 1, t0
    storei t0, PayloadOffset[cfr, t1, 8]  # store incremented index
    loadi 4[PC], t1                     # t1 = destination register index
    storei CellTag, TagOffset[cfr, t1, 8]
    storei t3, PayloadOffset[cfr, t1, 8]
    loadi 8[PC], t3                     # t3 = base register index
    loadi PayloadOffset[cfr, t3, 8], t3
    loadp JSCell::m_structure[t3], t1
    bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
    loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
    loadp StructureChain::m_vector[t0], t0
    btpz [t0], .opNextPnameTarget       # empty chain: nothing more to check
.opNextPnameCheckPrototypeLoop:
    bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
    loadp Structure::m_prototype + PayloadOffset[t1], t2
    loadp JSCell::m_structure[t2], t1
    bpneq t1, [t0], .opNextPnameSlow    # prototype structure must match cache
    addp 4, t0
    btpnz [t0], .opNextPnameCheckPrototypeLoop
.opNextPnameTarget:
    dispatchBranch(24[PC])              # back to the loop body

.opNextPnameEnd:
    dispatch(7)

.opNextPnameSlow:
    callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
    dispatch(0)
1726
1727
# op_catch: landing pad for thrown exceptions. Restores cfr and PC, moves the
# pending exception out of the VM (clearing it) and into the catch's
# destination register (operand 1).
_llint_op_catch:
    # This is where we end up from the JIT's throw trampoline (because the
    # machine code return address will be set to _llint_op_catch), and from
    # the interpreter's throw trampoline (see _llint_throw_trampoline).
    # The JIT throwing protocol calls for the cfr to be in t0. The throwing
    # code must have known that we were throwing to the interpreter, and have
    # set VM::targetInterpreterPCForThrow.
    move t0, cfr
    loadp JITStackFrame::vm[sp], t3
    loadi VM::targetInterpreterPCForThrow[t3], PC
    loadi VM::exception + PayloadOffset[t3], t0
    loadi VM::exception + TagOffset[t3], t1
    storei 0, VM::exception + PayloadOffset[t3]     # clear the pending exception
    storei EmptyValueTag, VM::exception + TagOffset[t3]
    loadi 4[PC], t2                     # t2 = destination register index
    storei t0, PayloadOffset[cfr, t2, 8]
    storei t1, TagOffset[cfr, t2, 8]
    traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
    dispatch(2)
1747
1748
# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
# scopes as they are traversed. scopeCheck() is called with two arguments: the register
# holding the scope, and a register that can be used for scratch. Note that this does not
# use t3, so you can hold stuff in t3 if need be.
#
# Walks deBruijinIndexOperand links up the scope chain from the frame's scope.
# In a function that needs an activation, the activation scope is skipped
# without consuming a link unless the activation has actually been created.
macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
    loadp ScopeChain + PayloadOffset[cfr], t0   # t0 = innermost scope
    loadi deBruijinIndexOperand, t2             # t2 = links remaining

    btiz t2, .done

    loadp CodeBlock[cfr], t1
    bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
    btbz CodeBlock::m_needsActivation[t1], .loop

    loadi CodeBlock::m_activationRegister[t1], t1

    # Need to conditionally skip over one scope.
    bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
    scopeCheck(t0, t1)
    loadp JSScope::m_next[t0], t0
.noActivation:
    subi 1, t2

    btiz t2, .done
.loop:
    scopeCheck(t0, t1)
    loadp JSScope::m_next[t0], t0
    subi 1, t2
    btinz t2, .loop

.done:

end
1782
# get_scoped_var: resolve the scope at the given De Bruijn depth, then load the
# indexed register out of that scope's register array into the destination.
_llint_op_get_scoped_var:
    traceExecution()
    # Operands are as follows:
    # 4[PC]    Destination for the load.
    # 8[PC]    Index of register in the scope.
    # 12[PC]   De Bruijn index.
    getDeBruijnScope(12[PC], macro (scope, scratch) end)  # t0 = resolved scope
    loadi 4[PC], t1                     # t1 = destination register index
    loadi 8[PC], t2                     # t2 = index within the scope
    loadp JSVariableObject::m_registers[t0], t0
    loadi TagOffset[t0, t2, 8], t3      # t3 = tag, t0 = payload
    loadi PayloadOffset[t0, t2, 8], t0
    storei t3, TagOffset[cfr, t1, 8]
    storei t0, PayloadOffset[cfr, t1, 8]
    loadi 16[PC], t1                    # t1 = value profile pointer
    valueProfile(t3, t0, t1)
    dispatch(5)
1800
1801
# put_scoped_var: resolve the scope at the given De Bruijn depth (operand 2)
# and store the source value (operand 3) into its indexed register (operand 1).
_llint_op_put_scoped_var:
    traceExecution()
    getDeBruijnScope(8[PC], macro (scope, scratch) end)  # t0 = resolved scope
    loadi 12[PC], t1                    # t1 = source value operand index
    loadConstantOrVariable(t1, t3, t2)  # t3 = tag, t2 = payload
    loadi 4[PC], t1                     # t1 = index within the scope
    writeBarrier(t3, t2)
    loadp JSVariableObject::m_registers[t0], t0
    storei t3, TagOffset[t0, t1, 8]
    storei t2, PayloadOffset[t0, t1, 8]
    dispatch(4)
1813
# op_end: terminate program-level code, returning operand 1 (always a real
# register, never a constant). doReturn() expects t1 = tag, t0 = payload.
_llint_op_end:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t0
    assertNotConstant(t0)
    loadi TagOffset[cfr, t0, 8], t1
    loadi PayloadOffset[cfr, t0, 8], t0
    doReturn()
1822
1823
_llint_throw_from_slow_path_trampoline:
    # When we throw from the interpreter (i.e. from LLIntSlowPaths), the throw
    # target is not necessarily interpreted code, so we come here. This
    # essentially emulates the JIT's throwing protocol: cfr for the handler in
    # t0, then jump to the machine-code catch target the VM recorded.
    loadp JITStackFrame::vm[sp], t1
    loadp VM::callFrameForThrow[t1], t0
    jmp VM::targetMachinePCForThrow[t1]
1831
1832
# Same as the slow-path throw trampoline, but entered as a call return target,
# so the machine return address must be popped/preserved first.
_llint_throw_during_call_trampoline:
    preserveReturnAddressAfterCall(t2)
    loadp JITStackFrame::vm[sp], t1
    loadp VM::callFrameForThrow[t1], t0
    jmp VM::targetMachinePCForThrow[t1]
1838
1839
# Trampoline for calling a host (native) function. Inherits the caller's scope
# chain, records the top call frame in the VM, then makes a C-ABI call to the
# function pointer found at executableOffsetToFunction within the callee's
# executable, with per-backend argument/return-address handling. On return,
# checks for a pending exception and rethrows via the slow-path trampoline.
macro nativeCallTrampoline(executableOffsetToFunction)
    storep 0, CodeBlock[cfr]            # native frames have no CodeBlock
    loadp CallerFrame[cfr], t0
    loadi ScopeChain + PayloadOffset[t0], t1
    storei CellTag, ScopeChain + TagOffset[cfr]   # inherit caller's scope chain
    storei t1, ScopeChain + PayloadOffset[cfr]
    if X86
        # Return address is already on the stack, offsetting JITStackFrame.
        loadp JITStackFrame::vm + 4[sp], t3 # Additional offset for return address
        storep cfr, VM::topCallFrame[t3]
        peek 0, t1
        storep t1, ReturnPC[cfr]
        move cfr, t2 # t2 = ecx
        subp 16 - 4, sp                 # align the stack for the C call
        loadi Callee + PayloadOffset[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        move t0, cfr
        call executableOffsetToFunction[t1]
        addp 16 - 4, sp
        loadp JITStackFrame::vm + 4[sp], t3
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
        loadp JITStackFrame::vm[sp], t3
        storep cfr, VM::topCallFrame[t3]
        move t0, t2
        preserveReturnAddressAfterCall(t3)  # save lr across the C call
        storep t3, ReturnPC[cfr]
        move cfr, t0
        loadi Callee + PayloadOffset[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        move t2, cfr
        call executableOffsetToFunction[t1]
        restoreReturnAddressBeforeReturn(t3)
        loadp JITStackFrame::vm[sp], t3
    elsif MIPS
        loadp JITStackFrame::vm[sp], t3
        storep cfr, VM::topCallFrame[t3]
        move t0, t2
        preserveReturnAddressAfterCall(t3)
        storep t3, ReturnPC[cfr]
        move cfr, t0
        loadi Callee + PayloadOffset[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        move t2, cfr
        move t0, a0                     # MIPS passes the frame in a0 explicitly
        call executableOffsetToFunction[t1]
        restoreReturnAddressBeforeReturn(t3)
        loadp JITStackFrame::vm[sp], t3
    elsif SH4
        loadp JITStackFrame::vm[sp], t3
        storep cfr, VM::topCallFrame[t3]
        move t0, t2
        preserveReturnAddressAfterCall(t3)
        storep t3, ReturnPC[cfr]
        move cfr, t0
        loadi Callee + PayloadOffset[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        move t2, cfr
        call executableOffsetToFunction[t1]
        restoreReturnAddressBeforeReturn(t3)
        loadp JITStackFrame::vm[sp], t3
    elsif C_LOOP
        loadp JITStackFrame::vm[sp], t3
        storep cfr, VM::topCallFrame[t3]
        move t0, t2
        preserveReturnAddressAfterCall(t3)
        storep t3, ReturnPC[cfr]
        move cfr, t0
        loadi Callee + PayloadOffset[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        move t2, cfr
        cloopCallNative executableOffsetToFunction[t1]  # C-loop backend's native call
        restoreReturnAddressBeforeReturn(t3)
        loadp JITStackFrame::vm[sp], t3
    else
        error
    end
    bineq VM::exception + TagOffset[t3], EmptyValueTag, .exception
    ret
.exception:
    preserveReturnAddressAfterCall(t1) # This is really only needed on X86
    loadi ArgumentCount + TagOffset[cfr], PC   # recover the bytecode PC
    callSlowPath(_llint_throw_from_native_call)
    jmp _llint_throw_from_slow_path_trampoline
end
1923