1 # Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
25 # Crash course on the language that this is written in (which I just call
26 # "assembly" even though it's more than that):
28 # - Mostly gas-style operand ordering. The last operand tends to be the
29 # destination. So "a := b" is written as "mov b, a". But unlike gas,
30 # comparisons are in-order, so "if (a < b)" is written as
33 # - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
34 # Currently this is just 32-bit so "i" and "p" are interchangeable
35 # except when an op supports one but not the other.
37 # - In general, valid operands for macro invocations and instructions are
38 # registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
39 # (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
40 # (eg "_foo" or ".foo"). Macro invocations can also take anonymous
41 # macros as operands. Instructions cannot take anonymous macros.
43 # - Labels must have names that begin with either "_" or ".". A "." label
44 # is local and gets renamed before code gen to minimize namespace
45 # pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
46 # may or may not be removed during code gen depending on whether the asm
47 # conventions for C name mangling on the target platform mandate a "_"
50 # - A "macro" is a lambda expression, which may be either anonymous or
51 # named. But this has caveats. "macro" can take zero or more arguments,
52 # which may be macros or any valid operands, but it can only return
53 # code. But you can do Turing-complete things via continuation passing
54 # style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
55 # that, since you'll just crash the assembler.
57 # - An "if" is a conditional on settings. Any identifier supplied in the
58 # predicate of an "if" is assumed to be a #define that is available
59 # during code gen. So you can't use "if" for computation in a macro, but
60 # you can use it to select different pieces of code for different
63 # - Arguments to macros follow lexical scoping rather than dynamic scoping.
64 # Consts also follow lexical scoping and may override (hide) arguments
65 # or other consts. All variables (arguments and constants) can be bound
66 # to operands. Additionally, arguments (but not constants) can be bound
70 # Below we have a bunch of constant declarations. Each constant must have
71 # a corresponding ASSERT() in LLIntData.cpp.
74 # Value representation constants.
# JSValue tag constants for the 32-bit (tag/payload) value representation.
# Per the note above, each must have a matching ASSERT() in LLIntData.cpp.
78 const UndefinedTag = -4
80 const EmptyValueTag = -6
81 const DeletedValueTag = -7
# LowestTag is the smallest (most negative) tag. Elsewhere in this file,
# unsigned compares against LowestTag (e.g. "bib tag, LowestTag, ...") are
# used to distinguish tag-encoded values from double-encoded ones.
82 const LowestTag = DeletedValueTag
# Advance the bytecode PC by "advance" slots and jump to the next opcode.
# (Body elided in this excerpt.)
86 macro dispatch(advance)

# Branch-style dispatch using an offset already loaded into a register.
# (Body elided in this excerpt.)
91 macro dispatchBranchWithOffset(pcOffset)

# Branch-style dispatch where the offset comes from the instruction stream;
# the loaded offset is delegated via t0. (Load of t0 elided in this excerpt.)
97 macro dispatchBranch(pcOffset)
99 dispatchBranchWithOffset(t0)

# After a call returns: the bytecode PC was stashed in the tag slot of the
# ArgumentCount frame header before the call; reload it from there.
102 macro dispatchAfterCall()
103 loadi ArgumentCount + TagOffset[cfr], PC
# Call a C slow-path helper with two arguments. Argument marshalling is
# platform-dependent (ARM variants shown; other arms elided in this excerpt).
107 macro cCall2(function, arg1, arg2)
108 if ARM or ARMv7 or ARMv7_TRADITIONAL
# C_LOOP (interpreter-in-C) fallback: delegate to the cloop call shim.
122 cloopCallSlowPath function, arg1, arg2

128 # This barely works. arg3 and arg4 should probably be immediates.
# Four-argument variant of cCall2; same platform-dependent marshalling.
129 macro cCall4(function, arg1, arg2, arg3, arg4)
130 if ARM or ARMv7 or ARMv7_TRADITIONAL

# Standard slow-path call protocol: pass the call frame and the bytecode PC.
156 macro callSlowPath(slowPath)
157 cCall2(slowPath, cfr, PC)
162 # Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
163 # should be an immediate integer - any integer you like; use it to identify the place you're
164 # debugging from. operand should likewise be an immediate, and should identify the operand
165 # in the instruction stream you'd like to print out.
166 macro traceOperand(fromWhere, operand)
167 cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)

172 # Debugging operation if you'd like to print the value of an operand in the instruction
173 # stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
# value (rest of comment elided in this excerpt).
175 macro traceValue(fromWhere, operand)
176 cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
181 # Call a slowPath for call opcodes.
# Stashes the post-call return PC in the ArgumentCount tag slot (so
# dispatchAfterCall can restore it), then calls the slow path.
182 macro callCallSlowPath(advance, slowPath, action)
183 addp advance * 4, PC, t0
184 storep t0, ArgumentCount + TagOffset[cfr]
185 cCall2(slowPath, cfr, PC)

# Watchdog-timer check: save PC, call the handler, and branch to the throw
# handler if it returned non-zero (in t0); otherwise restore PC and continue.
190 macro callWatchdogTimerHandler(throwHandler)
191 storei PC, ArgumentCount + TagOffset[cfr]
192 cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
194 btpnz t0, throwHandler
195 loadi ArgumentCount + TagOffset[cfr], PC

# Loop OSR hook: save PC, ask the slow path whether to jump into JIT code.
# (The branch on the slow path's result is elided in this excerpt.)
198 macro checkSwitchToJITForLoop()
202 storei PC, ArgumentCount + TagOffset[cfr]
203 cCall2(_llint_loop_osr, cfr, PC)
208 loadi ArgumentCount + TagOffset[cfr], PC
212 # Index, tag, and payload must be different registers. Index is not
# mutated. Loads either a frame variable (index < FirstConstantRegisterIndex)
# or a CodeBlock constant-register entry into tag/payload.
214 macro loadConstantOrVariable(index, tag, payload)
215 bigteq index, FirstConstantRegisterIndex, .constant
216 loadi TagOffset[cfr, index, 8], tag
217 loadi PayloadOffset[cfr, index, 8], payload
# .constant path (label elided in this excerpt): index the constants buffer.
220 loadp CodeBlock[cfr], payload
221 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
222 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
223 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
224 loadp TagOffset[payload, index, 8], tag
225 loadp PayloadOffset[payload, index, 8], payload

# Tag-only variant of loadConstantOrVariable.
229 macro loadConstantOrVariableTag(index, tag)
230 bigteq index, FirstConstantRegisterIndex, .constant
231 loadi TagOffset[cfr, index, 8], tag
234 loadp CodeBlock[cfr], tag
235 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
236 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
237 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
238 loadp TagOffset[tag, index, 8], tag

242 # Index and payload may be the same register. Index may be clobbered.
243 macro loadConstantOrVariable2Reg(index, tag, payload)
244 bigteq index, FirstConstantRegisterIndex, .constant
245 loadi TagOffset[cfr, index, 8], tag
246 loadi PayloadOffset[cfr, index, 8], payload
249 loadp CodeBlock[cfr], tag
250 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
251 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
252 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
# Here "tag" temporarily holds the constant's address (address computation
# elided in this excerpt); payload is loaded before tag so both can share
# registers with index.
255 loadp PayloadOffset[tag], payload
256 loadp TagOffset[tag], tag

# Like loadConstantOrVariable, but runs a caller-supplied check macro on the
# tag slot instead of loading the tag into a register.
260 macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
261 bigteq index, FirstConstantRegisterIndex, .constant
262 tagCheck(TagOffset[cfr, index, 8])
263 loadi PayloadOffset[cfr, index, 8], payload
266 loadp CodeBlock[cfr], payload
267 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
268 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
269 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
270 tagCheck(TagOffset[payload, index, 8])
271 loadp PayloadOffset[payload, index, 8], payload

275 # Index and payload must be different registers. Index is not mutated. Use
276 # this if you know what the tag of the variable should be. Doing the tag
277 # test as part of loading the variable reduces register use, but may not
278 # be faster than doing loadConstantOrVariable followed by a branch on the
# tag (rest of comment elided). Branches to "slow" if the tag mismatches.
280 macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
281 loadConstantOrVariablePayloadTagCustom(
283 macro (actualTag) bineq actualTag, expectedTag, slow end,

# Unchecked variant: loads the payload with a no-op tag check.
287 macro loadConstantOrVariablePayloadUnchecked(index, payload)
288 loadConstantOrVariablePayloadTagCustom(
290 macro (actualTag) end,
# GC write barrier hook. Intentionally empty for this collector.
294 macro writeBarrier(tag, payload)
295 # Nothing to do, since we don't have a generational or incremental collector.

# Record a value into a ValueProfile bucket for the JIT tiers to consume.
298 macro valueProfile(tag, payload, profile)
300 storei tag, ValueProfile::m_buckets + TagOffset[profile]
301 storei payload, ValueProfile::m_buckets + PayloadOffset[profile]
306 # Entrypoints into the interpreter

308 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Checks that enough arguments were passed; if not, calls slow_path which
# either fixes up the frame (t0 == 0) or reports an error (t0 != 0), in
# which case we unwind via the VM's throw machinery.
309 macro functionArityCheck(doneLabel, slow_path)
310 loadi PayloadOffset + ArgumentCount[cfr], t0
311 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
312 cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
# Error path: jump to the VM's recorded throw target.
315 loadp JITStackFrame::vm[sp], t1
316 loadp VM::callFrameForThrow[t1], t0
317 jmp VM::targetMachinePCForThrow[t1]
319 # Reload CodeBlock and PC, since the slow_path clobbered it.
320 loadp CodeBlock[cfr], t1
321 loadp CodeBlock::m_instructions[t1], PC
326 # Instruction implementations

# Fragment of op_enter (label and loop header elided in this excerpt):
# initializes all m_numVars frame locals to undefined, counting t2 down.
# NOTE(review): t1 presumably holds the 0 payload for undefined — the move
# that sets it is elided here; confirm against the full source.
330 loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
331 loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
332 btiz t2, .opEnterDone
333 move UndefinedTag, t0
337 storei t0, TagOffset[cfr, t2, 8]
338 storei t1, PayloadOffset[cfr, t2, 8]
339 btinz t2, .opEnterLoop
# Create the activation object lazily: only call the slow path if the
# target register is still empty.
344 _llint_op_create_activation:
347 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
348 callSlowPath(_llint_slow_path_create_activation)
349 .opCreateActivationDone:

# Initialize a lazily-created register slot to the empty value.
353 _llint_op_init_lazy_reg:
356 storei EmptyValueTag, TagOffset[cfr, t0, 8]
357 storei 0, PayloadOffset[cfr, t0, 8]

# Create the arguments object lazily, same pattern as create_activation.
361 _llint_op_create_arguments:
364 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
365 callSlowPath(_llint_slow_path_create_arguments)
366 .opCreateArgumentsDone:

# Fast-path object allocation for |this| using the callee JSFunction's
# allocation profile; falls back to the slow path if the profile is not
# yet populated (allocator pointer is null) or allocation fails.
370 _llint_op_create_this:
373 loadp PayloadOffset[cfr, t0, 8], t0
374 loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
375 loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
376 btpz t1, .opCreateThisSlow
377 allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
379 storei CellTag, TagOffset[cfr, t1, 8]
380 storei t0, PayloadOffset[cfr, t1, 8]
384 callSlowPath(_llint_slow_path_create_this)

# Load the Callee slot from the frame header, profile it, store to dst.
388 _llint_op_get_callee:
391 loadp PayloadOffset + Callee[cfr], t1
393 valueProfile(CellTag, t1, t2)
394 storei CellTag, TagOffset[cfr, t0, 8]
395 storei t1, PayloadOffset[cfr, t0, 8]

# Fast path: |this| is already an object (cell with type >= ObjectType);
# anything else goes to the slow path for ToObject conversion.
399 _llint_op_convert_this:
402 bineq TagOffset[cfr, t0, 8], CellTag, .opConvertThisSlow
403 loadi PayloadOffset[cfr, t0, 8], t0
404 loadp JSCell::m_structure[t0], t0
405 bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
407 valueProfile(CellTag, t0, t1)
411 callSlowPath(_llint_slow_path_convert_this)

# Fast-path object allocation from the opcode's ObjectAllocationProfile
# (operand 3); slow path on allocation failure.
415 _llint_op_new_object:
417 loadpFromInstruction(3, t0)
418 loadp ObjectAllocationProfile::m_allocator[t0], t1
419 loadp ObjectAllocationProfile::m_structure[t0], t2
420 allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
422 storei CellTag, TagOffset[cfr, t1, 8]
423 storei t0, PayloadOffset[cfr, t1, 8]
427 callSlowPath(_llint_slow_path_new_object)
# op_mov fragment (label elided): copy src (t1) to dst (t0) as tag+payload.
435 loadConstantOrVariable(t1, t2, t3)
436 storei t2, TagOffset[cfr, t0, 8]
437 storei t3, PayloadOffset[cfr, t0, 8]

# op_not fragment: only boolean-tagged values take the fast path; the
# payload flip itself is elided in this excerpt.
445 loadConstantOrVariable(t0, t2, t3)
446 bineq t2, BooleanTag, .opNotSlow
448 storei t2, TagOffset[cfr, t1, 8]
449 storei t3, PayloadOffset[cfr, t1, 8]
453 callSlowPath(_llint_slow_path_not)

# op_eq fragment: fast path requires identical tags that are neither cells
# nor doubles (unsigned-below LowestTag means double-encoded); the payload
# comparison is elided in this excerpt.
461 loadConstantOrVariable(t2, t3, t1)
462 loadConstantOrVariable2Reg(t0, t2, t0)
463 bineq t2, t3, .opEqSlow
464 bieq t2, CellTag, .opEqSlow
465 bib t2, LowestTag, .opEqSlow
468 storei BooleanTag, TagOffset[cfr, t2, 8]
469 storei t0, PayloadOffset[cfr, t2, 8]
473 callSlowPath(_llint_slow_path_eq)

# op_eq_null fragment: cells need the MasqueradesAsUndefined check against
# the current global object; immediates compare their tag against
# Null/Undefined (the NullTag compare is elided here).
481 assertNotConstant(t0)
482 loadi TagOffset[cfr, t0, 8], t1
483 loadi PayloadOffset[cfr, t0, 8], t0
484 bineq t1, CellTag, .opEqNullImmediate
485 loadp JSCell::m_structure[t0], t1
486 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
488 jmp .opEqNullNotImmediate
489 .opEqNullMasqueradesAsUndefined:
490 loadp CodeBlock[cfr], t0
491 loadp CodeBlock::m_globalObject[t0], t0
492 cpeq Structure::m_globalObject[t1], t0, t1
493 jmp .opEqNullNotImmediate
496 cieq t1, UndefinedTag, t1
498 .opEqNullNotImmediate:
499 storei BooleanTag, TagOffset[cfr, t3, 8]
500 storei t1, PayloadOffset[cfr, t3, 8]

# op_neq fragment: mirror of op_eq with the sense inverted (payload
# comparison elided in this excerpt).
508 loadConstantOrVariable(t2, t3, t1)
509 loadConstantOrVariable2Reg(t0, t2, t0)
510 bineq t2, t3, .opNeqSlow
511 bieq t2, CellTag, .opNeqSlow
512 bib t2, LowestTag, .opNeqSlow
515 storei BooleanTag, TagOffset[cfr, t2, 8]
516 storei t0, PayloadOffset[cfr, t2, 8]
520 callSlowPath(_llint_slow_path_neq)

# op_neq_null fragment: mirror of op_eq_null with inverted comparisons.
528 assertNotConstant(t0)
529 loadi TagOffset[cfr, t0, 8], t1
530 loadi PayloadOffset[cfr, t0, 8], t0
531 bineq t1, CellTag, .opNeqNullImmediate
532 loadp JSCell::m_structure[t0], t1
533 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
535 jmp .opNeqNullNotImmediate
536 .opNeqNullMasqueradesAsUndefined:
537 loadp CodeBlock[cfr], t0
538 loadp CodeBlock::m_globalObject[t0], t0
539 cpneq Structure::m_globalObject[t1], t0, t1
540 jmp .opNeqNullNotImmediate
542 cineq t1, NullTag, t2
543 cineq t1, UndefinedTag, t1
545 .opNeqNullNotImmediate:
546 storei BooleanTag, TagOffset[cfr, t3, 8]
547 storei t1, PayloadOffset[cfr, t3, 8]
# Shared implementation of stricteq/nstricteq. Fast path handles values
# with equal tags that are not doubles; for cells, two strings force the
# slow path (string content comparison), while other cell pairs compare by
# payload via equalityOperation. (Several branches elided in this excerpt.)
551 macro strictEq(equalityOperation, slowPath)
554 loadConstantOrVariable(t2, t3, t1)
555 loadConstantOrVariable2Reg(t0, t2, t0)
557 bib t2, LowestTag, .slow
558 bineq t2, CellTag, .notString
559 loadp JSCell::m_structure[t0], t2
560 loadp JSCell::m_structure[t1], t3
561 bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
562 bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
565 equalityOperation(t0, t1, t0)
566 storei BooleanTag, TagOffset[cfr, t2, 8]
567 storei t0, PayloadOffset[cfr, t2, 8]
571 callSlowPath(slowPath)

# op_stricteq / op_nstricteq: instantiate strictEq with eq / neq compares.
577 strictEq(macro (left, right, result) cieq left, right, result end, _llint_slow_path_stricteq)
582 strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
# op_inc fragment (label elided): int32 fast path; baddio branches to the
# slow path on signed overflow.
588 bineq TagOffset[cfr, t0, 8], Int32Tag, .opIncSlow
589 loadi PayloadOffset[cfr, t0, 8], t1
590 baddio 1, t1, .opIncSlow
591 storei t1, PayloadOffset[cfr, t0, 8]
595 callSlowPath(_llint_slow_path_pre_inc)

# op_dec fragment: same shape, bsubio overflows to the slow path.
602 bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow
603 loadi PayloadOffset[cfr, t0, 8], t1
604 bsubio 1, t1, .opDecSlow
605 storei t1, PayloadOffset[cfr, t0, 8]
609 callSlowPath(_llint_slow_path_pre_dec)

# op_to_number fragment: int32 or double (tag unsigned-below LowestTag)
# passes through unchanged; anything else goes slow.
617 loadConstantOrVariable(t0, t2, t3)
618 bieq t2, Int32Tag, .opToNumberIsInt
619 biaeq t2, LowestTag, .opToNumberSlow
621 storei t2, TagOffset[cfr, t1, 8]
622 storei t3, PayloadOffset[cfr, t1, 8]
626 callSlowPath(_llint_slow_path_to_number)

# op_negate fragment: int32 path refuses payloads with only the sign-area
# bits clear via the 0x7fffffff mask test (0 and INT_MIN cannot be negated
# as int32); the double path flips the sign in the tag word (the actual
# negation instructions are elided in this excerpt).
634 loadConstantOrVariable(t0, t1, t2)
635 bineq t1, Int32Tag, .opNegateSrcNotInt
636 btiz t2, 0x7fffffff, .opNegateSlow
638 storei Int32Tag, TagOffset[cfr, t3, 8]
639 storei t2, PayloadOffset[cfr, t3, 8]
642 bia t1, LowestTag, .opNegateSlow
644 storei t1, TagOffset[cfr, t3, 8]
645 storei t2, PayloadOffset[cfr, t3, 8]
649 callSlowPath(_llint_slow_path_negate)
# Generic arithmetic binary op. integerOperationAndStore handles the
# int32 × int32 case (including its own store); doubleOperation handles
# mixed/double cases with the result stored as a raw double in the frame
# slot. Tags unsigned-above LowestTag that aren't Int32Tag mean
# non-numeric => slow path. (Int-to-double conversions elided in excerpt.)
653 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
656 loadConstantOrVariable(t2, t3, t1)
657 loadConstantOrVariable2Reg(t0, t2, t0)
658 bineq t2, Int32Tag, .op1NotInt
659 bineq t3, Int32Tag, .op2NotInt
661 integerOperationAndStore(t3, t1, t0, .slow, t2)
665 # First operand is definitely not an int, the second operand could be anything.
666 bia t2, LowestTag, .slow
667 bib t3, LowestTag, .op1NotIntOp2Double
668 bineq t3, Int32Tag, .slow
676 doubleOperation(ft1, ft0)
677 stored ft0, [cfr, t1, 8]
681 # First operand is definitely an int, the second operand is definitely not.
683 bia t3, LowestTag, .slow
686 doubleOperation(ft1, ft0)
687 stored ft0, [cfr, t2, 8]
691 callSlowPath(slowPath)

# Convenience wrapper: standard "operate then store int32 tag+payload"
# integer case built on binaryOpCustomStore.
695 macro binaryOp(integerOperation, doubleOperation, slowPath)
697 macro (int32Tag, left, right, slow, index)
698 integerOperation(left, right, slow)
699 storei int32Tag, TagOffset[cfr, index, 8]
700 storei right, PayloadOffset[cfr, index, 8]
702 doubleOperation, slowPath)

# op_add: checked int32 add / double add.
708 macro (left, right, slow) baddio left, right, slow end,
709 macro (left, right) addd left, right end,
710 _llint_slow_path_add)

# op_mul: checked int32 multiply into a scratch (reusing the tag register,
# which holds a known constant), then store Int32Tag + product.
716 macro (int32Tag, left, right, slow, index)
717 const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant.
719 bmulio left, scratch, slow
724 storei Int32Tag, TagOffset[cfr, index, 8]
725 storei scratch, PayloadOffset[cfr, index, 8]
727 macro (left, right) muld left, right end,
728 _llint_slow_path_mul)

# op_sub: checked int32 subtract / double subtract.
734 macro (left, right, slow) bsubio left, right, slow end,
735 macro (left, right) subd left, right end,
736 _llint_slow_path_sub)

# op_div: performed in double; bcd2i converts back to int32 when the
# quotient is exactly representable, otherwise stores the raw double.
742 macro (int32Tag, left, right, slow, index)
746 bcd2i ft1, right, .notInt
747 storei int32Tag, TagOffset[cfr, index, 8]
748 storei right, PayloadOffset[cfr, index, 8]
751 stored ft1, [cfr, index, 8]
754 macro (left, right) divd left, right end,
755 _llint_slow_path_div)
# Generic bitwise/shift op: both operands must be int32 on the fast path;
# result keeps the Int32Tag.
758 macro bitOp(operation, slowPath, advance)
761 loadConstantOrVariable(t2, t3, t1)
762 loadConstantOrVariable2Reg(t0, t2, t0)
763 bineq t3, Int32Tag, .slow
764 bineq t2, Int32Tag, .slow
766 operation(t1, t0, .slow)
767 storei t3, TagOffset[cfr, t2, 8]
768 storei t0, PayloadOffset[cfr, t2, 8]
772 callSlowPath(slowPath)

# op_lshift / op_rshift / op_urshift / op_bitand / op_bitxor / op_bitor:
# instantiations of bitOp. The "slow" parameter is unused by ops that
# cannot fail (urshift's body is elided in this excerpt — it presumably
# branches to slow for negative results; confirm in full source).
779 macro (left, right, slow) lshifti left, right end,
780 _llint_slow_path_lshift,
787 macro (left, right, slow) rshifti left, right end,
788 _llint_slow_path_rshift,
795 macro (left, right, slow)
799 _llint_slow_path_urshift,
806 macro (left, right, slow) andi left, right end,
807 _llint_slow_path_bitand,
814 macro (left, right, slow) xori left, right end,
815 _llint_slow_path_bitxor,
822 macro (left, right, slow) ori left, right end,
823 _llint_slow_path_bitor,
# Verify the RHS of instanceof implements default hasInstance behavior;
# otherwise go to the slow path (which can throw or use custom behavior).
827 _llint_op_check_has_instance:
830 loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
831 loadp JSCell::m_structure[t0], t0
832 btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
835 .opCheckHasInstanceSlow:
836 callSlowPath(_llint_slow_path_check_has_instance)

# instanceof: walk the value's prototype chain looking for the prototype
# operand. Chain walk terminates on match or on a non-cell prototype
# payload (btinz loops while the payload is non-zero). The result store
# setup between the loop and the stores is elided in this excerpt.
840 _llint_op_instanceof:
842 # Actually do the work.
845 loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
846 loadp JSCell::m_structure[t1], t2
847 bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
849 loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
851 # Register state: t1 = prototype, t2 = value
854 loadp JSCell::m_structure[t2], t2
855 loadi Structure::m_prototype + PayloadOffset[t2], t2
856 bpeq t2, t1, .opInstanceofDone
857 btinz t2, .opInstanceofLoop
861 storei BooleanTag, TagOffset[cfr, t3, 8]
862 storei t0, PayloadOffset[cfr, t3, 8]
866 callSlowPath(_llint_slow_path_instanceof)
# is_undefined: immediates compare their tag to UndefinedTag; cells are
# only "undefined" if they masquerade as undefined relative to the current
# global object.
870 _llint_op_is_undefined:
874 loadConstantOrVariable(t1, t2, t3)
875 storei BooleanTag, TagOffset[cfr, t0, 8]
876 bieq t2, CellTag, .opIsUndefinedCell
877 cieq t2, UndefinedTag, t3
878 storei t3, PayloadOffset[cfr, t0, 8]
881 loadp JSCell::m_structure[t3], t1
882 btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
# Non-masquerading cell: t1 presumably holds 0/false here (the move is
# elided in this excerpt — confirm in full source).
884 storei t1, PayloadOffset[cfr, t0, 8]
886 .opIsUndefinedMasqueradesAsUndefined:
887 loadp CodeBlock[cfr], t3
888 loadp CodeBlock::m_globalObject[t3], t3
889 cpeq Structure::m_globalObject[t1], t3, t1
890 storei t1, PayloadOffset[cfr, t0, 8]

# is_boolean: pure tag compare.
894 _llint_op_is_boolean:
898 loadConstantOrVariableTag(t1, t0)
899 cieq t0, BooleanTag, t0
900 storei BooleanTag, TagOffset[cfr, t2, 8]
901 storei t0, PayloadOffset[cfr, t2, 8]

# is_number fragment: numbers are Int32Tag or double-encoded, i.e. tag
# unsigned-below LowestTag + 1 after the (elided) Int32 adjustment.
909 loadConstantOrVariableTag(t1, t0)
910 storei BooleanTag, TagOffset[cfr, t2, 8]
912 cib t0, LowestTag + 1, t1
913 storei t1, PayloadOffset[cfr, t2, 8]

# is_string fragment: non-cells are never strings (store 0); cells check
# the structure's type against StringType.
921 loadConstantOrVariable(t1, t0, t3)
922 storei BooleanTag, TagOffset[cfr, t2, 8]
923 bineq t0, CellTag, .opIsStringNotCell
924 loadp JSCell::m_structure[t3], t0
925 cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
926 storei t1, PayloadOffset[cfr, t2, 8]
929 storep 0, PayloadOffset[cfr, t2, 8]
# Load a property known to live out-of-line: index backwards into the
# butterfly. Out-of-line offsets start at firstOutOfLineOffset, and the
# butterfly layout puts them at negative indices, hence the
# (firstOutOfLineOffset - 2) bias folded into the displacement.
933 macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload)
934 assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end)
936 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
937 loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
938 loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload

# General variant: inline properties (offset < firstOutOfLineOffset) live
# directly after the JSObject header; otherwise use the butterfly. The
# .isInline arm's base adjustment uses sizeof JSObject with the same bias
# so one load sequence serves both. (Label lines elided in this excerpt.)
941 macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload)
942 bilt propertyOffset, firstOutOfLineOffset, .isInline
943 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
947 addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
949 loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
950 loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
# Monomorphic global-variable resolution: the instruction stream caches
# the global object's Structure (12[PC]) and the property offset; a
# structure mismatch goes to the slow path to (re)populate the cache.
953 macro resolveGlobal(size, slow)
954 # Operands are as follows:
955 # 4[PC] Destination for the load.
956 # 8[PC] Property identifier index in the code block.
957 # 12[PC] Structure pointer, initialized to 0 by bytecode generator.
958 # 16[PC] Offset in global object, initialized to 0 by bytecode generator.
959 loadp CodeBlock[cfr], t0
960 loadp CodeBlock::m_globalObject[t0], t0
961 loadp JSCell::m_structure[t0], t1
962 bpneq t1, 12[PC], slow
# (Load of the cached offset into t1 elided in this excerpt.)
964 loadPropertyAtVariableOffsetKnownNotInline(t1, t0, t2, t3)
966 storei t2, TagOffset[cfr, t0, 8]
967 storei t3, PayloadOffset[cfr, t0, 8]
# Profile the loaded value; the profile slot is the opcode's last operand.
968 loadi (size - 1) * 4[PC], t0
969 valueProfile(t2, t3, t0)
# Store a value into a global-const register slot (address in t0, loaded
# on a line elided from this excerpt).
972 _llint_op_init_global_const:
976 loadConstantOrVariable(t1, t2, t3)
978 storei t2, TagOffset[t0]
979 storei t3, PayloadOffset[t0]

# Checked variant: a watchpoint/flag byte at [t2] forces the slow path
# when set.
983 _llint_op_init_global_const_check:
988 btbnz [t2], .opInitGlobalConstCheckSlow
989 loadConstantOrVariable(t1, t2, t3)
991 storei t2, TagOffset[t0]
992 storei t3, PayloadOffset[t0]
994 .opInitGlobalConstCheckSlow:
995 callSlowPath(_llint_slow_path_init_global_const_check)
998 # We only do monomorphic get_by_id caching for now, and we do not modify the
999 # opcode. We do, however, allow for the cache to change anytime if fails, since
1000 # ping-ponging is free. At best we get lucky and the get_by_id will continue
1001 # to take fast path on the new cache. At worst we take slow path, which is what
1002 # we would have been doing anyway.
# Shared get_by_id body, parameterized over the property-storage accessor
# (inline vs out-of-line). Checks the cached structure (t1), then loads
# tag/payload at the cached offset (t2) and profiles the result.
1004 macro getById(getPropertyStorage)
1008 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
1013 macro (propertyStorage, scratch)
1014 bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
1016 loadi TagOffset[propertyStorage, t2], scratch
1017 loadi PayloadOffset[propertyStorage, t2], t2
1018 storei scratch, TagOffset[cfr, t1, 8]
1019 storei t2, PayloadOffset[cfr, t1, 8]
1021 valueProfile(scratch, t2, t1)
1026 callSlowPath(_llint_slow_path_get_by_id)

# Entry points: same code, different storage accessor.
1030 _llint_op_get_by_id:
1031 getById(withInlineStorage)

1034 _llint_op_get_by_id_out_of_line:
1035 getById(withOutOfLineStorage)
# Specialized get_by_id for "length" on arrays: requires an IsArray cell
# with a real indexing shape, then reads m_publicLength from the
# IndexingHeader just before the butterfly. Negative lengths (high bit
# set, would not round-trip as int32) go slow.
1038 _llint_op_get_array_length:
1042 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
1043 loadp JSCell::m_structure[t3], t2
1044 arrayProfile(t2, t1, t0)
1045 btiz t2, IsArray, .opGetArrayLengthSlow
1046 btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
1049 loadp JSObject::m_butterfly[t3], t0
1050 loadi -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], t0
1051 bilt t0, 0, .opGetArrayLengthSlow
1052 valueProfile(Int32Tag, t0, t2)
1053 storep t0, PayloadOffset[cfr, t1, 8]
1054 storep Int32Tag, TagOffset[cfr, t1, 8]
1057 .opGetArrayLengthSlow:
1058 callSlowPath(_llint_slow_path_get_by_id)

# Specialized "length" on the lazily-created arguments object: fast only
# while the arguments register is still empty (not yet materialized), in
# which case the length is just the frame's ArgumentCount.
1062 _llint_op_get_arguments_length:
1066 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
1067 loadi ArgumentCount + PayloadOffset[cfr], t2
1069 storei Int32Tag, TagOffset[cfr, t1, 8]
1070 storei t2, PayloadOffset[cfr, t1, 8]
1073 .opGetArgumentsLengthSlow:
1074 callSlowPath(_llint_slow_path_get_arguments_length)
# Shared put_by_id body (non-transition case): check the cached structure,
# then store tag/payload at the cached offset through the chosen storage
# accessor, with a write barrier (currently a no-op).
1078 macro putById(getPropertyStorage)
1082 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
1087 macro (propertyStorage, scratch)
1088 bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
1090 loadConstantOrVariable2Reg(t2, scratch, t2)
1091 writeBarrier(scratch, t2)
1092 storei scratch, TagOffset[propertyStorage, t1]
1093 storei t2, PayloadOffset[propertyStorage, t1]

1098 _llint_op_put_by_id:
1099 putById(withInlineStorage)
1102 callSlowPath(_llint_slow_path_put_by_id)

1106 _llint_op_put_by_id_out_of_line:
1107 putById(withOutOfLineStorage)

# put_by_id that transitions the object to a new structure: verify the old
# structure, run additionalChecks (prototype-chain validation for normal
# transitions), store the value, then install the new structure last.
1110 macro putByIdTransition(additionalChecks, getPropertyStorage)
1114 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
1116 bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
1117 additionalChecks(t1, t3)
1122 macro (propertyStorage, scratch)
1123 addp t1, propertyStorage, t3
1124 loadConstantOrVariable2Reg(t2, t1, t2)
1125 writeBarrier(t1, t2)
1126 storei t1, TagOffset[t3]
1128 storei t2, PayloadOffset[t3]
1129 storep t1, JSCell::m_structure[t0]

# Direct transitions need no extra checks.
1134 macro noAdditionalChecks(oldStructure, scratch)

# Normal transitions must confirm the cached StructureChain (28[PC]) still
# matches the live prototype chain, walking until a null prototype.
1137 macro structureChainChecks(oldStructure, scratch)
1138 const protoCell = oldStructure # Reusing the oldStructure register for the proto
1140 loadp 28[PC], scratch
1141 assert(macro (ok) btpnz scratch, ok end)
1142 loadp StructureChain::m_vector[scratch], scratch
1143 assert(macro (ok) btpnz scratch, ok end)
1144 bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
1146 loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
1147 loadp JSCell::m_structure[protoCell], oldStructure
1148 bpneq oldStructure, [scratch], .opPutByIdSlow
1150 bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop

# Four transition entry points: {direct, normal} x {inline, out-of-line}.
1154 _llint_op_put_by_id_transition_direct:
1155 putByIdTransition(noAdditionalChecks, withInlineStorage)

1158 _llint_op_put_by_id_transition_direct_out_of_line:
1159 putByIdTransition(noAdditionalChecks, withOutOfLineStorage)

1162 _llint_op_put_by_id_transition_normal:
1163 putByIdTransition(structureChainChecks, withInlineStorage)

1166 _llint_op_put_by_id_transition_normal_out_of_line:
1167 putByIdTransition(structureChainChecks, withOutOfLineStorage)
# get_by_val: dispatch on the base's indexing shape. Int32/Contiguous read
# tag+payload pairs; Double reads a raw double (NaN means hole => slow);
# ArrayStorage reads from m_vector with a vector-length bounds check.
# Empty tags and out-of-bounds indices record array-profile feedback
# before taking the slow path.
1170 _llint_op_get_by_val:
1173 loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
1174 loadp JSCell::m_structure[t0], t2
1176 arrayProfile(t2, t3, t1)
1178 loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
1179 loadp JSObject::m_butterfly[t0], t3
1180 andi IndexingShapeMask, t2
1181 bieq t2, Int32Shape, .opGetByValIsContiguous
1182 bineq t2, ContiguousShape, .opGetByValNotContiguous
1183 .opGetByValIsContiguous:
# Bounds check against publicLength in the IndexingHeader (just before
# the butterfly), then load the element pair.
1185 biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValOutOfBounds
1186 loadi TagOffset[t3, t1, 8], t2
1187 loadi PayloadOffset[t3, t1, 8], t1
1190 .opGetByValNotContiguous:
1191 bineq t2, DoubleShape, .opGetByValNotDouble
1192 biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t3], .opGetByValOutOfBounds
1193 loadd [t3, t1, 8], ft0
# NaN marks a hole in double arrays; treat it as a miss.
1194 bdnequn ft0, ft0, .opGetByValSlow
1195 # FIXME: This could be massively optimized.
1198 jmp .opGetByValNotEmpty
1200 .opGetByValNotDouble:
# Shape-range trick: subtract ArrayStorageShape so one unsigned compare
# covers [ArrayStorageShape, SlowPutArrayStorageShape].
1201 subi ArrayStorageShape, t2
1202 bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
1203 biaeq t1, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t3], .opGetByValOutOfBounds
1204 loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
1205 loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
1209 bieq t2, EmptyValueTag, .opGetByValOutOfBounds
1210 .opGetByValNotEmpty:
1211 storei t2, TagOffset[cfr, t0, 8]
1212 storei t1, PayloadOffset[cfr, t0, 8]
1214 valueProfile(t2, t1, t0)
1217 .opGetByValOutOfBounds:
# Record out-of-bounds feedback in the ArrayProfile (operand 4) so the
# JIT compiles a hole-tolerant version.
1219 loadpFromInstruction(4, t0)
1220 storeb 1, ArrayProfile::m_outOfBounds[t0]
1223 callSlowPath(_llint_slow_path_get_by_val)
1227 _llint_op_get_argument_by_val:
1228 # FIXME: At some point we should array profile this. Right now it isn't necessary
1229 # since the DFG will never turn a get_argument_by_val into a GetByVal.
# Fast path only while the arguments object hasn't been materialized
# (register still empty); index must be an int32 below ArgumentCount, and
# the value is read directly from the frame's argument slots.
1233 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
1234 loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
1236 loadi ArgumentCount + PayloadOffset[cfr], t1
1237 biaeq t2, t1, .opGetArgumentByValSlow
1240 loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
1241 loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
1243 storei t0, TagOffset[cfr, t3, 8]
1244 storei t1, PayloadOffset[cfr, t3, 8]
1245 valueProfile(t0, t1, t2)
1248 .opGetArgumentByValSlow:
1249 callSlowPath(_llint_slow_path_get_argument_by_val)

# get_by_pname (for-in property access): fast only when the base matches
# the iterated object, the structure matches the iterator's cached
# structure, and the property index is within the cacheable-slot range.
# Out-of-line indices are rebased past the inline capacity.
1253 _llint_op_get_by_pname:
1256 loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
1258 bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
1260 loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
1262 loadi PayloadOffset[cfr, t0, 8], t3
1263 loadp JSCell::m_structure[t2], t0
1264 bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
1266 loadi [cfr, t0, 8], t0
1268 biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
1269 bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty
1270 addi firstOutOfLineOffset, t0
1271 subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0
1272 .opGetByPnameInlineProperty:
1273 loadPropertyAtVariableOffset(t0, t2, t1, t3)
1275 storei t1, TagOffset[cfr, t0, 8]
1276 storei t3, PayloadOffset[cfr, t0, 8]
1280 callSlowPath(_llint_slow_path_get_by_pname)
# Shared store helper for Int32/Double/Contiguous put_by_val: in-bounds
# stores go straight through storeCallback; writes one past publicLength
# (but within vectorLength) grow the array and record may-store-to-hole
# profile feedback. (Label lines elided in this excerpt.)
1284 macro contiguousPutByVal(storeCallback)
1285 biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .outOfBounds
1288 storeCallback(t2, t1, t0, t3)
1292 biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValOutOfBounds
1295 storeb 1, ArrayProfile::m_mayStoreToHole[t2]
1298 storei t2, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]

# put_by_val: dispatch on indexing shape, mirroring get_by_val. Each shape
# supplies a storeCallback to contiguousPutByVal; ArrayStorage handles
# hole-filling (bump m_numValuesInVector, maybe grow publicLength) itself.
1302 _llint_op_put_by_val:
1305 loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
1306 loadp JSCell::m_structure[t1], t2
1308 arrayProfile(t2, t3, t0)
1310 loadConstantOrVariablePayload(t0, Int32Tag, t3, .opPutByValSlow)
1311 loadp JSObject::m_butterfly[t1], t0
1312 andi IndexingShapeMask, t2
1313 bineq t2, Int32Shape, .opPutByValNotInt32
# Int32Shape: the stored value must itself be an int32.
1315 macro (operand, scratch, base, index)
1316 loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow)
1317 storei Int32Tag, TagOffset[base, index, 8]
1318 storei scratch, PayloadOffset[base, index, 8]
1321 .opPutByValNotInt32:
1322 bineq t2, DoubleShape, .opPutByValNotDouble
# DoubleShape: int32 values are converted (elided int path); doubles are
# reassembled from tag+payload via fii2d; NaN payloads go slow (NaN is
# the hole marker).
1324 macro (operand, scratch, base, index)
1326 const payload = operand
1327 loadConstantOrVariable2Reg(operand, tag, payload)
1328 bineq tag, Int32Tag, .notInt
1332 fii2d payload, tag, ft0
1333 bdnequn ft0, ft0, .opPutByValSlow
1335 stored ft0, [base, index, 8]
1338 .opPutByValNotDouble:
1339 bineq t2, ContiguousShape, .opPutByValNotContiguous
# ContiguousShape: store any tag+payload pair, with write barrier.
1341 macro (operand, scratch, base, index)
1343 const payload = operand
1344 loadConstantOrVariable2Reg(operand, tag, payload)
1345 writeBarrier(tag, payload)
1346 storei tag, TagOffset[base, index, 8]
1347 storei payload, PayloadOffset[base, index, 8]
1350 .opPutByValNotContiguous:
1351 bineq t2, ArrayStorageShape, .opPutByValSlow
1352 biaeq t3, -sizeof IndexingHeader + IndexingHeader::m_vectorLength[t0], .opPutByValOutOfBounds
1353 bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
1354 .opPutByValArrayStorageStoreResult:
1356 loadConstantOrVariable2Reg(t2, t1, t2)
1357 writeBarrier(t1, t2)
1358 storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8]
1359 storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8]
1362 .opPutByValArrayStorageEmpty:
# Filling a hole: record profile feedback, count the value, and grow
# publicLength if the index is at/after the current length.
1365 storeb 1, ArrayProfile::m_mayStoreToHole[t1]
1367 addi 1, ArrayStorage::m_numValuesInVector[t0]
1368 bib t3, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0], .opPutByValArrayStorageStoreResult
1370 storei t1, -sizeof IndexingHeader + IndexingHeader::m_publicLength[t0]
1371 jmp .opPutByValArrayStorageStoreResult
1373 .opPutByValOutOfBounds:
1375 loadpFromInstruction(4, t0)
1376 storeb 1, ArrayProfile::m_outOfBounds[t0]
1379 callSlowPath(_llint_slow_path_put_by_val)
# NOTE(review): orphaned fragment — the opcode label this belongs to was
# elided from this excerpt (presumably an unconditional jump opcode; verify
# against the full file). dispatchBranch advances PC by the offset operand
# at 4[PC].
1385 dispatchBranch(4[PC])
# jumpTrueOrFalse(conditionOp, slow): common body for jtrue/jfalse-style
# opcodes. Loads the condition operand's payload (slow path unless it is a
# boolean), then conditionOp decides whether to take the branch.
# NOTE(review): the `.target:` / fall-through dispatch and `end` lines are
# elided from this excerpt.
1388 macro jumpTrueOrFalse(conditionOp, slow)
1390 loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
1391 conditionOp(t0, .target)
# Branch taken: PC += offset operand at 8[PC].
1395 dispatchBranch(8[PC])
# equalNull(cellHandler, immediateHandler): common body for jeq_null /
# jneq_null. Loads the operand's tag (t1) and payload (t0); cells are tested
# via cellHandler (which sees the structure and its TypeInfo flags, for
# MasqueradesAsUndefined handling), non-cells via immediateHandler on the tag.
# NOTE(review): labels such as `.immediate` / `.target` and the macro's `end`
# are elided from this excerpt.
1403 macro equalNull(cellHandler, immediateHandler)
1405 assertNotConstant(t0)
1406 loadi TagOffset[cfr, t0, 8], t1
1407 loadi PayloadOffset[cfr, t0, 8], t0
1408 bineq t1, CellTag, .immediate
1409 loadp JSCell::m_structure[t0], t2
1410 cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
# Branch taken: PC += offset operand at 8[PC].
1414 dispatchBranch(8[PC])
1418 immediateHandler(t1, .target)
# Handler pair passed to equalNull for op_jeq_null (the opcode label itself
# is elided from this excerpt). A cell compares equal to null only if its
# structure masquerades as undefined *in the current global object*; an
# immediate compares equal if its tag is NullTag.
# NOTE(review): UndefinedTag handling and the closing lines appear elided.
1425 macro (structure, value, target)
1426 btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined
1427 loadp CodeBlock[cfr], t0
1428 loadp CodeBlock::m_globalObject[t0], t0
# Only masquerade within the structure's own global object.
1429 bpeq Structure::m_globalObject[structure], t0, target
1430 .opJeqNullNotMasqueradesAsUndefined:
1432 macro (value, target) bieq value, NullTag, target end)
# op_jneq_null: mirror image of op_jeq_null — branch when the operand is NOT
# null/undefined-like. For cells, a non-masquerading cell branches
# immediately; a masquerading one branches only if it masquerades in a
# different global object. For immediates, branch when the tag is not NullTag.
# NOTE(review): intervening lines (equalNull invocation header, UndefinedTag
# case) are elided from this excerpt.
1435 _llint_op_jneq_null:
1438 macro (structure, value, target)
1439 btbz value, MasqueradesAsUndefined, target
1440 loadp CodeBlock[cfr], t0
1441 loadp CodeBlock::m_globalObject[t0], t0
1442 bpneq Structure::m_globalObject[structure], t0, target
1444 macro (value, target) bineq value, NullTag, target end)
# Body of op_jneq_ptr (label elided from this excerpt): branch unless the
# operand register holds exactly the special-pointer cell (indexed by t1)
# from the current global object's m_specialPointers table.
1451 loadp CodeBlock[cfr], t2
1452 loadp CodeBlock::m_globalObject[t2], t2
# Non-cells can never equal the special pointer — take the branch.
1453 bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
1454 loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
1455 bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
# Branch taken: PC += offset operand at 12[PC].
1457 dispatchBranch(12[PC])
1458 .opJneqPtrFallThrough:
# compare(integerCompare, doubleCompare, slowPath): common body for the
# fused compare-and-branch opcodes (jless, jlesseq, ...). Fast paths:
# int32/int32 via integerCompare; any int/double mix is converted to doubles
# and compared via doubleCompare; everything else goes to slowPath.
# NOTE(review): several conversion lines (ci2d / fii2d sequences) and labels
# (.op1NotInt, .op2NotInt, .jumpTarget, .slow, `end`) are elided from this
# excerpt — the visible lines are a partial skeleton.
1462 macro compare(integerCompare, doubleCompare, slowPath)
# t0/t1 := tag/payload of operand 1; t2/t3 := tag/payload of operand 2.
1465 loadConstantOrVariable(t2, t0, t1)
1466 loadConstantOrVariable2Reg(t3, t2, t3)
1467 bineq t0, Int32Tag, .op1NotInt
1468 bineq t2, Int32Tag, .op2NotInt
# Both int32: compare payloads directly.
1469 integerCompare(t1, t3, .jumpTarget)
# Tags above LowestTag are not doubles — bail to the slow path.
1473 bia t0, LowestTag, .slow
1474 bib t2, LowestTag, .op1NotIntOp2Double
1475 bineq t2, Int32Tag, .slow
1478 .op1NotIntOp2Double:
1482 doubleCompare(ft0, ft1, .jumpTarget)
1487 bia t2, LowestTag, .slow
1489 doubleCompare(ft0, ft1, .jumpTarget)
# Branch taken: PC += offset operand at 12[PC].
1493 dispatchBranch(12[PC])
1496 callSlowPath(slowPath)
# op_switch_imm: table-driven switch on an int32 scrutinee. Looks up the
# SimpleJumpTable in CodeBlock::RareData, subtracts the table's minimum,
# bounds-checks against the branchOffsets vector, and dispatches to the
# recorded offset (0 means "no case" -> fall through). Doubles and other
# non-ints go to the slow path / fall-through.
# NOTE(review): some lines (e.g. the table-index load into t3, the
# .opSwitchImmNotInt label, .opSwitchImmSlow label) are elided here.
1501 _llint_op_switch_imm:
1505 loadConstantOrVariable(t2, t1, t0)
1506 loadp CodeBlock[cfr], t2
1507 loadp CodeBlock::m_rareData[t2], t2
1508 muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
1509 loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
1511 bineq t1, Int32Tag, .opSwitchImmNotInt
# Normalize the scrutinee into a zero-based table index.
1512 subi SimpleJumpTable::min[t2], t0
1513 biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
1514 loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
1515 loadi [t3, t0, 4], t1
# A zero offset means this index has no case label.
1516 btiz t1, .opSwitchImmFallThrough
1517 dispatchBranchWithOffset(t1)
1520 bib t1, LowestTag, .opSwitchImmSlow # Go to slow path if it's a double.
1521 .opSwitchImmFallThrough:
# No matching case: PC += default-offset operand at 8[PC].
1522 dispatchBranch(8[PC])
1525 callSlowPath(_llint_slow_path_switch_imm)
# op_switch_char: switch on a single-character string. Fast path only for a
# resolved (non-rope) JSString of length 1; reads the character (8-bit or
# 16-bit buffer), then indexes the character SimpleJumpTable exactly like
# op_switch_imm. Ropes go to the slow path; everything else falls through.
# NOTE(review): the table-index load, the 8-bit/16-bit character loads and
# the .opSwitchChar8Bit / .opSwitchOnRope labels are elided in this excerpt.
1529 _llint_op_switch_char:
1533 loadConstantOrVariable(t2, t1, t0)
1534 loadp CodeBlock[cfr], t2
1535 loadp CodeBlock::m_rareData[t2], t2
1536 muli sizeof SimpleJumpTable, t3
1537 loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
# Must be a cell, of String type, with length exactly 1.
1539 bineq t1, CellTag, .opSwitchCharFallThrough
1540 loadp JSCell::m_structure[t0], t1
1541 bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
1542 bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
1543 loadp JSString::m_value[t0], t0
# A null StringImpl means the string is an unresolved rope.
1544 btpz t0, .opSwitchOnRope
1545 loadp StringImpl::m_data8[t0], t1
1546 btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
1548 jmp .opSwitchCharReady
# Table lookup, same scheme as switch_imm: index = char - min.
1552 subi SimpleJumpTable::min[t2], t0
1553 biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
1554 loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
1555 loadi [t2, t0, 4], t1
1556 btiz t1, .opSwitchCharFallThrough
1557 dispatchBranchWithOffset(t1)
1559 .opSwitchCharFallThrough:
1560 dispatchBranch(8[PC])
1563 callSlowPath(_llint_slow_path_switch_char)
# Body of op_new_func (label elided from this excerpt). If operand 12[PC]
# requests a "checked" allocation, skip the slow path when the destination
# register already holds a value; otherwise always allocate via the slow path.
1569 btiz 12[PC], .opNewFuncUnchecked
1571 bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
1572 .opNewFuncUnchecked:
1573 callSlowPath(_llint_slow_path_new_func)
# arrayProfileForCall(): record the structure of the call's `this` argument
# into the call site's ArrayProfile (t1 — loaded on a line elided from this
# excerpt). Non-cell `this` values are not profiled.
# NOTE(review): the loads of t3/t1 and the `.done:`/`end` lines are elided.
1578 macro arrayProfileForCall()
1581 bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
1582 loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
1583 loadp JSCell::m_structure[t0], t0
1585 storep t0, ArrayProfile::m_lastSeenStructure[t1]
# doCall(slowPath): common body of op_call / op_construct. Fast path requires
# the callee cell to match the one cached in the LLIntCallLinkInfo (t1);
# otherwise link/call via slowPath. On the fast path it builds the new call
# frame at t3 (cfr + offset): Callee, ScopeChain, CallerFrame, ArgumentCount,
# and the caller's PC, then tail-calls into the target.
# NOTE(review): the loads of t0/t3 operands and the frame-offset computation
# feeding `addp cfr, t3` are elided from this excerpt.
1590 macro doCall(slowPath)
1593 loadp LLIntCallLinkInfo::callee[t1], t2
1594 loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
# Monomorphic check: callee must equal the linked callee.
1595 bineq t3, t2, .opCallSlow
1599 addp cfr, t3 # t3 contains the new value of cfr
1600 loadp JSFunction::m_scope[t2], t0
1601 storei t2, Callee + PayloadOffset[t3]
1602 storei t0, ScopeChain + PayloadOffset[t3]
# Argument count operand lives 24 bytes before the end of this instruction.
1603 loadi 8 - 24[PC], t2
# Save the caller's PC in its own frame so it survives the call.
1604 storei PC, ArgumentCount + TagOffset[cfr]
1605 storep cfr, CallerFrame[t3]
1606 storei t2, ArgumentCount + PayloadOffset[t3]
1607 storei CellTag, Callee + TagOffset[t3]
1608 storei CellTag, ScopeChain + TagOffset[t3]
1610 callTargetFunction(t1)
1613 slowPathForCall(6, slowPath)
# op_tear_off_activation: if an activation object was created (register t0 is
# not empty), copy the frame's locals into it via the slow path; otherwise
# nothing to do.
# NOTE(review): the operand load into t0 and the final dispatch are elided.
1617 _llint_op_tear_off_activation:
1620 bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
1621 callSlowPath(_llint_slow_path_tear_off_activation)
1622 .opTearOffActivationNotCreated:
# op_tear_off_arguments: if an arguments object was materialized (the
# unmodified-arguments register, at t0 - 1, is not empty), detach it from the
# frame via the slow path.
# NOTE(review): the operand load into t0 and the final dispatch are elided.
1626 _llint_op_tear_off_arguments:
1629 subi 1, t0 # Get the unmodifiedArgumentsRegister
1630 bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
1631 callSlowPath(_llint_slow_path_tear_off_arguments)
1632 .opTearOffArgumentsNotCreated:
# NOTE(review): fragment of a return-style opcode (its label is elided from
# this excerpt — presumably op_ret; verify against the full file). Offers the
# frame to the JIT for OSR at epilogue, then loads the return value into
# t1:t0 (tag:payload).
1638 checkSwitchToJITForEpilogue()
1640 loadConstantOrVariable(t2, t1, t0)
# op_call_put_result: store the call's return value (t1:t0 = tag:payload,
# left there by the callee/trampoline) into the destination register, and
# value-profile it. traceExecution comes last because it clobbers t1/t0.
# NOTE(review): the destination-operand load into t2 and the final dispatch
# are elided from this excerpt.
1644 _llint_op_call_put_result:
1647 storei t1, TagOffset[cfr, t2, 8]
1648 storei t0, PayloadOffset[cfr, t2, 8]
1649 valueProfile(t1, t0, t3)
1650 traceExecution() # Needs to be here because it would clobber t1, t0
# op_ret_object_or_this: return the first operand if it is an object;
# otherwise fall back to returning the second operand (`this`). Used for
# constructor returns.
# NOTE(review): the operand loads into t2 and the actual doReturn/dispatch
# lines are elided from this excerpt.
1654 _llint_op_ret_object_or_this:
1656 checkSwitchToJITForEpilogue()
1658 loadConstantOrVariable(t2, t1, t0)
1659 bineq t1, CellTag, .opRetObjectOrThisNotObject
1660 loadp JSCell::m_structure[t0], t2
# Types below ObjectType are not objects — use the fallback operand.
1661 bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
1664 .opRetObjectOrThisNotObject:
1666 loadConstantOrVariable(t2, t1, t0)
# op_to_primitive: fast path when the operand is already primitive — any
# non-cell, or a cell that is a string — in which case the value is copied
# to the destination unchanged. Other cells need the full ToPrimitive
# algorithm in the slow path.
# NOTE(review): the operand loads (t2, t3) and final dispatch are elided.
1670 _llint_op_to_primitive:
1674 loadConstantOrVariable(t2, t1, t0)
1675 bineq t1, CellTag, .opToPrimitiveIsImm
1676 loadp JSCell::m_structure[t0], t2
1677 bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
1678 .opToPrimitiveIsImm:
1679 storei t1, TagOffset[cfr, t3, 8]
1680 storei t0, PayloadOffset[cfr, t3, 8]
1683 .opToPrimitiveSlowCase:
1684 callSlowPath(_llint_slow_path_to_primitive)
# op_next_pname: for-in iteration step. Compares the current index against
# the iterator's size; if not done, loads the next property-name JSString
# from JSPropertyNameIterator::m_jsStrings, stores it to the destination,
# then validates that the iterated object's structure (and its prototype
# chain, via m_cachedPrototypeChain) still matches what the iterator cached.
# Any mismatch (or a null prototype mid-chain) goes to the slow path.
# NOTE(review): multiple operand loads, the index increment, and labels such
# as .opNextPnameTarget / .opNextPnameEnd / .opNextPnameSlow are elided from
# this excerpt, so the visible control flow is partial.
1688 _llint_op_next_pname:
1692 loadi PayloadOffset[cfr, t1, 8], t0
# Done when the running index equals the iterator's size register.
1693 bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
1695 loadi PayloadOffset[cfr, t2, 8], t2
1696 loadp JSPropertyNameIterator::m_jsStrings[t2], t3
1697 loadi [t3, t0, 8], t3
1699 storei t0, PayloadOffset[cfr, t1, 8]
# Publish the property name (a cell) to the destination register.
1701 storei CellTag, TagOffset[cfr, t1, 8]
1702 storei t3, PayloadOffset[cfr, t1, 8]
1704 loadi PayloadOffset[cfr, t3, 8], t3
1705 loadp JSCell::m_structure[t3], t1
# Structure must still match the iterator's cached structure.
1706 bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
1707 loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
1708 loadp StructureChain::m_vector[t0], t0
1709 btpz [t0], .opNextPnameTarget
# Walk the prototype chain, checking each structure against the cache.
1710 .opNextPnameCheckPrototypeLoop:
1711 bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
1712 loadp Structure::m_prototype + PayloadOffset[t1], t2
1713 loadp JSCell::m_structure[t2], t1
1714 bpneq t1, [t0], .opNextPnameSlow
1716 btpnz [t0], .opNextPnameCheckPrototypeLoop
# Loop back to the top of the for-in body: PC += offset at 24[PC].
1718 dispatchBranch(24[PC])
1724 callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
# Body of op_catch (the label itself is elided from this excerpt). Reached
# from both the JIT's and the interpreter's throw trampolines. Restores PC
# from VM::targetInterpreterPCForThrow, moves the pending exception into the
# catch's destination register, and clears VM::exception.
1729 # This is where we end up from the JIT's throw trampoline (because the
1730 # machine code return address will be set to _llint_op_catch), and from
1731 # the interpreter's throw trampoline (see _llint_throw_trampoline).
1732 # The JIT throwing protocol calls for the cfr to be in t0. The throwing
1733 # code must have known that we were throwing to the interpreter, and have
1734 # set VM::targetInterpreterPCForThrow.
1736 loadp JITStackFrame::vm[sp], t3
1737 loadi VM::targetInterpreterPCForThrow[t3], PC
# Fetch the exception value (t1:t0 = tag:payload)...
1738 loadi VM::exception + PayloadOffset[t3], t0
1739 loadi VM::exception + TagOffset[t3], t1
# ...then clear the VM's pending-exception slot.
1740 storei 0, VM::exception + PayloadOffset[t3]
1741 storei EmptyValueTag, VM::exception + TagOffset[t3]
# Store the exception into the catch's destination register (t2 — its
# operand load is elided from this excerpt).
1743 storei t0, PayloadOffset[cfr, t2, 8]
1744 storei t1, TagOffset[cfr, t2, 8]
1745 traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
# getDeBruijnScope: walk the scope chain downward by the De Bruijn index,
# leaving the resulting scope in t0. For function code with a needs-activation
# flag, one extra scope link is skipped only when the activation register has
# actually been filled in (the activation inserts a scope the bytecode's
# index does not account for).
# NOTE(review): several lines (the loop labels, index decrement, scopeCheck
# invocations, `end`) are elided from this excerpt.
1749 # Gives you the scope in t0, while allowing you to optionally perform additional checks on the
1750 # scopes as they are traversed. scopeCheck() is called with two arguments: the register
1751 # holding the scope, and a register that can be used for scratch. Note that this does not
1752 # use t3, so you can hold stuff in t3 if need be.
1753 macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
1754 loadp ScopeChain + PayloadOffset[cfr], t0
1755 loadi deBruijinIndexOperand, t2
# Activation skipping only applies to function code that needs an activation.
1759 loadp CodeBlock[cfr], t1
1760 bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
1761 btbz CodeBlock::m_needsActivation[t1], .loop
1763 loadi CodeBlock::m_activationRegister[t1], t1
1765 # Need to conditionally skip over one scope.
1766 bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
1768 loadp JSScope::m_next[t0], t0
1775 loadp JSScope::m_next[t0], t0
# op_get_scoped_var: resolve the scope at the given De Bruijn depth, then
# load register 8[PC] out of that scope's register file into destination
# 4[PC], value-profiling the result.
# NOTE(review): the operand loads into t1/t2 are elided from this excerpt.
1783 _llint_op_get_scoped_var:
1785 # Operands are as follows:
1786 # 4[PC] Destination for the load.
1787 # 8[PC] Index of register in the scope.
1788 # 12[PC] De Bruijin index.
1789 getDeBruijnScope(12[PC], macro (scope, scratch) end)
1792 loadp JSVariableObject::m_registers[t0], t0
1793 loadi TagOffset[t0, t2, 8], t3
1794 loadi PayloadOffset[t0, t2, 8], t0
1795 storei t3, TagOffset[cfr, t1, 8]
1796 storei t0, PayloadOffset[cfr, t1, 8]
1798 valueProfile(t3, t0, t1)
# op_put_scoped_var: resolve the scope at De Bruijn depth 8[PC], then store
# the value operand into that scope's register t1, with a write barrier.
# NOTE(review): the operand loads into t1/t2 are elided from this excerpt.
1802 _llint_op_put_scoped_var:
1804 getDeBruijnScope(8[PC], macro (scope, scratch) end)
# t3:t2 := tag:payload of the value to store.
1806 loadConstantOrVariable(t1, t3, t2)
1808 writeBarrier(t3, t2)
1809 loadp JSVariableObject::m_registers[t0], t0
1810 storei t3, TagOffset[t0, t1, 8]
1811 storei t2, PayloadOffset[t0, t1, 8]
# NOTE(review): fragment of an epilogue opcode (its label is elided from this
# excerpt — presumably op_end; verify against the full file). Offers OSR to
# the JIT, then loads the non-constant result register into t1:t0 for return.
1816 checkSwitchToJITForEpilogue()
1818 assertNotConstant(t0)
1819 loadi TagOffset[cfr, t0, 8], t1
1820 loadi PayloadOffset[cfr, t0, 8], t0
# Throw trampoline for exceptions raised inside LLIntSlowPaths: emulate the
# JIT throwing protocol by putting the throwing call frame in t0 and jumping
# to the VM's machine-code throw target (which may be JIT or LLInt code).
1824 _llint_throw_from_slow_path_trampoline:
1825 # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
1826 # the throw target is not necessarily interpreted code, we come to here.
1827 # This essentially emulates the JIT's throwing protocol.
1828 loadp JITStackFrame::vm[sp], t1
1829 loadp VM::callFrameForThrow[t1], t0
1830 jmp VM::targetMachinePCForThrow[t1]
# Throw trampoline used when an exception is raised during a call: pop/save
# the return address first (required on targets where the call pushed it),
# then follow the same protocol as _llint_throw_from_slow_path_trampoline.
1833 _llint_throw_during_call_trampoline:
1834 preserveReturnAddressAfterCall(t2)
1835 loadp JITStackFrame::vm[sp], t1
1836 loadp VM::callFrameForThrow[t1], t0
1837 jmp VM::targetMachinePCForThrow[t1]
1840 macro nativeCallTrampoline(executableOffsetToFunction)
1841 storep 0, CodeBlock[cfr]
1842 loadp CallerFrame[cfr], t0
1843 loadi ScopeChain + PayloadOffset[t0], t1
1844 storei CellTag, ScopeChain + TagOffset[cfr]
1845 storei t1, ScopeChain + PayloadOffset[cfr]
1847 loadp JITStackFrame::vm + 4[sp], t3 # Additional offset for return address
1848 storep cfr, VM::topCallFrame[t3]
1850 storep t1, ReturnPC[cfr]
1851 move cfr, t2 # t2 = ecx
1853 loadi Callee + PayloadOffset[cfr], t1
1854 loadp JSFunction::m_executable[t1], t1
1856 call executableOffsetToFunction[t1]
1858 loadp JITStackFrame::vm + 4[sp], t3
1859 elsif ARM or ARMv7 or ARMv7_TRADITIONAL
1860 loadp JITStackFrame::vm[sp], t3
1861 storep cfr, VM::topCallFrame[t3]
1863 preserveReturnAddressAfterCall(t3)
1864 storep t3, ReturnPC[cfr]
1866 loadi Callee + PayloadOffset[cfr], t1
1867 loadp JSFunction::m_executable[t1], t1
1869 call executableOffsetToFunction[t1]
1870 restoreReturnAddressBeforeReturn(t3)
1871 loadp JITStackFrame::vm[sp], t3
1873 loadp JITStackFrame::vm[sp], t3
1874 storep cfr, VM::topCallFrame[t3]
1876 preserveReturnAddressAfterCall(t3)
1877 storep t3, ReturnPC[cfr]
1879 loadi Callee + PayloadOffset[cfr], t1
1880 loadp JSFunction::m_executable[t1], t1
1883 call executableOffsetToFunction[t1]
1884 restoreReturnAddressBeforeReturn(t3)
1885 loadp JITStackFrame::vm[sp], t3
1887 loadp JITStackFrame::vm[sp], t3
1888 storep cfr, VM::topCallFrame[t3]
1890 preserveReturnAddressAfterCall(t3)
1891 storep t3, ReturnPC[cfr]
1893 loadi Callee + PayloadOffset[cfr], t1
1894 loadp JSFunction::m_executable[t1], t1
1896 call executableOffsetToFunction[t1]
1897 restoreReturnAddressBeforeReturn(t3)
1898 loadp JITStackFrame::vm[sp], t3
1900 loadp JITStackFrame::vm[sp], t3
1901 storep cfr, VM::topCallFrame[t3]
1903 preserveReturnAddressAfterCall(t3)
1904 storep t3, ReturnPC[cfr]
1906 loadi Callee + PayloadOffset[cfr], t1
1907 loadp JSFunction::m_executable[t1], t1
1909 cloopCallNative executableOffsetToFunction[t1]
1910 restoreReturnAddressBeforeReturn(t3)
1911 loadp JITStackFrame::vm[sp], t3
1915 bineq VM::exception + TagOffset[t3], EmptyValueTag, .exception
1918 preserveReturnAddressAfterCall(t1) # This is really only needed on X86
1919 loadi ArgumentCount + TagOffset[cfr], PC
1920 callSlowPath(_llint_throw_from_native_call)
1921 jmp _llint_throw_from_slow_path_trampoline