]> git.saurik.com Git - apple/javascriptcore.git/blob - llint/LowLevelInterpreter32_64.asm
8aa8126ad169f4a23b061e0cb4314b8bac5e0ac5
[apple/javascriptcore.git] / llint / LowLevelInterpreter32_64.asm
1 # Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
2 #
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
5 # are met:
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
11 #
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
23
24
25 # Crash course on the language that this is written in (which I just call
26 # "assembly" even though it's more than that):
27 #
28 # - Mostly gas-style operand ordering. The last operand tends to be the
29 # destination. So "a := b" is written as "mov b, a". But unlike gas,
30 # comparisons are in-order, so "if (a < b)" is written as
31 # "bilt a, b, ...".
32 #
33 # - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
34 # Currently this is just 32-bit so "i" and "p" are interchangeable
35 # except when an op supports one but not the other.
36 #
37 # - In general, valid operands for macro invocations and instructions are
38 # registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
39 # (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
40 # (eg "_foo" or ".foo"). Macro invocations can also take anonymous
41 # macros as operands. Instructions cannot take anonymous macros.
42 #
43 # - Labels must have names that begin with either "_" or ".". A "." label
44 # is local and gets renamed before code gen to minimize namespace
45 # pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
46 # may or may not be removed during code gen depending on whether the asm
47 # conventions for C name mangling on the target platform mandate a "_"
48 # prefix.
49 #
50 # - A "macro" is a lambda expression, which may be either anonymous or
51 # named. But this has caveats. "macro" can take zero or more arguments,
52 # which may be macros or any valid operands, but it can only return
53 # code. But you can do Turing-complete things via continuation passing
54 # style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
55 # that, since you'll just crash the assembler.
56 #
57 # - An "if" is a conditional on settings. Any identifier supplied in the
58 # predicate of an "if" is assumed to be a #define that is available
59 # during code gen. So you can't use "if" for computation in a macro, but
60 # you can use it to select different pieces of code for different
61 # platforms.
62 #
63 # - Arguments to macros follow lexical scoping rather than dynamic scoping.
64 # Const's also follow lexical scoping and may override (hide) arguments
65 # or other consts. All variables (arguments and constants) can be bound
66 # to operands. Additionally, arguments (but not constants) can be bound
67 # to macros.
68
69
70 # Below we have a bunch of constant declarations. Each constant must have
71 # a corresponding ASSERT() in LLIntData.cpp.
72
73 # Utilities
# Advance PC by `advance` bytecode slots (4 bytes per slot on 32-bit) and
# jump to the opcode handler whose address is stored at the new PC.
74 macro dispatch(advance)
75 addp advance * 4, PC
76 jmp [PC]
77 end
78
# Dispatch to a branch target. pcOffset is a slot count; shift left by 2 to
# convert it to a byte offset, add to PC, and jump indirect. Mutates pcOffset.
79 macro dispatchBranchWithOffset(pcOffset)
80 lshifti 2, pcOffset
81 addp pcOffset, PC
82 jmp [PC]
83 end
84
# Load the branch offset operand from the instruction stream and dispatch to
# it. Clobbers t0.
85 macro dispatchBranch(pcOffset)
86 loadi pcOffset, t0
87 dispatchBranchWithOffset(t0)
88 end
89
# Resume the interpreter after a JS call returns. The saved PC lives in the
# tag half of the ArgumentCount slot (see callCallSlowPath). The call result
# arrives in t1 (tag) / t0 (payload) and is stored to the destination virtual
# register named by the operand at 4[PC], then value-profiled.
90 macro dispatchAfterCall()
91 loadi ArgumentCount + TagOffset[cfr], PC
92 loadi 4[PC], t2
93 storei t1, TagOffset[cfr, t2, 8]
94 storei t0, PayloadOffset[cfr, t2, 8]
95 valueProfile(t1, t0, 4 * (CallOpCodeSize - 1), t3)
96 dispatch(CallOpCodeSize)
97 end
98
# Call a C function with two arguments, per-platform. On register-argument
# targets (ARM/MIPS) the args go in a0/a1; on x86 they are pushed on the
# stack (the extra subp 8 keeps the stack 16-byte aligned across the call);
# SH4 uses the assembler's setargs; C_LOOP goes through the cloop trampoline.
99 macro cCall2(function, arg1, arg2)
100 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
101 move arg1, a0
102 move arg2, a1
103 call function
104 elsif X86 or X86_WIN
105 subp 8, sp
106 push arg2
107 push arg1
108 call function
109 addp 16, sp
110 elsif SH4
111 setargs arg1, arg2
112 call function
113 elsif C_LOOP
114 cloopCallSlowPath function, arg1, arg2
115 else
116 error
117 end
118 end
119
# Same as cCall2 but for functions with no return value; C_LOOP needs a
# distinct void trampoline, all other platforms just reuse cCall2.
120 macro cCall2Void(function, arg1, arg2)
121 if C_LOOP
122 cloopCallSlowPathVoid function, arg1, arg2
123 else
124 cCall2(function, arg1, arg2)
125 end
126 end
127
128 # This barely works. arg3 and arg4 should probably be immediates.
# Call a C function with four arguments. Note: unsupported under C_LOOP
# (deliberately errors) — only the trace macros below use this.
129 macro cCall4(function, arg1, arg2, arg3, arg4)
130 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
131 move arg1, a0
132 move arg2, a1
133 move arg3, a2
134 move arg4, a3
135 call function
136 elsif X86 or X86_WIN
137 push arg4
138 push arg3
139 push arg2
140 push arg1
141 call function
142 addp 16, sp
143 elsif SH4
144 setargs arg1, arg2, arg3, arg4
145 call function
146 elsif C_LOOP
147 error
148 else
149 error
150 end
151 end
152
# Invoke a slow-path C function with (cfr, PC). Slow paths return the next PC
# in t0, which becomes the new PC.
153 macro callSlowPath(slowPath)
154 cCall2(slowPath, cfr, PC)
155 move t0, PC
156 end
157
# VM entry: build the sentinel (VM entry) frame, check stack capacity, copy
# the ProtoCallFrame header and arguments onto the stack, call into JS (or a
# host function) via makeCall, then tear the sentinel frame down and return.
#
# Register roles differ per platform: on x86 the entry/vm/protoCallFrame
# values are loaded from the caller's stack; elsewhere they arrive in a0-a3.
# The temp aliasing noted in the per-platform const blocks is deliberate and
# load order below depends on it.
158 macro doCallToJavaScript(makeCall)
159 if X86 or X86_WIN
160 const entry = t4
161 const vm = t3
162 const protoCallFrame = t5
163
164 const previousCFR = t0
165 const previousPC = t1
166 const temp1 = t0 # Same as previousCFR
167 const temp2 = t1 # Same as previousPC
168 const temp3 = t2
169 const temp4 = t3 # same as vm
170 elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP
171 const entry = a0
172 const vm = a1
173 const protoCallFrame = a2
174
175 const previousCFR = t3
176 const previousPC = lr
177 const temp1 = t3 # Same as previousCFR
178 const temp2 = t4
179 const temp3 = t5
180 const temp4 = t4 # Same as temp2
181 elsif MIPS
182 const entry = a0
183 const vmTopCallFrame = a1
184 const protoCallFrame = a2
185 const topOfStack = a3
186
187 const previousCFR = t2
188 const previousPC = lr
189 const temp1 = t3
190 const temp2 = t5
191 const temp3 = t4
192 const temp4 = t6
193 elsif SH4
194 const entry = a0
195 const vm = a1
196 const protoCallFrame = a2
197
198 const previousCFR = t3
199 const previousPC = lr
200 const temp1 = t3 # Same as previousCFR
201 const temp2 = a3
202 const temp3 = t8
203 const temp4 = t9
204 end
205
206 callToJavaScriptPrologue()
207
# On x86 the arguments were passed on the C stack; fish them out relative to
# sp (offsets differ on Windows because of the prologue — see X86_WIN uses
# of temp3 as an extra displacement).
208 if X86
209 loadp 36[sp], vm
210 loadp 32[sp], entry
211 elsif X86_WIN
212 loadp 40[sp, temp3], vm
213 loadp 36[sp, temp3], entry
214 else
215 move cfr, previousCFR
216 end
217
218 checkStackPointerAlignment(temp2, 0xbad0dc01)
219
220 # The stack reserved zone ensures that we have adequate space for the
221 # VMEntrySentinelFrame. Proceed with allocating and initializing the
222 # sentinel frame.
223 move sp, cfr
224 subp CallFrameHeaderSlots * 8, cfr
225 storep 0, ArgumentCount[cfr]
226 storep vm, Callee[cfr]
# Stash the VM's previous topCallFrame in the sentinel's ScopeChain slot so
# it can be restored on exit (see .calleeFramePopped below).
227 loadp VM::topCallFrame[vm], temp2
228 storep temp2, ScopeChain[cfr]
# CodeBlock == 1 marks this frame as the sentinel (tested on unwind paths).
229 storep 1, CodeBlock[cfr]
230 if X86
231 loadp 28[sp], previousPC
232 loadp 24[sp], previousCFR
233 elsif X86_WIN
234 loadp 32[sp, temp3], previousPC
235 loadp 28[sp, temp3], previousCFR
236 end
237 storep previousPC, ReturnPC[cfr]
238 storep previousCFR, CallerFrame[cfr]
239
240 if X86
241 loadp 40[sp], protoCallFrame
242 elsif X86_WIN
243 loadp 44[sp, temp3], protoCallFrame
244 end
245
# temp1 = prospective new stack pointer: cfr minus space for the header plus
# the padded argument count (8 bytes per slot).
246 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
247 addp CallFrameHeaderSlots, temp2, temp2
248 lshiftp 3, temp2
249 subp cfr, temp2, temp1
250
251 # Ensure that we have enough additional stack capacity for the incoming args,
252 # and the frame for the JS code we're executing. We need to do this check
253 # before we start copying the args from the protoCallFrame below.
254 bpaeq temp1, VM::m_jsStackLimit[vm], .stackHeightOK
255
256 if ARMv7
257 subp cfr, 8, temp2
258 move temp2, sp
259 else
260 subp cfr, 8, sp
261 end
262
# Under C_LOOP, give the cloop stack a chance to grow; entry/vm are saved in
# temps around the slow-path call because it may clobber them.
263 if C_LOOP
264 move entry, temp2
265 move vm, temp3
266 cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, temp1
267 bpeq t0, 0, .stackCheckFailed
268 move temp2, entry
269 move temp3, vm
270 jmp .stackHeightOK
271
272 .stackCheckFailed:
273 move temp2, entry
274 move temp3, vm
275 end
276
# Stack overflow: throw from C, unwind the sentinel frame, and return.
277 cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame)
278 callToJavaScriptEpilogue()
279 ret
280
281 .stackHeightOK:
282 move temp1, sp
# Copy the 5 header JSValue slots (counted down from 5) from the
# protoCallFrame into the new frame.
283 move 5, temp1
284
285 .copyHeaderLoop:
286 subi 1, temp1
287 loadi TagOffset[protoCallFrame, temp1, 8], temp3
288 storei temp3, TagOffset + CodeBlock[sp, temp1, 8]
289 loadi PayloadOffset[protoCallFrame, temp1, 8], temp3
290 storei temp3, PayloadOffset + CodeBlock[sp, temp1, 8]
291 btinz temp1, .copyHeaderLoop
292
# temp2 = actual arg count - 1 (excluding `this`), temp3 = padded count - 1.
293 loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
294 subi 1, temp2
295 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
296 subi 1, temp3
297
# Fill any padding slots beyond the actual args with jsUndefined().
298 bieq temp2, temp3, .copyArgs
299 .fillExtraArgsLoop:
300 subi 1, temp3
301 storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, temp3, 8]
302 storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, temp3, 8]
303 bineq temp2, temp3, .fillExtraArgsLoop
304
305 .copyArgs:
306 loadp ProtoCallFrame::args[protoCallFrame], temp1
307
308 .copyArgsLoop:
309 btiz temp2, .copyArgsDone
310 subi 1, temp2
311 loadi TagOffset[temp1, temp2, 8], temp3
312 storei temp3, ThisArgumentOffset + 8 + TagOffset[sp, temp2, 8]
313 loadi PayloadOffset[temp1, temp2, 8], temp3
314 storei temp3, ThisArgumentOffset + 8 + PayloadOffset[sp, temp2, 8]
315 jmp .copyArgsLoop
316
317 .copyArgsDone:
318 storep sp, VM::topCallFrame[vm]
319
320 makeCall(entry, temp1, temp2)
321
# If the callee left us on the sentinel frame (CodeBlock == 1) skip the pop;
# otherwise pop back up to the sentinel.
322 bpeq CodeBlock[cfr], 1, .calleeFramePopped
323 loadp CallerFrame[cfr], cfr
324
325 .calleeFramePopped:
326 loadp Callee[cfr], temp3 # VM
327 loadp ScopeChain[cfr], temp4 # previous topCallFrame
328 storep temp4, VM::topCallFrame[temp3]
329
330 callToJavaScriptEpilogue()
331 ret
332 end
333
# makeCall implementation for JS callees: bump sp past the CallerFrameAndPC
# area, call the entry point (via the cloop trampoline under C_LOOP), and
# restore sp. Alignment is asserted on both sides of the call.
334 macro makeJavaScriptCall(entry, temp, unused)
335 addp CallerFrameAndPCSize, sp
336 checkStackPointerAlignment(t2, 0xbad0dc02)
337 if C_LOOP
338 cloopCallJSFunction entry
339 else
340 call entry
341 end
342 checkStackPointerAlignment(t2, 0xbad0dc03)
343 subp CallerFrameAndPCSize, sp
344 end
345
# makeCall implementation for native (host) callees. The callee frame pointer
# is passed as the single C argument: in a0 on register-argument targets, or
# pushed on the stack (and mirrored in ecx for fastcall) on x86.
346 macro makeHostFunctionCall(entry, temp1, temp2)
347 move entry, temp1
348 if C_LOOP
349 move sp, a0
350 storep cfr, [sp]
351 storep lr, PtrSize[sp]
352 cloopCallNative temp1
353 else
354 if X86 or X86_WIN
355 # Put callee frame pointer on stack as arg0, also put it in ecx for "fastcall" targets
356 move 0, temp2
357 move temp2, 4[sp] # put 0 in ReturnPC
358 move cfr, [sp] # put caller frame pointer into callee frame since callee prologue can't
359 move sp, t2 # t2 is ecx
360 push temp2 # Push dummy arg1
361 push t2
362 else
363 move sp, a0
364 addp CallerFrameAndPCSize, sp
365 end
366 call temp1
367 if X86 or X86_WIN
# Pop the two pushed words (dummy arg1 + frame pointer arg).
368 addp 8, sp
369 else
370 subp CallerFrameAndPCSize, sp
371 end
372 end
373 end
374
# Landing pad for an exception that propagates out of the outermost JS frame:
# recover the VM from the current callee's MarkedBlock, pop to the sentinel
# frame if necessary, restore VM::topCallFrame, and return to the C caller.
375 _handleUncaughtException:
376 loadp ScopeChain + PayloadOffset[cfr], t3
377 andp MarkedBlockMask, t3
378 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
379 loadp VM::callFrameForThrow[t3], cfr
380
381 # So far, we've unwound the stack to the frame just below the sentinel frame, except
382 # in the case of stack overflow in the first function called from callToJavaScript.
383 # Check if we need to pop to the sentinel frame and do the necessary clean up for
384 # returning to the caller C frame.
385 bpeq CodeBlock[cfr], 1, .handleUncaughtExceptionAlreadyIsSentinel
386 loadp CallerFrame + PayloadOffset[cfr], cfr
387 .handleUncaughtExceptionAlreadyIsSentinel:
388
389 loadp Callee + PayloadOffset[cfr], t3 # VM
390 loadp ScopeChain + PayloadOffset[cfr], t5 # previous topCallFrame
391 storep t5, VM::topCallFrame[t3]
392
393 callToJavaScriptEpilogue()
394 ret
395
# Return from a host-function thunk: pop the extra stack the thunk allocated
# and return to the JS caller.
396 macro doReturnFromHostFunction(extraStackSpace)
397 functionEpilogue(extraStackSpace)
398 ret
399 end
400
401 # Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
402 # should be an immediate integer - any integer you like; use it to identify the place you're
403 # debugging from. operand should likewise be an immediate, and should identify the operand
404 # in the instruction stream you'd like to print out.
# The trace function returns the (possibly updated) PC in t0 and cfr in t1.
405 macro traceOperand(fromWhere, operand)
406 cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
407 move t0, PC
408 move t1, cfr
409 end
410
411 # Debugging operation if you'd like to print the value of an operand in the instruction
412 # stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
413 # value.
414 macro traceValue(fromWhere, operand)
415 cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
416 move t0, PC
417 move t1, cfr
418 end
419
420 # Call a slowPath for call opcodes.
# Call a slow path for a call opcode. PC is stashed in the tag half of the
# ArgumentCount slot (dispatchAfterCall reloads it from there); the slow path
# returns the machine code target in t0, which `action` consumes.
421 macro callCallSlowPath(slowPath, action)
422 storep PC, ArgumentCount + TagOffset[cfr]
423 cCall2(slowPath, cfr, PC)
424 action(t0)
425 end
426
# Run the watchdog-timer slow path; a nonzero t0 means a termination exception
# was thrown and we branch to throwHandler, otherwise PC is restored from the
# ArgumentCount tag slot and execution continues.
427 macro callWatchdogTimerHandler(throwHandler)
428 storei PC, ArgumentCount + TagOffset[cfr]
429 cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
430 btpnz t0, throwHandler
431 loadi ArgumentCount + TagOffset[cfr], PC
432 end
433
# At loop back-edges, consider tiering up via OSR. If _llint_loop_osr returns
# a nonzero code pointer in t0 we switch stacks (t1) and jump into JIT code;
# otherwise we recover PC and keep interpreting.
434 macro checkSwitchToJITForLoop()
435 checkSwitchToJIT(
436 1,
437 macro ()
438 storei PC, ArgumentCount + TagOffset[cfr]
439 cCall2(_llint_loop_osr, cfr, PC)
440 btpz t0, .recover
441 move t1, sp
442 jmp t0
443 .recover:
444 loadi ArgumentCount + TagOffset[cfr], PC
445 end)
446 end
447
# Load a virtual register: read the operand index from the instruction into
# `index`, then load its tag and payload from the frame.
448 macro loadVariable(operand, index, tag, payload)
449 loadisFromInstruction(operand, index)
450 loadi TagOffset[cfr, index, 8], tag
451 loadi PayloadOffset[cfr, index, 8], payload
452 end
453
454 # Index, tag, and payload must be different registers. Index is not
455 # changed.
# Load the JSValue for operand `index` into tag/payload: a frame slot when
# index < FirstConstantRegisterIndex, otherwise the CodeBlock's constant pool.
456 macro loadConstantOrVariable(index, tag, payload)
457 bigteq index, FirstConstantRegisterIndex, .constant
458 loadi TagOffset[cfr, index, 8], tag
459 loadi PayloadOffset[cfr, index, 8], payload
460 jmp .done
461 .constant:
462 loadp CodeBlock[cfr], payload
463 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
464 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
465 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
466 loadp TagOffset[payload, index, 8], tag
467 loadp PayloadOffset[payload, index, 8], payload
468 .done:
469 end
470
# Like loadConstantOrVariable but loads only the tag word. `tag` doubles as
# the scratch register for the constant-pool pointer.
471 macro loadConstantOrVariableTag(index, tag)
472 bigteq index, FirstConstantRegisterIndex, .constant
473 loadi TagOffset[cfr, index, 8], tag
474 jmp .done
475 .constant:
476 loadp CodeBlock[cfr], tag
477 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
478 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
479 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
480 loadp TagOffset[tag, index, 8], tag
481 .done:
482 end
483
484 # Index and payload may be the same register. Index may be clobbered.
# Two-register variant: on the constant path the index is folded into the
# base pointer (lshift + add) so payload and index can alias.
485 macro loadConstantOrVariable2Reg(index, tag, payload)
486 bigteq index, FirstConstantRegisterIndex, .constant
487 loadi TagOffset[cfr, index, 8], tag
488 loadi PayloadOffset[cfr, index, 8], payload
489 jmp .done
490 .constant:
491 loadp CodeBlock[cfr], tag
492 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
493 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
494 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
495 lshifti 3, index
496 addp index, tag
497 loadp PayloadOffset[tag], payload
498 loadp TagOffset[tag], tag
499 .done:
500 end
501
# Load only the payload of operand `index`, invoking the caller-supplied
# tagCheck macro on the tag's address so the caller can branch on tag type
# without using an extra register.
502 macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
503 bigteq index, FirstConstantRegisterIndex, .constant
504 tagCheck(TagOffset[cfr, index, 8])
505 loadi PayloadOffset[cfr, index, 8], payload
506 jmp .done
507 .constant:
508 loadp CodeBlock[cfr], payload
509 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
510 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
511 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
512 tagCheck(TagOffset[payload, index, 8])
513 loadp PayloadOffset[payload, index, 8], payload
514 .done:
515 end
516
517 # Index and payload must be different registers. Index is not mutated. Use
518 # this if you know what the tag of the variable should be. Doing the tag
519 # test as part of loading the variable reduces register use, but may not
520 # be faster than doing loadConstantOrVariable followed by a branch on the
521 # tag.
# Load the payload, branching to `slow` if the operand's tag != expectedTag.
522 macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
523 loadConstantOrVariablePayloadTagCustom(
524 index,
525 macro (actualTag) bineq actualTag, expectedTag, slow end,
526 payload)
527 end
528
# Load the payload with no tag check (empty tagCheck macro).
529 macro loadConstantOrVariablePayloadUnchecked(index, payload)
530 loadConstantOrVariablePayloadTagCustom(
531 index,
532 macro (actualTag) end,
533 payload)
534 end
535
# Install a structure on a cell: write the structure ID, then copy the
# indexing type / type-info word out of the structure's ID blob into the cell.
536 macro storeStructureWithTypeInfo(cell, structure, scratch)
537 storep structure, JSCell::m_structureID[cell]
538
539 loadi Structure::m_blob + StructureIDBlob::u.words.word2[structure], scratch
540 storei scratch, JSCell::m_indexingType[cell]
541 end
542
# Generational write barrier on the cell named by instruction operand
# `cellOperand` (GGC builds only). Skips the barrier when the operand is not
# a cell or its mark byte says no barrier is needed; otherwise calls the C
# slow path with cfr/PC saved around the call.
543 macro writeBarrierOnOperand(cellOperand)
544 if GGC
545 loadisFromInstruction(cellOperand, t1)
546 loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
547 checkMarkByte(t2, t1, t3,
548 macro(gcData)
549 btbnz gcData, .writeBarrierDone
550 push cfr, PC
551 # We make two extra slots because cCall2 will poke.
552 subp 8, sp
553 cCall2Void(_llint_write_barrier_slow, cfr, t2)
554 addp 8, sp
555 pop PC, cfr
556 end
557 )
558 .writeBarrierDone:
559 end
560 end
561
# Barrier for a (cell, value) store: only needed when the stored value is a
# cell, in which case defer to writeBarrierOnOperand for the target cell.
562 macro writeBarrierOnOperands(cellOperand, valueOperand)
563 if GGC
564 loadisFromInstruction(valueOperand, t1)
565 loadConstantOrVariableTag(t1, t0)
566 bineq t0, CellTag, .writeBarrierDone
567
568 writeBarrierOnOperand(cellOperand)
569 .writeBarrierDone:
570 end
571 end
572
# Barrier for stores into the global object: if the stored value is a cell,
# check the global object's mark byte and call the write-barrier slow path
# when required (cfr/PC saved around the C call, as above).
573 macro writeBarrierOnGlobalObject(valueOperand)
574 if GGC
575 loadisFromInstruction(valueOperand, t1)
576 loadConstantOrVariableTag(t1, t0)
577 bineq t0, CellTag, .writeBarrierDone
578
579 loadp CodeBlock[cfr], t3
580 loadp CodeBlock::m_globalObject[t3], t3
581 checkMarkByte(t3, t1, t2,
582 macro(gcData)
583 btbnz gcData, .writeBarrierDone
584 push cfr, PC
585 # We make two extra slots because cCall2 will poke.
586 subp 8, sp
587 cCall2Void(_llint_write_barrier_slow, cfr, t3)
588 addp 8, sp
589 pop PC, cfr
590 end
591 )
592 .writeBarrierDone:
593 end
594 end
595
# Record tag/payload into the ValueProfile bucket whose pointer is embedded
# in the instruction stream at offset `operand` from PC.
596 macro valueProfile(tag, payload, operand, scratch)
597 loadp operand[PC], scratch
598 storei tag, ValueProfile::m_buckets + TagOffset[scratch]
599 storei payload, ValueProfile::m_buckets + PayloadOffset[scratch]
600 end
601
602
603 # Entrypoints into the interpreter
604
605 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
605 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Check that the caller passed at least m_numParameters arguments; if not,
# call the arity-fixup slow path which either reports an error (t0 != 0),
# hands back a thunk to call, or asks us to shuffle the frame inline by
# paddedStackSpace slots and fill the new argument slots with undefined.
606 macro functionArityCheck(doneLabel, slowPath)
607 loadi PayloadOffset + ArgumentCount[cfr], t0
608 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
609 cCall2(slowPath, cfr, PC) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error
610 btiz t0, .noError
611 move t1, cfr # t1 contains caller frame
612 jmp _llint_throw_from_slow_path_trampoline
613
614 .noError:
615 # t1 points to ArityCheckData.
616 loadp CommonSlowPaths::ArityCheckData::thunkToCall[t1], t2
617 btpz t2, .proceedInline
618
619 loadp CommonSlowPaths::ArityCheckData::returnPC[t1], t5
620 loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t0
621 call t2
622 if ASSERT_ENABLED
623 loadp ReturnPC[cfr], t0
624 loadp [t0], t0
625 end
626 jmp .continue
627
628 .proceedInline:
629 loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t1
630 btiz t1, .continue
631
632 // Move frame up "t1 * 2" slots
633 lshiftp 1, t1
634 negi t1
635 move cfr, t3
# t2 = number of JSValue slots to relocate (header + arguments).
636 loadi PayloadOffset + ArgumentCount[cfr], t2
637 addi CallFrameHeaderSlots, t2
638 .copyLoop:
639 loadi PayloadOffset[t3], t0
640 storei t0, PayloadOffset[t3, t1, 8]
641 loadi TagOffset[t3], t0
642 storei t0, TagOffset[t3, t1, 8]
643 addp 8, t3
644 bsubinz 1, t2, .copyLoop
645
646 // Fill new slots with JSUndefined
647 move t1, t2
648 .fillLoop:
649 move 0, t0
650 storei t0, PayloadOffset[t3, t1, 8]
651 move UndefinedTag, t0
652 storei t0, TagOffset[t3, t1, 8]
653 addp 8, t3
654 baddinz 1, t2, .fillLoop
655
# Shift cfr and sp down by the same (negative) byte distance.
656 lshiftp 3, t1
657 addp t1, cfr
658 addp t1, sp
659 .continue:
660 # Reload CodeBlock and PC, since the slow_path clobbered it.
661 loadp CodeBlock[cfr], t1
662 loadp CodeBlock::m_instructions[t1], PC
663 jmp doneLabel
664 end
665
# Recover the VM pointer from the callee cell's MarkedBlock and branch to
# `label` if VM::m_exception holds a non-empty value. Clobbers t3.
666 macro branchIfException(label)
667 loadp ScopeChain[cfr], t3
668 andp MarkedBlockMask, t3
669 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
670 bieq VM::m_exception + TagOffset[t3], EmptyValueTag, .noException
671 jmp label
672 .noException:
673 end
674
675
676 # Instruction implementations
677
# op_enter: clear all local variables to jsUndefined(). Locals live at
# negative offsets from cfr, so the loop counts t2 from -numVars up to 0.
678 _llint_op_enter:
679 traceExecution()
680 checkStackPointerAlignment(t2, 0xdead00e1)
681 loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
682 loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
683 btiz t2, .opEnterDone
684 move UndefinedTag, t0
685 move 0, t1
686 negi t2
687 .opEnterLoop:
688 storei t0, TagOffset[cfr, t2, 8]
689 storei t1, PayloadOffset[cfr, t2, 8]
690 addi 1, t2
691 btinz t2, .opEnterLoop
692 .opEnterDone:
693 callSlowPath(_slow_path_enter)
694 dispatch(1)
695
696
# op_create_activation: create the activation lazily — only call the slow
# path if the destination slot is still the empty value.
697 _llint_op_create_activation:
698 traceExecution()
699 loadi 4[PC], t0
700 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
701 callSlowPath(_llint_slow_path_create_activation)
702 .opCreateActivationDone:
703 dispatch(2)
704
705
# op_init_lazy_reg: initialize the operand register to the empty value
# (tag = EmptyValueTag, payload = 0) so later ops can detect "not created".
706 _llint_op_init_lazy_reg:
707 traceExecution()
708 loadi 4[PC], t0
709 storei EmptyValueTag, TagOffset[cfr, t0, 8]
710 storei 0, PayloadOffset[cfr, t0, 8]
711 dispatch(2)
712
713
# op_create_arguments: like op_create_activation — create the arguments
# object only if the destination slot is still empty.
714 _llint_op_create_arguments:
715 traceExecution()
716 loadi 4[PC], t0
717 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
718 callSlowPath(_slow_path_create_arguments)
719 .opCreateArgumentsDone:
720 dispatch(2)
721
722
# op_create_this: fast-path object allocation for `new` using the callee
# function's ObjectAllocationProfile (allocator + structure). Falls back to
# the slow path if there is no profiled allocator or allocation fails.
723 _llint_op_create_this:
724 traceExecution()
725 loadi 8[PC], t0
726 loadp PayloadOffset[cfr, t0, 8], t0
727 loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
728 loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
729 btpz t1, .opCreateThisSlow
730 allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
731 loadi 4[PC], t1
732 storei CellTag, TagOffset[cfr, t1, 8]
733 storei t0, PayloadOffset[cfr, t1, 8]
734 dispatch(4)
735
736 .opCreateThisSlow:
737 callSlowPath(_slow_path_create_this)
738 dispatch(4)
739
740
# op_get_callee: store the callee cell into the destination if it matches the
# pointer cached in the instruction (operand 2); otherwise take the slow path
# to update the cache.
741 _llint_op_get_callee:
742 traceExecution()
743 loadi 4[PC], t0
744 loadp PayloadOffset + Callee[cfr], t1
745 loadpFromInstruction(2, t2)
746 bpneq t1, t2, .opGetCalleeSlow
747 storei CellTag, TagOffset[cfr, t0, 8]
748 storei t1, PayloadOffset[cfr, t0, 8]
749 dispatch(3)
750
751 .opGetCalleeSlow:
752 callSlowPath(_slow_path_get_callee)
753 dispatch(3)
754
# op_to_this: fast path succeeds (no conversion needed) only when `this` is a
# final object whose structure matches the one cached in the instruction;
# anything else goes to the slow path.
755 _llint_op_to_this:
756 traceExecution()
757 loadi 4[PC], t0
758 bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow
759 loadi PayloadOffset[cfr, t0, 8], t0
760 bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
761 loadpFromInstruction(2, t2)
762 bpneq JSCell::m_structureID[t0], t2, .opToThisSlow
763 dispatch(3)
764
765 .opToThisSlow:
766 callSlowPath(_slow_path_to_this)
767 dispatch(3)
768
769
# op_new_object: allocate an empty object from the instruction's
# ObjectAllocationProfile; slow path on allocation failure.
770 _llint_op_new_object:
771 traceExecution()
772 loadpFromInstruction(3, t0)
773 loadp ObjectAllocationProfile::m_allocator[t0], t1
774 loadp ObjectAllocationProfile::m_structure[t0], t2
775 allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
776 loadi 4[PC], t1
777 storei CellTag, TagOffset[cfr, t1, 8]
778 storei t0, PayloadOffset[cfr, t1, 8]
779 dispatch(4)
780
781 .opNewObjectSlow:
782 callSlowPath(_llint_slow_path_new_object)
783 dispatch(4)
784
785
# op_mov: dst = src. Copies the full JSValue (tag + payload) of the source
# operand (register or constant) into the destination register.
786 _llint_op_mov:
787 traceExecution()
788 loadi 8[PC], t1
789 loadi 4[PC], t0
790 loadConstantOrVariable(t1, t2, t3)
791 storei t2, TagOffset[cfr, t0, 8]
792 storei t3, PayloadOffset[cfr, t0, 8]
793 dispatch(3)
794
795
# Watchpoint notification for captured-variable writes: if the set is already
# invalidated there is nothing to do; otherwise any write of a value that
# differs from the inferred value must go to `slow` to fire the watchpoint.
796 macro notifyWrite(set, valueTag, valuePayload, scratch, slow)
797 loadb VariableWatchpointSet::m_state[set], scratch
798 bieq scratch, IsInvalidated, .done
799 bineq valuePayload, VariableWatchpointSet::m_inferredValue + PayloadOffset[set], slow
800 bineq valueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set], slow
801 .done:
802 end
803
# op_captured_mov: like op_mov, but the destination is a captured variable
# that may have a VariableWatchpointSet (operand 3); notify it before the
# store, taking the slow path if the watchpoint must fire.
804 _llint_op_captured_mov:
805 traceExecution()
806 loadi 8[PC], t1
807 loadConstantOrVariable(t1, t2, t3)
808 loadpFromInstruction(3, t0)
809 btpz t0, .opCapturedMovReady
810 notifyWrite(t0, t2, t3, t1, .opCapturedMovSlow)
811 .opCapturedMovReady:
812 loadi 4[PC], t0
813 storei t2, TagOffset[cfr, t0, 8]
814 storei t3, PayloadOffset[cfr, t0, 8]
815 dispatch(4)
816
817 .opCapturedMovSlow:
818 callSlowPath(_slow_path_captured_mov)
819 dispatch(4)
820
821
# op_not: logical negation. Fast path only for booleans (flip the payload's
# low bit with xori 1); non-boolean operands go to the slow path.
822 _llint_op_not:
823 traceExecution()
824 loadi 8[PC], t0
825 loadi 4[PC], t1
826 loadConstantOrVariable(t0, t2, t3)
827 bineq t2, BooleanTag, .opNotSlow
828 xori 1, t3
829 storei t2, TagOffset[cfr, t1, 8]
830 storei t3, PayloadOffset[cfr, t1, 8]
831 dispatch(3)
832
833 .opNotSlow:
834 callSlowPath(_slow_path_not)
835 dispatch(3)
836
837
# op_eq (==): fast path only when both tags are equal, not cells (cells may
# need string comparison), and not doubles (tags below LowestTag); then
# equality is a plain payload compare.
838 _llint_op_eq:
839 traceExecution()
840 loadi 12[PC], t2
841 loadi 8[PC], t0
842 loadConstantOrVariable(t2, t3, t1)
843 loadConstantOrVariable2Reg(t0, t2, t0)
844 bineq t2, t3, .opEqSlow
845 bieq t2, CellTag, .opEqSlow
846 bib t2, LowestTag, .opEqSlow
847 loadi 4[PC], t2
848 cieq t0, t1, t0
849 storei BooleanTag, TagOffset[cfr, t2, 8]
850 storei t0, PayloadOffset[cfr, t2, 8]
851 dispatch(4)
852
853 .opEqSlow:
854 callSlowPath(_slow_path_eq)
855 dispatch(4)
856
857
# op_eq_null: dst = (src == null). For cells, true only if the cell
# masquerades as undefined AND its structure's global object matches this
# CodeBlock's. For non-cells, true iff the tag is NullTag or UndefinedTag.
858 _llint_op_eq_null:
859 traceExecution()
860 loadi 8[PC], t0
861 loadi 4[PC], t3
862 assertNotConstant(t0)
863 loadi TagOffset[cfr, t0, 8], t1
864 loadi PayloadOffset[cfr, t0, 8], t0
865 bineq t1, CellTag, .opEqNullImmediate
866 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
867 move 0, t1
868 jmp .opEqNullNotImmediate
869 .opEqNullMasqueradesAsUndefined:
870 loadp JSCell::m_structureID[t0], t1
871 loadp CodeBlock[cfr], t0
872 loadp CodeBlock::m_globalObject[t0], t0
873 cpeq Structure::m_globalObject[t1], t0, t1
874 jmp .opEqNullNotImmediate
875 .opEqNullImmediate:
876 cieq t1, NullTag, t2
877 cieq t1, UndefinedTag, t1
878 ori t2, t1
879 .opEqNullNotImmediate:
880 storei BooleanTag, TagOffset[cfr, t3, 8]
881 storei t1, PayloadOffset[cfr, t3, 8]
882 dispatch(3)
883
884
# op_neq (!=): mirror of op_eq with the comparison inverted (cineq).
885 _llint_op_neq:
886 traceExecution()
887 loadi 12[PC], t2
888 loadi 8[PC], t0
889 loadConstantOrVariable(t2, t3, t1)
890 loadConstantOrVariable2Reg(t0, t2, t0)
891 bineq t2, t3, .opNeqSlow
892 bieq t2, CellTag, .opNeqSlow
893 bib t2, LowestTag, .opNeqSlow
894 loadi 4[PC], t2
895 cineq t0, t1, t0
896 storei BooleanTag, TagOffset[cfr, t2, 8]
897 storei t0, PayloadOffset[cfr, t2, 8]
898 dispatch(4)
899
900 .opNeqSlow:
901 callSlowPath(_slow_path_neq)
902 dispatch(4)
903
904
# op_neq_null: mirror of op_eq_null with inverted result. Note the immediate
# case combines the two inverted compares with `andi` (De Morgan of the `ori`
# in op_eq_null).
905 _llint_op_neq_null:
906 traceExecution()
907 loadi 8[PC], t0
908 loadi 4[PC], t3
909 assertNotConstant(t0)
910 loadi TagOffset[cfr, t0, 8], t1
911 loadi PayloadOffset[cfr, t0, 8], t0
912 bineq t1, CellTag, .opNeqNullImmediate
913 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
914 move 1, t1
915 jmp .opNeqNullNotImmediate
916 .opNeqNullMasqueradesAsUndefined:
917 loadp JSCell::m_structureID[t0], t1
918 loadp CodeBlock[cfr], t0
919 loadp CodeBlock::m_globalObject[t0], t0
920 cpneq Structure::m_globalObject[t1], t0, t1
921 jmp .opNeqNullNotImmediate
922 .opNeqNullImmediate:
923 cineq t1, NullTag, t2
924 cineq t1, UndefinedTag, t1
925 andi t2, t1
926 .opNeqNullNotImmediate:
927 storei BooleanTag, TagOffset[cfr, t3, 8]
928 storei t1, PayloadOffset[cfr, t3, 8]
929 dispatch(3)
930
931
# Shared body for op_stricteq / op_nstricteq. Fast path requires equal,
# non-double tags; two string cells must go to the slow path (content
# compare), but a string vs. non-string cell can be decided by pointer
# equality via `equalityOperation`.
932 macro strictEq(equalityOperation, slowPath)
933 loadi 12[PC], t2
934 loadi 8[PC], t0
935 loadConstantOrVariable(t2, t3, t1)
936 loadConstantOrVariable2Reg(t0, t2, t0)
937 bineq t2, t3, .slow
938 bib t2, LowestTag, .slow
939 bineq t2, CellTag, .notString
940 bbneq JSCell::m_type[t0], StringType, .notString
941 bbeq JSCell::m_type[t1], StringType, .slow
942 .notString:
943 loadi 4[PC], t2
944 equalityOperation(t0, t1, t0)
945 storei BooleanTag, TagOffset[cfr, t2, 8]
946 storei t0, PayloadOffset[cfr, t2, 8]
947 dispatch(4)
948
949 .slow:
950 callSlowPath(slowPath)
951 dispatch(4)
952 end
953
# op_stricteq (===): strictEq with payload equality.
954 _llint_op_stricteq:
955 traceExecution()
956 strictEq(macro (left, right, result) cieq left, right, result end, _slow_path_stricteq)
957
958
# op_nstricteq (!==): strictEq with payload inequality.
959 _llint_op_nstricteq:
960 traceExecution()
961 strictEq(macro (left, right, result) cineq left, right, result end, _slow_path_nstricteq)
962
963
# op_inc: increment an int32 in place; overflow (baddio) or a non-int operand
# takes the slow path.
964 _llint_op_inc:
965 traceExecution()
966 loadi 4[PC], t0
967 bineq TagOffset[cfr, t0, 8], Int32Tag, .opIncSlow
968 loadi PayloadOffset[cfr, t0, 8], t1
969 baddio 1, t1, .opIncSlow
970 storei t1, PayloadOffset[cfr, t0, 8]
971 dispatch(2)
972
973 .opIncSlow:
974 callSlowPath(_slow_path_inc)
975 dispatch(2)
976
977
# op_dec: decrement an int32 in place; overflow (bsubio) or a non-int operand
# takes the slow path.
978 _llint_op_dec:
979 traceExecution()
980 loadi 4[PC], t0
981 bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow
982 loadi PayloadOffset[cfr, t0, 8], t1
983 bsubio 1, t1, .opDecSlow
984 storei t1, PayloadOffset[cfr, t0, 8]
985 dispatch(2)
986
987 .opDecSlow:
988 callSlowPath(_slow_path_dec)
989 dispatch(2)
990
991
# op_to_number: numbers (int32 or double, i.e. tag < LowestTag) pass through
# unchanged; everything else must be converted on the slow path.
992 _llint_op_to_number:
993 traceExecution()
994 loadi 8[PC], t0
995 loadi 4[PC], t1
996 loadConstantOrVariable(t0, t2, t3)
997 bieq t2, Int32Tag, .opToNumberIsInt
998 biaeq t2, LowestTag, .opToNumberSlow
999 .opToNumberIsInt:
1000 storei t2, TagOffset[cfr, t1, 8]
1001 storei t3, PayloadOffset[cfr, t1, 8]
1002 dispatch(3)
1003
1004 .opToNumberSlow:
1005 callSlowPath(_slow_path_to_number)
1006 dispatch(3)
1007
1008
# op_negate: for int32, negate the payload — but 0 and INT_MIN (mask
# 0x7fffffff all clear) must go slow since -0 and -INT_MIN aren't int32.
# For doubles, flip the sign bit in the tag (high word of the double).
1009 _llint_op_negate:
1010 traceExecution()
1011 loadi 8[PC], t0
1012 loadi 4[PC], t3
1013 loadConstantOrVariable(t0, t1, t2)
1014 bineq t1, Int32Tag, .opNegateSrcNotInt
1015 btiz t2, 0x7fffffff, .opNegateSlow
1016 negi t2
1017 storei Int32Tag, TagOffset[cfr, t3, 8]
1018 storei t2, PayloadOffset[cfr, t3, 8]
1019 dispatch(3)
1020 .opNegateSrcNotInt:
1021 bia t1, LowestTag, .opNegateSlow
1022 xori 0x80000000, t1
1023 storei t1, TagOffset[cfr, t3, 8]
1024 storei t2, PayloadOffset[cfr, t3, 8]
1025 dispatch(3)
1026
1027 .opNegateSlow:
1028 callSlowPath(_slow_path_negate)
1029 dispatch(3)
1030
1031
# Skeleton for binary arithmetic ops. Dispatches on the operand tags:
# int/int runs integerOperationAndStore (which handles overflow + store),
# any double combination boxes both sides into ft0/ft1 (fii2d reassembles a
# double from payload+tag words) and runs doubleOperation, storing the raw
# double back into the frame slot. Anything non-numeric goes to slowPath.
1032 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
1033 loadi 12[PC], t2
1034 loadi 8[PC], t0
1035 loadConstantOrVariable(t2, t3, t1)
1036 loadConstantOrVariable2Reg(t0, t2, t0)
1037 bineq t2, Int32Tag, .op1NotInt
1038 bineq t3, Int32Tag, .op2NotInt
1039 loadi 4[PC], t2
1040 integerOperationAndStore(t3, t1, t0, .slow, t2)
1041 dispatch(5)
1042
1043 .op1NotInt:
1044 # First operand is definitely not an int, the second operand could be anything.
1045 bia t2, LowestTag, .slow
1046 bib t3, LowestTag, .op1NotIntOp2Double
1047 bineq t3, Int32Tag, .slow
1048 ci2d t1, ft1
1049 jmp .op1NotIntReady
1050 .op1NotIntOp2Double:
1051 fii2d t1, t3, ft1
1052 .op1NotIntReady:
1053 loadi 4[PC], t1
1054 fii2d t0, t2, ft0
1055 doubleOperation(ft1, ft0)
1056 stored ft0, [cfr, t1, 8]
1057 dispatch(5)
1058
1059 .op2NotInt:
1060 # First operand is definitely an int, the second operand is definitely not.
1061 loadi 4[PC], t2
1062 bia t3, LowestTag, .slow
1063 ci2d t0, ft0
1064 fii2d t1, t3, ft1
1065 doubleOperation(ft1, ft0)
1066 stored ft0, [cfr, t2, 8]
1067 dispatch(5)
1068
1069 .slow:
1070 callSlowPath(slowPath)
1071 dispatch(5)
1072 end
1073
# Convenience wrapper over binaryOpCustomStore for ops whose integer path
# just computes into `right` and stores it with an Int32 tag.
# integerOperation(left, right, slow) must leave the result in `right`.
1074 macro binaryOp(integerOperation, doubleOperation, slowPath)
1075 binaryOpCustomStore(
1076 macro (int32Tag, left, right, slow, index)
1077 integerOperation(left, right, slow)
1078 storei int32Tag, TagOffset[cfr, index, 8]
1079 storei right, PayloadOffset[cfr, index, 8]
1080 end,
1081 doubleOperation, slowPath)
1082 end
1083
# add: int path is an overflow-checked add (baddio bails to slow on
# overflow); double path is a plain double add.
1084 _llint_op_add:
1085 traceExecution()
1086 binaryOp(
1087 macro (left, right, slow) baddio left, right, slow end,
1088 macro (left, right) addd left, right end,
1089 _slow_path_add)
1090
1091
# mul: overflow-checked integer multiply. When the product is zero, a
# negative operand means the true result is -0, which is not representable
# as an int32 — bail to the slow path in that case.
1092 _llint_op_mul:
1093 traceExecution()
1094 binaryOpCustomStore(
1095 macro (int32Tag, left, right, slow, index)
1096 const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant.
1097 move right, scratch
1098 bmulio left, scratch, slow
1099 btinz scratch, .done
# Product is 0: if either operand is negative the result is -0 => slow.
1100 bilt left, 0, slow
1101 bilt right, 0, slow
1102 .done:
1103 storei Int32Tag, TagOffset[cfr, index, 8]
1104 storei scratch, PayloadOffset[cfr, index, 8]
1105 end,
1106 macro (left, right) muld left, right end,
1107 _slow_path_mul)
1108
1109
# sub: int path is an overflow-checked subtract; double path is subd.
1110 _llint_op_sub:
1111 traceExecution()
1112 binaryOp(
1113 macro (left, right, slow) bsubio left, right, slow end,
1114 macro (left, right) subd left, right end,
1115 _slow_path_sub)
1116
1117
# div: even the int/int path divides in double precision, then converts
# back (bcd2i bails if the quotient is not exactly an int32, e.g. on a
# fractional result — presumably also for -0; verify against bcd2i's
# definition). Non-int quotients are stored as boxed doubles.
1118 _llint_op_div:
1119 traceExecution()
1120 binaryOpCustomStore(
1121 macro (int32Tag, left, right, slow, index)
1122 ci2d left, ft0
1123 ci2d right, ft1
1124 divd ft0, ft1
1125 bcd2i ft1, right, .notInt
1126 storei int32Tag, TagOffset[cfr, index, 8]
1127 storei right, PayloadOffset[cfr, index, 8]
1128 jmp .done
1129 .notInt:
1130 stored ft1, [cfr, index, 8]
1131 .done:
1132 end,
1133 macro (left, right) divd left, right end,
1134 _slow_path_div)
1135
1136
# Generic bitwise/shift op: dst(4[PC]), op1(8[PC]), op2(12[PC]).
# Fast path only when both operands are Int32; operation(t1, t0) combines
# op2's payload (t1) into op1's payload (t0). `advance` is the opcode
# length in words (shifts are 4, bitand/or/xor are 5).
1137 macro bitOp(operation, slowPath, advance)
1138 loadi 12[PC], t2
1139 loadi 8[PC], t0
1140 loadConstantOrVariable(t2, t3, t1)
1141 loadConstantOrVariable2Reg(t0, t2, t0)
1142 bineq t3, Int32Tag, .slow
1143 bineq t2, Int32Tag, .slow
1144 loadi 4[PC], t2
1145 operation(t1, t0)
# t3 still holds Int32Tag here; reuse it as the result tag.
1146 storei t3, TagOffset[cfr, t2, 8]
1147 storei t0, PayloadOffset[cfr, t2, 8]
1148 dispatch(advance)
1149
1150 .slow:
1151 callSlowPath(slowPath)
1152 dispatch(advance)
1153 end
1154
# Shift opcodes: thin bitOp instantiations; opcode length 4.
1155 _llint_op_lshift:
1156 traceExecution()
1157 bitOp(
1158 macro (left, right) lshifti left, right end,
1159 _slow_path_lshift,
1160 4)
1161
1162
# Arithmetic (sign-propagating) right shift.
1163 _llint_op_rshift:
1164 traceExecution()
1165 bitOp(
1166 macro (left, right) rshifti left, right end,
1167 _slow_path_rshift,
1168 4)
1169
1170
# Logical (zero-filling) right shift.
1171 _llint_op_urshift:
1172 traceExecution()
1173 bitOp(
1174 macro (left, right) urshifti left, right end,
1175 _slow_path_urshift,
1176 4)
1177
1178
# unsigned dst(4[PC]), src(8[PC]): re-tag a value known to be an unsigned
# result. Fast path only if the source is an Int32 whose payload is
# non-negative (a negative payload means the unsigned value doesn't fit
# in int32 => slow path boxes it as a double).
1179 _llint_op_unsigned:
1180 traceExecution()
1181 loadi 4[PC], t0
1182 loadi 8[PC], t1
1183 loadConstantOrVariablePayload(t1, Int32Tag, t2, .opUnsignedSlow)
1184 bilt t2, 0, .opUnsignedSlow
1185 storei t2, PayloadOffset[cfr, t0, 8]
1186 storei Int32Tag, TagOffset[cfr, t0, 8]
1187 dispatch(3)
1188 .opUnsignedSlow:
1189 callSlowPath(_slow_path_unsigned)
1190 dispatch(3)
1191
1192
# Bitwise opcodes: thin bitOp instantiations; opcode length 5.
1193 _llint_op_bitand:
1194 traceExecution()
1195 bitOp(
1196 macro (left, right) andi left, right end,
1197 _slow_path_bitand,
1198 5)
1199
1200
1201 _llint_op_bitxor:
1202 traceExecution()
1203 bitOp(
1204 macro (left, right) xori left, right end,
1205 _slow_path_bitxor,
1206 5)
1207
1208
1209 _llint_op_bitor:
1210 traceExecution()
1211 bitOp(
1212 macro (left, right) ori left, right end,
1213 _slow_path_bitor,
1214 5)
1215
1216
# check_has_instance: fast path requires the constructor (operand 12[PC])
# to be a cell with the ImplementsDefaultHasInstance flag set; otherwise
# defer to the slow path. dispatch(0) after the slow call: the slow path
# adjusts PC itself (it can branch), so no fixed advance here.
1217 _llint_op_check_has_instance:
1218 traceExecution()
1219 loadi 12[PC], t1
1220 loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
1221 btbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
1222 dispatch(5)
1223
1224 .opCheckHasInstanceSlow:
1225 callSlowPath(_llint_slow_path_check_has_instance)
1226 dispatch(0)
1227
1228
# instanceof dst(4[PC]), value(8[PC]), prototype(12[PC]): walks the value's
# prototype chain looking for the prototype object. The prototype must be
# a cell of ObjectType and the value must be a cell; otherwise slow path.
# Result is a boolean: 1 if found, 0 when the chain ends (next prototype
# payload is null/0).
1229 _llint_op_instanceof:
1230 traceExecution()
1231 # Actually do the work.
1232 loadi 12[PC], t0
1233 loadi 4[PC], t3
1234 loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
1235 bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow
1236 loadi 8[PC], t0
1237 loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
1238
1239 # Register state: t1 = prototype, t2 = value
1240 move 1, t0
1241 .opInstanceofLoop:
1242 loadp JSCell::m_structureID[t2], t2
1243 loadi Structure::m_prototype + PayloadOffset[t2], t2
1244 bpeq t2, t1, .opInstanceofDone
# Non-zero payload => another cell in the chain; keep walking.
1245 btinz t2, .opInstanceofLoop
1246
1247 move 0, t0
1248 .opInstanceofDone:
1249 storei BooleanTag, TagOffset[cfr, t3, 8]
1250 storei t0, PayloadOffset[cfr, t3, 8]
1251 dispatch(4)
1252
1253 .opInstanceofSlow:
1254 callSlowPath(_llint_slow_path_instanceof)
1255 dispatch(4)
1256
1257
# is_undefined dst(4[PC]), src(8[PC]): boolean test for undefined.
# Non-cells: compare the tag against UndefinedTag. Cells: normally false,
# except objects with the MasqueradesAsUndefined flag, which count as
# undefined only when observed from their own global object (compare the
# structure's global object against this code block's).
1258 _llint_op_is_undefined:
1259 traceExecution()
1260 loadi 8[PC], t1
1261 loadi 4[PC], t0
1262 loadConstantOrVariable(t1, t2, t3)
1263 storei BooleanTag, TagOffset[cfr, t0, 8]
1264 bieq t2, CellTag, .opIsUndefinedCell
1265 cieq t2, UndefinedTag, t3
1266 storei t3, PayloadOffset[cfr, t0, 8]
1267 dispatch(3)
1268 .opIsUndefinedCell:
1269 btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
1270 move 0, t1
1271 storei t1, PayloadOffset[cfr, t0, 8]
1272 dispatch(3)
1273 .opIsUndefinedMasqueradesAsUndefined:
1274 loadp JSCell::m_structureID[t3], t1
1275 loadp CodeBlock[cfr], t3
1276 loadp CodeBlock::m_globalObject[t3], t3
1277 cpeq Structure::m_globalObject[t1], t3, t1
1278 storei t1, PayloadOffset[cfr, t0, 8]
1279 dispatch(3)
1280
1281
# is_boolean dst(4[PC]), src(8[PC]): true iff the source's tag is BooleanTag.
1282 _llint_op_is_boolean:
1283 traceExecution()
1284 loadi 8[PC], t1
1285 loadi 4[PC], t2
1286 loadConstantOrVariableTag(t1, t0)
1287 cieq t0, BooleanTag, t0
1288 storei BooleanTag, TagOffset[cfr, t2, 8]
1289 storei t0, PayloadOffset[cfr, t2, 8]
1290 dispatch(3)
1291
1292
# is_number dst(4[PC]), src(8[PC]): true iff the value is an int32 or a
# double. Trick: after `addi 1`, an unsigned compare against LowestTag + 1
# accepts all double tags (below LowestTag) and Int32Tag — this relies on
# Int32Tag wrapping to 0 after the increment (i.e. Int32Tag == -1 in the
# 32_64 value encoding; not visible here — confirm against JSCJSValue.h).
1293 _llint_op_is_number:
1294 traceExecution()
1295 loadi 8[PC], t1
1296 loadi 4[PC], t2
1297 loadConstantOrVariableTag(t1, t0)
1298 storei BooleanTag, TagOffset[cfr, t2, 8]
1299 addi 1, t0
1300 cib t0, LowestTag + 1, t1
1301 storei t1, PayloadOffset[cfr, t2, 8]
1302 dispatch(3)
1303
1304
# is_string dst(4[PC]), src(8[PC]): true iff the value is a cell whose
# JSCell type is StringType.
1305 _llint_op_is_string:
1306 traceExecution()
1307 loadi 8[PC], t1
1308 loadi 4[PC], t2
1309 loadConstantOrVariable(t1, t0, t3)
1310 storei BooleanTag, TagOffset[cfr, t2, 8]
1311 bineq t0, CellTag, .opIsStringNotCell
1312 cbeq JSCell::m_type[t3], StringType, t1
1313 storei t1, PayloadOffset[cfr, t2, 8]
1314 dispatch(3)
1315 .opIsStringNotCell:
1316 storep 0, PayloadOffset[cfr, t2, 8]
1317 dispatch(3)
1318
1319
# Load the (tag, payload) of a property known to live in out-of-line
# (butterfly) storage. Out-of-line offsets are addressed by negating the
# property offset and indexing backwards from the butterfly; the
# (firstOutOfLineOffset - 2) * 8 bias maps propertyOffset ==
# firstOutOfLineOffset to the first out-of-line slot.
# Clobbers propertyOffset (negated) and objectAndStorage (replaced by the
# butterfly pointer).
1320 macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload)
1321 assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end)
1322 negi propertyOffset
1323 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
1324 loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
1325 loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
1326 end
1327
# Load (tag, payload) of a property that may be inline or out-of-line.
# Inline: bias objectAndStorage so the shared indexed load below addresses
# the inline slots just past the JSObject header. Out-of-line: switch to
# the butterfly and negate the offset (see the KnownNotInline variant).
# Clobbers propertyOffset and objectAndStorage.
1328 macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload)
1329 bilt propertyOffset, firstOutOfLineOffset, .isInline
1330 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
1331 negi propertyOffset
1332 jmp .ready
1333 .isInline:
1334 addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
1335 .ready:
1336 loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
1337 loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
1338 end
1339
# Store (tag, payload) to a property that may be inline or out-of-line.
# Mirror image of loadPropertyAtVariableOffset; same addressing scheme,
# same clobbers (propertyOffsetAsInt, objectAndStorage).
1340 macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag, payload)
1341 bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
1342 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
1343 negi propertyOffsetAsInt
1344 jmp .ready
1345 .isInline:
1346 addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
1347 .ready:
1348 storei tag, TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
1349 storei payload, PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
1350 end
1351
1352
# init_global_const: operand 4[PC] is a direct pointer to the global
# variable's slot (not a frame-relative register index — note the stores
# use [t0] with no cfr base); operand 8[PC] is the value. Write barrier
# on the global object first. Opcode length 5.
1353 _llint_op_init_global_const:
1354 traceExecution()
1355 writeBarrierOnGlobalObject(2)
1356 loadi 8[PC], t1
1357 loadi 4[PC], t0
1358 loadConstantOrVariable(t1, t2, t3)
1359 storei t2, TagOffset[t0]
1360 storei t3, PayloadOffset[t0]
1361 dispatch(5)
1362
1363
1364 # We only do monomorphic get_by_id caching for now, and we do not modify the
1365 # opcode. We do, however, allow for the cache to change anytime if fails, since
1366 # ping-ponging is free. At best we get lucky and the get_by_id will continue
1367 # to take fast path on the new cache. At worst we take slow path, which is what
1368 # we would have been doing anyway.
1369
# get_by_id dst(4), base(8), cachedStructure(16), cachedOffset(20):
# monomorphic inline cache. Fast path requires the base to be a cell whose
# structure matches the cached structure; then the property is read from
# inline or out-of-line storage (selected by the getPropertyStorage
# parameter) at the cached byte offset. Result is value-profiled at
# instruction word 32/4 = 8. Opcode length 9.
1370 macro getById(getPropertyStorage)
1371 traceExecution()
1372 loadi 8[PC], t0
1373 loadi 16[PC], t1
1374 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
1375 loadi 20[PC], t2
1376 getPropertyStorage(
1377 t3,
1378 t0,
1379 macro (propertyStorage, scratch)
# Structure check: cache miss falls back to the slow path.
1380 bpneq JSCell::m_structureID[t3], t1, .opGetByIdSlow
1381 loadi 4[PC], t1
1382 loadi TagOffset[propertyStorage, t2], scratch
1383 loadi PayloadOffset[propertyStorage, t2], t2
1384 storei scratch, TagOffset[cfr, t1, 8]
1385 storei t2, PayloadOffset[cfr, t1, 8]
1386 valueProfile(scratch, t2, 32, t1)
1387 dispatch(9)
1388 end)
1389
1390 .opGetByIdSlow:
1391 callSlowPath(_llint_slow_path_get_by_id)
1392 dispatch(9)
1393 end
1394
# getById instantiations: inline vs. out-of-line (butterfly) storage.
1395 _llint_op_get_by_id:
1396 getById(withInlineStorage)
1397
1398
1399 _llint_op_get_by_id_out_of_line:
1400 getById(withOutOfLineStorage)
1401
1402
# get_array_length: specialized get_by_id for "length" on arrays.
# Requires a cell base that array-profiles as an array with an indexing
# shape; reads publicLength from the indexing header just before the
# butterfly. A negative length (>= 2^31) can't be an int32 => slow path.
# Shares the get_by_id slow path and opcode length (9).
1403 _llint_op_get_array_length:
1404 traceExecution()
1405 loadi 8[PC], t0
1406 loadp 16[PC], t1
1407 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
1408 move t3, t2
1409 arrayProfile(t2, t1, t0)
1410 btiz t2, IsArray, .opGetArrayLengthSlow
1411 btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
1412 loadi 4[PC], t1
1413 loadp JSObject::m_butterfly[t3], t0
1414 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
1415 bilt t0, 0, .opGetArrayLengthSlow
1416 valueProfile(Int32Tag, t0, 32, t2)
1417 storep t0, PayloadOffset[cfr, t1, 8]
1418 storep Int32Tag, TagOffset[cfr, t1, 8]
1419 dispatch(9)
1420
1421 .opGetArrayLengthSlow:
1422 callSlowPath(_llint_slow_path_get_by_id)
1423 dispatch(9)
1424
1425
# get_arguments_length dst(4), arguments(8): fast path only while the
# arguments object has NOT been created (its register still holds the
# empty value) — then the length is the frame's ArgumentCount minus one
# (excluding `this`). Otherwise the slow path consults the real object.
1426 _llint_op_get_arguments_length:
1427 traceExecution()
1428 loadi 8[PC], t0
1429 loadi 4[PC], t1
1430 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
1431 loadi ArgumentCount + PayloadOffset[cfr], t2
1432 subi 1, t2
1433 storei Int32Tag, TagOffset[cfr, t1, 8]
1434 storei t2, PayloadOffset[cfr, t1, 8]
1435 dispatch(4)
1436
1437 .opGetArgumentsLengthSlow:
1438 callSlowPath(_llint_slow_path_get_arguments_length)
1439 dispatch(4)
1440
1441
# put_by_id base(4), value(12), cachedStructure(16), cachedOffset(20):
# monomorphic non-transitioning put. Write barrier covers base and value.
# Fast path: base is a cell whose structure matches the cache; the value
# is stored at the cached byte offset in inline or out-of-line storage.
# Opcode length 9.
1442 macro putById(getPropertyStorage)
1443 traceExecution()
1444 writeBarrierOnOperands(1, 3)
1445 loadi 4[PC], t3
1446 loadi 16[PC], t1
1447 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
1448 loadi 12[PC], t2
1449 getPropertyStorage(
1450 t0,
1451 t3,
1452 macro (propertyStorage, scratch)
1453 bpneq JSCell::m_structureID[t0], t1, .opPutByIdSlow
1454 loadi 20[PC], t1
1455 loadConstantOrVariable2Reg(t2, scratch, t2)
1456 storei scratch, TagOffset[propertyStorage, t1]
1457 storei t2, PayloadOffset[propertyStorage, t1]
1458 dispatch(9)
1459 end)
1460
1461 .opPutByIdSlow:
1462 callSlowPath(_llint_slow_path_put_by_id)
1463 dispatch(9)
1464 end
1465
# putById instantiations: inline vs. out-of-line storage.
1466 _llint_op_put_by_id:
1467 putById(withInlineStorage)
1468
1469
1470 _llint_op_put_by_id_out_of_line:
1471 putById(withOutOfLineStorage)
1472
1473
# Transitioning put_by_id: like putById, but after the store the object's
# structure is replaced with the new structure at 24[PC]. additionalChecks
# runs extra validation (e.g. the prototype-chain walk for normal
# transitions) before committing. Only the base gets a write barrier here
# (writeBarrierOnOperand(1)). Opcode length 9.
1474 macro putByIdTransition(additionalChecks, getPropertyStorage)
1475 traceExecution()
1476 writeBarrierOnOperand(1)
1477 loadi 4[PC], t3
1478 loadi 16[PC], t1
1479 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
1480 loadi 12[PC], t2
1481 bpneq JSCell::m_structureID[t0], t1, .opPutByIdSlow
1482 additionalChecks(t1, t3, .opPutByIdSlow)
1483 loadi 20[PC], t1
1484 getPropertyStorage(
1485 t0,
1486 t3,
1487 macro (propertyStorage, scratch)
# t3 = address of the target slot (storage base + cached offset).
1488 addp t1, propertyStorage, t3
1489 loadConstantOrVariable2Reg(t2, t1, t2)
1490 storei t1, TagOffset[t3]
1491 loadi 24[PC], t1
1492 storei t2, PayloadOffset[t3]
# Commit the structure transition after the value is in place.
1493 storep t1, JSCell::m_structureID[t0]
1494 dispatch(9)
1495 end)
1496
1497 .opPutByIdSlow:
1498 callSlowPath(_llint_slow_path_put_by_id)
1499 dispatch(9)
1500 end
1501
# Empty additionalChecks for direct transitions (no chain validation).
1502 macro noAdditionalChecks(oldStructure, scratch, slowPath)
1503 end
1504
# additionalChecks for normal transitions: walk the base's prototype
# chain and verify each prototype's current structure matches the cached
# StructureChain at 28[PC] (a null-free vector of structure pointers,
# advanced 4 bytes per entry — 32-bit pointers). Any mismatch => slowPath.
# Clobbers oldStructure (reused as the prototype cell) and scratch.
1505 macro structureChainChecks(oldStructure, scratch, slowPath)
1506 const protoCell = oldStructure # Reusing the oldStructure register for the proto
1507
1508 loadp 28[PC], scratch
1509 assert(macro (ok) btpnz scratch, ok end)
1510 loadp StructureChain::m_vector[scratch], scratch
1511 assert(macro (ok) btpnz scratch, ok end)
1512 bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
1513 .loop:
1514 loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
1515 loadp JSCell::m_structureID[protoCell], oldStructure
1516 bpneq oldStructure, [scratch], slowPath
1517 addp 4, scratch
1518 bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
1519 .done:
1520 end
1521
# putByIdTransition instantiations: direct (no chain checks) vs. normal
# (prototype-chain checks), each with inline or out-of-line storage.
1522 _llint_op_put_by_id_transition_direct:
1523 putByIdTransition(noAdditionalChecks, withInlineStorage)
1524
1525
1526 _llint_op_put_by_id_transition_direct_out_of_line:
1527 putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
1528
1529
1530 _llint_op_put_by_id_transition_normal:
1531 putByIdTransition(structureChainChecks, withInlineStorage)
1532
1533
1534 _llint_op_put_by_id_transition_normal_out_of_line:
1535 putByIdTransition(structureChainChecks, withOutOfLineStorage)
1536
1537
# get_by_val dst(4), base(8), subscript(12), arrayProfile(16): indexed
# load. Base must be a cell; subscript must be an int32. Dispatches on
# the array-profiled indexing shape: Int32/Contiguous (direct slot read),
# Double (load, reject NaN which marks a hole, re-box via fd2ii), or
# ArrayStorage..SlowPutArrayStorage (read from m_vector within
# vectorLength). A hole (EmptyValueTag) or out-of-bounds index records
# m_outOfBounds in the profile and falls into the slow path. Result is
# value-profiled at word 20/4 = 5. Opcode length 6.
1538 _llint_op_get_by_val:
1539 traceExecution()
1540 loadi 8[PC], t2
1541 loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
1542 move t0, t2
1543 loadp 16[PC], t3
1544 arrayProfile(t2, t3, t1)
1545 loadi 12[PC], t3
1546 loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
1547 loadp JSObject::m_butterfly[t0], t3
1548 andi IndexingShapeMask, t2
1549 bieq t2, Int32Shape, .opGetByValIsContiguous
1550 bineq t2, ContiguousShape, .opGetByValNotContiguous
1551 .opGetByValIsContiguous:
1552
1553 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
1554 loadi TagOffset[t3, t1, 8], t2
1555 loadi PayloadOffset[t3, t1, 8], t1
1556 jmp .opGetByValDone
1557
1558 .opGetByValNotContiguous:
1559 bineq t2, DoubleShape, .opGetByValNotDouble
1560 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
1561 loadd [t3, t1, 8], ft0
# NaN marks a hole in double storage; defer to the slow path.
1562 bdnequn ft0, ft0, .opGetByValSlow
1563 # FIXME: This could be massively optimized.
1564 fd2ii ft0, t1, t2
1565 loadi 4[PC], t0
1566 jmp .opGetByValNotEmpty
1567
1568 .opGetByValNotDouble:
1569 subi ArrayStorageShape, t2
1570 bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
1571 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
1572 loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
1573 loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
1574
1575 .opGetByValDone:
1576 loadi 4[PC], t0
# A hole behaves like an out-of-bounds access (prototype lookup needed).
1577 bieq t2, EmptyValueTag, .opGetByValOutOfBounds
1578 .opGetByValNotEmpty:
1579 storei t2, TagOffset[cfr, t0, 8]
1580 storei t1, PayloadOffset[cfr, t0, 8]
1581 valueProfile(t2, t1, 20, t0)
1582 dispatch(6)
1583
1584 .opGetByValOutOfBounds:
1585 loadpFromInstruction(4, t0)
1586 storeb 1, ArrayProfile::m_outOfBounds[t0]
1587 .opGetByValSlow:
1588 callSlowPath(_llint_slow_path_get_by_val)
1589 dispatch(6)
1590
1591
# get_argument_by_val dst(4), arguments(8), subscript(12): fast path only
# while the arguments object hasn't been created (register still empty).
# Index + 1 (to skip `this`) must be below ArgumentCount; the value is
# read directly from the caller-pushed argument slots. Value-profiled;
# opcode length 6.
1592 _llint_op_get_argument_by_val:
1593 # FIXME: At some point we should array profile this. Right now it isn't necessary
1594 # since the DFG will never turn a get_argument_by_val into a GetByVal.
1595 traceExecution()
1596 loadi 8[PC], t0
1597 loadi 12[PC], t1
1598 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
1599 loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
1600 addi 1, t2
1601 loadi ArgumentCount + PayloadOffset[cfr], t1
1602 biaeq t2, t1, .opGetArgumentByValSlow
1603 loadi 4[PC], t3
1604 loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
1605 loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
1606 storei t0, TagOffset[cfr, t3, 8]
1607 storei t1, PayloadOffset[cfr, t3, 8]
1608 valueProfile(t0, t1, 20, t2)
1609 dispatch(6)
1610
1611 .opGetArgumentByValSlow:
1612 callSlowPath(_llint_slow_path_get_argument_by_val)
1613 dispatch(6)
1614
1615
# get_by_pname dst(4), base(8), property(12), expected(16), iterator(20),
# index(24): fast property read inside for-in. Fast path requires the
# property cell to be exactly the one in the expected register, the base's
# structure to match the iterator's cached structure, and the (1-based)
# index to be within the iterator's cacheable slot count. Indices at or
# past the inline capacity are rebased into out-of-line offsets before the
# shared loadPropertyAtVariableOffset. Opcode length 7.
1616 _llint_op_get_by_pname:
1617 traceExecution()
1618 loadi 12[PC], t0
1619 loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
1620 loadi 16[PC], t0
1621 bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
1622 loadi 8[PC], t0
1623 loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
1624 loadi 20[PC], t0
1625 loadi PayloadOffset[cfr, t0, 8], t3
1626 loadp JSCell::m_structureID[t2], t0
1627 bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
1628 loadi 24[PC], t0
1629 loadi [cfr, t0, 8], t0
1630 subi 1, t0
1631 biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
1632 bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty
# Convert a storage index past the inline capacity to an out-of-line
# property offset (>= firstOutOfLineOffset).
1633 addi firstOutOfLineOffset, t0
1634 subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0
1635 .opGetByPnameInlineProperty:
1636 loadPropertyAtVariableOffset(t0, t2, t1, t3)
1637 loadi 4[PC], t0
1638 storei t1, TagOffset[cfr, t0, 8]
1639 storei t3, PayloadOffset[cfr, t0, 8]
1640 dispatch(7)
1641
1642 .opGetByPnameSlow:
1643 callSlowPath(_llint_slow_path_get_by_pname)
1644 dispatch(7)
1645
1646
# Shared put_by_val tail for Int32/Double/Contiguous shapes.
# Expects: t0 = butterfly, t3 = index, t1 = base cell (per putByVal below).
# In-bounds (index < publicLength): store via the callback. Within the
# vector but past publicLength: mark m_mayStoreToHole in the profile,
# grow publicLength to index + 1, then store. Past vectorLength:
# .opPutByValOutOfBounds (defined in the enclosing putByVal macro).
1647 macro contiguousPutByVal(storeCallback)
1648 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
1649 .storeResult:
1650 loadi 12[PC], t2
1651 storeCallback(t2, t1, t0, t3)
1652 dispatch(5)
1653
1654 .outOfBounds:
1655 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
1656 loadp 16[PC], t2
1657 storeb 1, ArrayProfile::m_mayStoreToHole[t2]
1658 addi 1, t3, t2
1659 storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
1660 jmp .storeResult
1661 end
1662
# put_by_val base(4), subscript(8), value(12), arrayProfile(16): indexed
# store. Base must be a cell, subscript an int32. Dispatches on the
# profiled indexing shape:
#   Int32Shape       — value must itself be an int32;
#   DoubleShape      — int32 values are converted, doubles must not be NaN;
#   ContiguousShape  — any JSValue stored as (tag, payload);
#   ArrayStorageShape — store into m_vector, maintaining
#                       m_numValuesInVector/publicLength when filling a hole.
# Out-of-vector stores record m_outOfBounds and take the slow path.
# Register layout after the prologue: t1 = base cell, t2 = indexing type,
# t3 = index, t0 = butterfly. Opcode length 5.
1663 macro putByVal(slowPath)
1664 traceExecution()
1665 writeBarrierOnOperands(1, 3)
1666 loadi 4[PC], t0
1667 loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
1668 move t1, t2
1669 loadp 16[PC], t3
1670 arrayProfile(t2, t3, t0)
1671 loadi 8[PC], t0
1672 loadConstantOrVariablePayload(t0, Int32Tag, t3, .opPutByValSlow)
1673 loadp JSObject::m_butterfly[t1], t0
1674 andi IndexingShapeMask, t2
1675 bineq t2, Int32Shape, .opPutByValNotInt32
1676 contiguousPutByVal(
1677 macro (operand, scratch, base, index)
1678 loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow)
1679 storei Int32Tag, TagOffset[base, index, 8]
1680 storei scratch, PayloadOffset[base, index, 8]
1681 end)
1682
1683 .opPutByValNotInt32:
1684 bineq t2, DoubleShape, .opPutByValNotDouble
1685 contiguousPutByVal(
1686 macro (operand, scratch, base, index)
1687 const tag = scratch
1688 const payload = operand
1689 loadConstantOrVariable2Reg(operand, tag, payload)
1690 bineq tag, Int32Tag, .notInt
1691 ci2d payload, ft0
1692 jmp .ready
1693 .notInt:
1694 fii2d payload, tag, ft0
# NaN is the hole marker in double storage; can't store it directly.
1695 bdnequn ft0, ft0, .opPutByValSlow
1696 .ready:
1697 stored ft0, [base, index, 8]
1698 end)
1699
1700 .opPutByValNotDouble:
1701 bineq t2, ContiguousShape, .opPutByValNotContiguous
1702 contiguousPutByVal(
1703 macro (operand, scratch, base, index)
1704 const tag = scratch
1705 const payload = operand
1706 loadConstantOrVariable2Reg(operand, tag, payload)
1707 storei tag, TagOffset[base, index, 8]
1708 storei payload, PayloadOffset[base, index, 8]
1709 end)
1710
1711 .opPutByValNotContiguous:
1712 bineq t2, ArrayStorageShape, .opPutByValSlow
1713 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
1714 bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
1715 .opPutByValArrayStorageStoreResult:
1716 loadi 12[PC], t2
1717 loadConstantOrVariable2Reg(t2, t1, t2)
1718 storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8]
1719 storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8]
1720 dispatch(5)
1721
1722 .opPutByValArrayStorageEmpty:
# Filling a hole: bump the live-value count, and extend publicLength
# if we're writing at or past the current length.
1723 loadp 16[PC], t1
1724 storeb 1, ArrayProfile::m_mayStoreToHole[t1]
1725 addi 1, ArrayStorage::m_numValuesInVector[t0]
1726 bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
1727 addi 1, t3, t1
1728 storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
1729 jmp .opPutByValArrayStorageStoreResult
1730
1731 .opPutByValOutOfBounds:
1732 loadpFromInstruction(4, t0)
1733 storeb 1, ArrayProfile::m_outOfBounds[t0]
1734 .opPutByValSlow:
1735 callSlowPath(slowPath)
1736 dispatch(5)
1737 end
1738
# putByVal instantiations; the two opcodes differ only in slow path
# (direct puts bypass setters/prototypes on the slow path).
1739 _llint_op_put_by_val:
1740 putByVal(_llint_slow_path_put_by_val)
1741
1742 _llint_op_put_by_val_direct:
1743 putByVal(_llint_slow_path_put_by_val_direct)
1744
# jmp target(4): unconditional branch by the offset stored in the
# instruction stream.
1745 _llint_op_jmp:
1746 traceExecution()
1747 dispatchBranch(4[PC])
1748
1749
# Shared body for jtrue/jfalse: cond(4[PC]), target(8[PC]).
# Fast path requires a boolean-tagged value; conditionOp decides whether
# its payload triggers the branch. Slow path (non-boolean condition)
# adjusts PC itself, hence dispatch(0).
1750 macro jumpTrueOrFalse(conditionOp, slow)
1751 loadi 4[PC], t1
1752 loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
1753 conditionOp(t0, .target)
1754 dispatch(3)
1755
1756 .target:
1757 dispatchBranch(8[PC])
1758
1759 .slow:
1760 callSlowPath(slow)
1761 dispatch(0)
1762 end
1763
1764
# Shared body for jeq_null/jneq_null: src(4[PC]), target(8[PC]).
# Cells go through cellHandler (structure + m_flags, for the
# MasqueradesAsUndefined dance). Non-cells: `ori 1` folds NullTag and
# UndefinedTag into one value (they are presumed to differ only in bit 0 —
# consistent with the 32_64 tag encoding; confirm in JSCJSValue.h) so
# immediateHandler can compare against NullTag alone.
1765 macro equalNull(cellHandler, immediateHandler)
1766 loadi 4[PC], t0
1767 assertNotConstant(t0)
1768 loadi TagOffset[cfr, t0, 8], t1
1769 loadi PayloadOffset[cfr, t0, 8], t0
1770 bineq t1, CellTag, .immediate
1771 loadp JSCell::m_structureID[t0], t2
1772 cellHandler(t2, JSCell::m_flags[t0], .target)
1773 dispatch(3)
1774
1775 .target:
1776 dispatchBranch(8[PC])
1777
1778 .immediate:
1779 ori 1, t1
1780 immediateHandler(t1, .target)
1781 dispatch(3)
1782 end
1783
# jeq_null: branch if the value compares equal to null/undefined.
# Cells only match when they masquerade as undefined AND belong to this
# code block's global object.
1784 _llint_op_jeq_null:
1785 traceExecution()
1786 equalNull(
1787 macro (structure, value, target)
1788 btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined
1789 loadp CodeBlock[cfr], t0
1790 loadp CodeBlock::m_globalObject[t0], t0
1791 bpeq Structure::m_globalObject[structure], t0, target
1792 .opJeqNullNotMasqueradesAsUndefined:
1793 end,
1794 macro (value, target) bieq value, NullTag, target end)
1795
1796
# jneq_null: exact inverse of jeq_null — branch when the value does NOT
# compare equal to null/undefined.
1797 _llint_op_jneq_null:
1798 traceExecution()
1799 equalNull(
1800 macro (structure, value, target)
1801 btbz value, MasqueradesAsUndefined, target
1802 loadp CodeBlock[cfr], t0
1803 loadp CodeBlock::m_globalObject[t0], t0
1804 bpneq Structure::m_globalObject[structure], t0, target
1805 end,
1806 macro (value, target) bineq value, NullTag, target end)
1807
1808
# jneq_ptr src(4), specialPointerIndex(8), target(12): branch unless the
# source is a cell equal to the indexed entry of the global object's
# special-pointer table (4-byte entries — 32-bit pointers).
1809 _llint_op_jneq_ptr:
1810 traceExecution()
1811 loadi 4[PC], t0
1812 loadi 8[PC], t1
1813 loadp CodeBlock[cfr], t2
1814 loadp CodeBlock::m_globalObject[t2], t2
1815 bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
1816 loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
1817 bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
1818 .opJneqPtrBranch:
1819 dispatchBranch(12[PC])
1820 .opJneqPtrFallThrough:
1821 dispatch(4)
1822
1823
# Shared body for fused compare-and-branch opcodes (jless etc.):
# op1(4[PC]), op2(8[PC]), target(12[PC]). Int/int uses integerCompare;
# any int/double mix is promoted to double and uses doubleCompare.
# Non-number operands take the slow path, which adjusts PC itself
# (dispatch(0)). Register layout: t0/t1 = op1 tag/payload,
# t2/t3 = op2 tag/payload.
1824 macro compare(integerCompare, doubleCompare, slowPath)
1825 loadi 4[PC], t2
1826 loadi 8[PC], t3
1827 loadConstantOrVariable(t2, t0, t1)
1828 loadConstantOrVariable2Reg(t3, t2, t3)
1829 bineq t0, Int32Tag, .op1NotInt
1830 bineq t2, Int32Tag, .op2NotInt
1831 integerCompare(t1, t3, .jumpTarget)
1832 dispatch(4)
1833
1834 .op1NotInt:
1835 bia t0, LowestTag, .slow
1836 bib t2, LowestTag, .op1NotIntOp2Double
1837 bineq t2, Int32Tag, .slow
1838 ci2d t3, ft1
1839 jmp .op1NotIntReady
1840 .op1NotIntOp2Double:
1841 fii2d t3, t2, ft1
1842 .op1NotIntReady:
1843 fii2d t1, t0, ft0
1844 doubleCompare(ft0, ft1, .jumpTarget)
1845 dispatch(4)
1846
1847 .op2NotInt:
1848 ci2d t1, ft0
1849 bia t2, LowestTag, .slow
1850 fii2d t3, t2, ft1
1851 doubleCompare(ft0, ft1, .jumpTarget)
1852 dispatch(4)
1853
1854 .jumpTarget:
1855 dispatchBranch(12[PC])
1856
1857 .slow:
1858 callSlowPath(slowPath)
1859 dispatch(0)
1860 end
1861
1862
# switch_imm tableIndex(4), defaultTarget(8), scrutinee(12): integer
# switch via the code block's SimpleJumpTable. Int32 scrutinee: index
# (value - min) into branchOffsets; a zero offset means "no case" =>
# fall through to the default target. Double scrutinee goes to the slow
# path (which can truncate); all other tags fall through to default.
1863 _llint_op_switch_imm:
1864 traceExecution()
1865 loadi 12[PC], t2
1866 loadi 4[PC], t3
1867 loadConstantOrVariable(t2, t1, t0)
1868 loadp CodeBlock[cfr], t2
1869 loadp CodeBlock::m_rareData[t2], t2
1870 muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
1871 loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
1872 addp t3, t2
1873 bineq t1, Int32Tag, .opSwitchImmNotInt
1874 subi SimpleJumpTable::min[t2], t0
1875 biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
1876 loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
1877 loadi [t3, t0, 4], t1
1878 btiz t1, .opSwitchImmFallThrough
1879 dispatchBranchWithOffset(t1)
1880
1881 .opSwitchImmNotInt:
1882 bib t1, LowestTag, .opSwitchImmSlow # Go to slow path if it's a double.
1883 .opSwitchImmFallThrough:
1884 dispatchBranch(8[PC])
1885
1886 .opSwitchImmSlow:
1887 callSlowPath(_llint_slow_path_switch_imm)
1888 dispatch(0)
1889
1890
# switch_char tableIndex(4), defaultTarget(8), scrutinee(12): switch on a
# single-character string. Fast path requires a cell of StringType with
# length 1 and a resolved StringImpl (a null m_value means a rope => slow
# path materializes it). Reads the character as 8- or 16-bit depending on
# the StringImpl flags, then indexes the SimpleJumpTable like switch_imm.
1891 _llint_op_switch_char:
1892 traceExecution()
1893 loadi 12[PC], t2
1894 loadi 4[PC], t3
1895 loadConstantOrVariable(t2, t1, t0)
1896 loadp CodeBlock[cfr], t2
1897 loadp CodeBlock::m_rareData[t2], t2
1898 muli sizeof SimpleJumpTable, t3
1899 loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
1900 addp t3, t2
1901 bineq t1, CellTag, .opSwitchCharFallThrough
1902 bbneq JSCell::m_type[t0], StringType, .opSwitchCharFallThrough
1903 bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
1904 loadp JSString::m_value[t0], t0
1905 btpz t0, .opSwitchOnRope
1906 loadp StringImpl::m_data8[t0], t1
1907 btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
1908 loadh [t1], t0
1909 jmp .opSwitchCharReady
1910 .opSwitchChar8Bit:
1911 loadb [t1], t0
1912 .opSwitchCharReady:
1913 subi SimpleJumpTable::min[t2], t0
1914 biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
1915 loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
1916 loadi [t2, t0, 4], t1
1917 btiz t1, .opSwitchCharFallThrough
1918 dispatchBranchWithOffset(t1)
1919
1920 .opSwitchCharFallThrough:
1921 dispatchBranch(8[PC])
1922
1923 .opSwitchOnRope:
1924 callSlowPath(_llint_slow_path_switch_char)
1925 dispatch(0)
1926
1927
# new_func dst(4), functionDecl(8), shouldCheck(12): create a function
# object. When shouldCheck is non-zero and the destination register is
# already populated (not the empty value), skip the allocation.
1928 _llint_op_new_func:
1929 traceExecution()
1930 btiz 12[PC], .opNewFuncUnchecked
1931 loadi 4[PC], t1
1932 bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
1933 .opNewFuncUnchecked:
1934 callSlowPath(_llint_slow_path_new_func)
1935 .opNewFuncDone:
1936 dispatch(4)
1937
1938
# new_captured_func: no fast path; always allocated via the slow path.
1939 _llint_op_new_captured_func:
1940 traceExecution()
1941 callSlowPath(_slow_path_new_captured_func)
1942 dispatch(4)
1943
1944
# Record the structure of the `this` argument of an upcoming call into
# the call's ArrayProfile (only when `this` is a cell). 16[PC] is the
# first-argument register index; negated because arguments sit at negative
# offsets relative to ThisArgumentOffset in this frame layout.
1945 macro arrayProfileForCall()
1946 loadi 16[PC], t3
1947 negi t3
1948 bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
1949 loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
1950 loadp JSCell::m_structureID[t0], t0
1951 loadpFromInstruction(CallOpCodeSize - 2, t1)
1952 storep t0, ArrayProfile::m_lastSeenStructureID[t1]
1953 .done:
1954 end
1955
# Shared body for call opcodes: callee(8), argCount(12), registerOffset(16),
# callLinkInfo(20). Fast path requires the callee cell to match the
# link-cached callee. Builds the callee frame below ours (registerOffset
# slots down), populating Callee, ScopeChain and ArgumentCount, saves PC
# in our frame's ArgumentCount tag slot, then tail-calls into the target.
# Any mismatch (or non-cell callee) goes through slowPathForCall, which
# handles linking/virtual calls.
1956 macro doCall(slowPath)
1957 loadi 8[PC], t0
1958 loadi 20[PC], t1
1959 loadp LLIntCallLinkInfo::callee[t1], t2
1960 loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
1961 bineq t3, t2, .opCallSlow
1962 loadi 16[PC], t3
1963 lshifti 3, t3
1964 negi t3
1965 addp cfr, t3 # t3 contains the new value of cfr
1966 loadp JSFunction::m_scope[t2], t0
1967 storei t2, Callee + PayloadOffset[t3]
1968 storei t0, ScopeChain + PayloadOffset[t3]
1969 loadi 12[PC], t2
# Save our PC so the return path can restore it.
1970 storei PC, ArgumentCount + TagOffset[cfr]
1971 storei t2, ArgumentCount + PayloadOffset[t3]
1972 storei CellTag, Callee + TagOffset[t3]
1973 storei CellTag, ScopeChain + TagOffset[t3]
1974 addp CallerFrameAndPCSize, t3
1975 callTargetFunction(t1, t3)
1976
1977 .opCallSlow:
1978 slowPathForCall(slowPath)
1979 end
1980
1981
# tear_off_activation activation(4): only call the slow path if the
# activation was actually created (register is not the empty value).
1982 _llint_op_tear_off_activation:
1983 traceExecution()
1984 loadi 4[PC], t0
1985 bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
1986 callSlowPath(_llint_slow_path_tear_off_activation)
1987 .opTearOffActivationNotCreated:
1988 dispatch(2)
1989
1990
# tear_off_arguments arguments(4): the unmodified arguments register is
# at index + 1; skip the slow path if the arguments object was never
# created (register still holds the empty value).
1991 _llint_op_tear_off_arguments:
1992 traceExecution()
1993 loadi 4[PC], t0
1994 addi 1, t0 # Get the unmodifiedArgumentsRegister
1995 bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
1996 callSlowPath(_llint_slow_path_tear_off_arguments)
1997 .opTearOffArgumentsNotCreated:
1998 dispatch(3)
1999
2000
# ret result(4): load the return value into (t1 = tag, t0 = payload) —
# the register pair doReturn() expects — and return to the caller.
2001 _llint_op_ret:
2002 traceExecution()
2003 checkSwitchToJITForEpilogue()
2004 loadi 4[PC], t2
2005 loadConstantOrVariable(t2, t1, t0)
2006 doReturn()
2007
2008
# ret_object_or_this result(4), thisValue(8): return the first operand if
# it is an object cell; otherwise return the second operand (constructor
# semantics: a non-object return value is replaced by `this`).
2009 _llint_op_ret_object_or_this:
2010 traceExecution()
2011 checkSwitchToJITForEpilogue()
2012 loadi 4[PC], t2
2013 loadConstantOrVariable(t2, t1, t0)
2014 bineq t1, CellTag, .opRetObjectOrThisNotObject
2015 bbb JSCell::m_type[t0], ObjectType, .opRetObjectOrThisNotObject
2016 doReturn()
2017
2018 .opRetObjectOrThisNotObject:
2019 loadi 8[PC], t2
2020 loadConstantOrVariable(t2, t1, t0)
2021 doReturn()
2022
2023
# to_primitive dst(4), src(8): pass non-cells and string cells through
# unchanged; any other cell (object) needs the slow path's full
# ToPrimitive.
2024 _llint_op_to_primitive:
2025 traceExecution()
2026 loadi 8[PC], t2
2027 loadi 4[PC], t3
2028 loadConstantOrVariable(t2, t1, t0)
2029 bineq t1, CellTag, .opToPrimitiveIsImm
2030 bbneq JSCell::m_type[t0], StringType, .opToPrimitiveSlowCase
2031 .opToPrimitiveIsImm:
2032 storei t1, TagOffset[cfr, t3, 8]
2033 storei t0, PayloadOffset[cfr, t3, 8]
2034 dispatch(3)
2035
2036 .opToPrimitiveSlowCase:
2037 callSlowPath(_slow_path_to_primitive)
2038 dispatch(3)
2039
2040
# next_pname dst(4), base(8), index(12), size(16), iterator(20), target(24):
# for-in iteration step. If index == size, iteration is done (dispatch
# past the loop). Otherwise fetch the cached JSString name for this index,
# bump the index, store the name in dst, then validate the cache: the
# base's structure must still match the iterator's cached structure, and
# every structure on the cached prototype chain must be unchanged; any
# mismatch goes to the slow path (which skips deleted/shadowed names, or
# branches — hence dispatch(0)). On success, branch back to the loop body.
2041 _llint_op_next_pname:
2042 traceExecution()
2043 loadi 12[PC], t1
2044 loadi 16[PC], t2
2045 loadi PayloadOffset[cfr, t1, 8], t0
2046 bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
2047 loadi 20[PC], t2
2048 loadi PayloadOffset[cfr, t2, 8], t2
2049 loadp JSPropertyNameIterator::m_jsStrings[t2], t3
2050 loadi [t3, t0, 8], t3
2051 addi 1, t0
2052 storei t0, PayloadOffset[cfr, t1, 8]
2053 loadi 4[PC], t1
2054 storei CellTag, TagOffset[cfr, t1, 8]
2055 storei t3, PayloadOffset[cfr, t1, 8]
2056 loadi 8[PC], t3
2057 loadi PayloadOffset[cfr, t3, 8], t3
2058 loadp JSCell::m_structureID[t3], t1
2059 bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
2060 loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
2061 loadp StructureChain::m_vector[t0], t0
2062 btpz [t0], .opNextPnameTarget
2063 .opNextPnameCheckPrototypeLoop:
2064 bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
2065 loadp Structure::m_prototype + PayloadOffset[t1], t2
2066 loadp JSCell::m_structureID[t2], t1
2067 bpneq t1, [t0], .opNextPnameSlow
2068 addp 4, t0
2069 btpnz [t0], .opNextPnameCheckPrototypeLoop
2070 .opNextPnameTarget:
2071 dispatchBranch(24[PC])
2072
2073 .opNextPnameEnd:
2074 dispatch(7)
2075
2076 .opNextPnameSlow:
2077 callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
2078 dispatch(0)
2079
2080
# catch exception(4): landing pad for thrown exceptions. Recovers the VM
# pointer by masking the ScopeChain cell down to its MarkedBlock and
# reading the block's WeakSet back-pointer, restores cfr and the stack
# pointer from the VM's throw state, reloads PC from
# targetInterpreterPCForThrow, then moves the pending exception into the
# catch register and clears VM::m_exception.
2081 _llint_op_catch:
2082 # This is where we end up from the JIT's throw trampoline (because the
2083 # machine code return address will be set to _llint_op_catch), and from
2084 # the interpreter's throw trampoline (see _llint_throw_trampoline).
2085 # The throwing code must have known that we were throwing to the interpreter,
2086 # and have set VM::targetInterpreterPCForThrow.
2087 loadp ScopeChain + PayloadOffset[cfr], t3
2088 andp MarkedBlockMask, t3
2089 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
2090 loadp VM::callFrameForThrow[t3], cfr
2091 restoreStackPointerAfterCall()
2092
2093 loadi VM::targetInterpreterPCForThrow[t3], PC
2094 loadi VM::m_exception + PayloadOffset[t3], t0
2095 loadi VM::m_exception + TagOffset[t3], t1
2096 storei 0, VM::m_exception + PayloadOffset[t3]
2097 storei EmptyValueTag, VM::m_exception + TagOffset[t3]
2098 loadi 4[PC], t2
2099 storei t0, PayloadOffset[cfr, t2, 8]
2100 storei t1, TagOffset[cfr, t2, 8]
2101 traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
2102 dispatch(2)
2103
2104
2105 # Gives you the scope in t0, while allowing you to optionally perform additional checks on the
2106 # scopes as they are traversed. scopeCheck() is called with two arguments: the register
2107 # holding the scope, and a register that can be used for scratch. Note that this does not
2108 # use t3, so you can hold stuff in t3 if need be.
2109 macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
2110     loadp ScopeChain + PayloadOffset[cfr], t0   # t0 = current scope
2111     loadi deBruijinIndexOperand, t2             # t2 = number of scopes to hop
2112
2113     btiz t2, .done                              # index 0: the current scope is the answer
2114
2115     loadp CodeBlock[cfr], t1
2116     bineq CodeBlock::m_codeType[t1], FunctionCode, .loop   # only function code can have an activation
2117     btbz CodeBlock::m_needsActivation[t1], .loop
2118
2119     loadi CodeBlock::m_activationRegister[t1], t1
2120
2121     # Need to conditionally skip over one scope.
2122     bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation   # activation register still empty: nothing to skip
2123     scopeCheck(t0, t1)
2124     loadp JSScope::m_next[t0], t0
2125 .noActivation:
2126     subi 1, t2
2127
2128     btiz t2, .done
2129 .loop:
    # Hop outward one scope per iteration, letting the caller's scopeCheck
    # validate each scope before we follow its m_next link.
2130     scopeCheck(t0, t1)
2131     loadp JSScope::m_next[t0], t0
2132     subi 1, t2
2133     btinz t2, .loop
2134
2135 .done:
2136
2137 end
2138
2139 _llint_op_end:
    # Terminal opcode of a program: load operand 1 as the return value
    # (tag in t1, payload in t0, the doReturn convention) and return.
2140     traceExecution()
2141     checkSwitchToJITForEpilogue()
2142     loadi 4[PC], t0
2143     assertNotConstant(t0)                  # operand must be a real register, not a constant index
2144     loadi TagOffset[cfr, t0, 8], t1
2145     loadi PayloadOffset[cfr, t0, 8], t0
2146     doReturn()
2147
2148
2149 _llint_throw_from_slow_path_trampoline:
2150     callSlowPath(_llint_slow_path_handle_exception)
2151
2152     # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
2153     # the throw target is not necessarily interpreted code, we come to here.
2154     # This essentially emulates the JIT's throwing protocol.
2155     loadp CodeBlock[cfr], t1
2156     loadp CodeBlock::m_vm[t1], t1          # t1 = VM*
2157     jmp VM::targetMachinePCForThrow[t1]    # jump to the machine-code catch target set by the handler
2158
2159
2160 _llint_throw_during_call_trampoline:
    # A throw occurred while a call was in flight: restore the return address
    # (using t2 as scratch), then funnel into the common throw path above.
2161     preserveReturnAddressAfterCall(t2)
2162     jmp _llint_throw_from_slow_path_trampoline
2163
2164
2165 macro nativeCallTrampoline(executableOffsetToFunction)
    # Call a host (native C/C++) function for the current callee.
    # executableOffsetToFunction is the byte offset within the executable at
    # which the function pointer lives. Each architecture branch records
    # cfr in VM::topCallFrame, passes the CallFrame* per its calling
    # convention, makes the call, then reloads the VM into t3 for the
    # exception check at the bottom.
2166
2167     functionPrologue()
2168     storep 0, CodeBlock[cfr]               # native frames carry no CodeBlock
2169     loadp CallerFrame[cfr], t0
2170     loadi ScopeChain + PayloadOffset[t0], t1   # inherit the caller's scope chain cell
2171     storei CellTag, ScopeChain + TagOffset[cfr]
2172     storei t1, ScopeChain + PayloadOffset[cfr]
2173     if X86 or X86_WIN
2174         subp 8, sp # align stack pointer
2175         andp MarkedBlockMask, t1           # scope cell -> MarkedBlock -> VM
2176         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t3
2177         storep cfr, VM::topCallFrame[t3]
2178         move cfr, t2 # t2 = ecx
2179         storep t2, [sp]                    # CallFrame* argument goes on the stack
2180         loadi Callee + PayloadOffset[cfr], t1
2181         loadp JSFunction::m_executable[t1], t1
2182         checkStackPointerAlignment(t3, 0xdead0001)
2183         call executableOffsetToFunction[t1]    # invoke the native function
2184         loadp ScopeChain[cfr], t3
2185         andp MarkedBlockMask, t3
2186         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3   # t3 = VM* (re-derived after the call)
2187         addp 8, sp
2188     elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP or MIPS or SH4
2189         subp 8, sp # align stack pointer
2190         # t1 already contains the ScopeChain.
2191         andp MarkedBlockMask, t1
2192         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
2193         storep cfr, VM::topCallFrame[t1]
2194         if MIPS or SH4
2195             move cfr, a0                   # CallFrame* in the first argument register
2196         else
2197             move cfr, t0                   # ARM-family: t0 doubles as the first argument register
2198         end
2199         loadi Callee + PayloadOffset[cfr], t1
2200         loadp JSFunction::m_executable[t1], t1
2201         checkStackPointerAlignment(t3, 0xdead0001)
2202         if C_LOOP
2203             cloopCallNative executableOffsetToFunction[t1]
2204         else
2205             call executableOffsetToFunction[t1]
2206         end
2207         loadp ScopeChain[cfr], t3
2208         andp MarkedBlockMask, t3
2209         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3   # t3 = VM* again
2210         addp 8, sp
2211     else
2212         error
2213     end
2214
2215     functionEpilogue()
2216     bineq VM::m_exception + TagOffset[t3], EmptyValueTag, .handleException   # did the native call throw?
2217     ret
2218
2219 .handleException:
2220     storep cfr, VM::topCallFrame[t3]
2221     restoreStackPointerAfterCall()
2222     jmp _llint_throw_from_slow_path_trampoline
2223 end
2224
2225
2226 macro getGlobalObject(dst)
    # Store the CodeBlock's global object cell into the operand slot named by
    # the instruction operand index dst. Clobbers t0 and t1.
2227     loadp CodeBlock[cfr], t0
2228     loadp CodeBlock::m_globalObject[t0], t0
2229     loadisFromInstruction(dst, t1)
2230     storei CellTag, TagOffset[cfr, t1, 8]
2231     storei t0, PayloadOffset[cfr, t1, 8]
2232 end
2233
2234 macro varInjectionCheck(slowPath)
    # Branch to slowPath if the global object's var-injection watchpoint has
    # been invalidated (i.e. someone may have injected variables, so cached
    # resolve results cannot be trusted). Clobbers t0.
2235     loadp CodeBlock[cfr], t0
2236     loadp CodeBlock::m_globalObject[t0], t0
2237     loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
2238     bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
2239 end
2240
2241 macro resolveScope()
    # Walk the scope chain the number of hops given by operand 4 (plus one
    # extra hop if this frame needs an activation and that activation has
    # already been created), then store the resulting scope cell into
    # operand 1. Clobbers t0, t1, t2.
2242     loadp CodeBlock[cfr], t0
2243     loadisFromInstruction(4, t2)               # t2 = scope depth to traverse
2244     btbz CodeBlock::m_needsActivation[t0], .resolveScopeAfterActivationCheck
2245     loadis CodeBlock::m_activationRegister[t0], t1
2246     btpz PayloadOffset[cfr, t1, 8], .resolveScopeAfterActivationCheck   # activation not created yet: no extra hop
2247     addi 1, t2
2248
2249 .resolveScopeAfterActivationCheck:
2250     loadp ScopeChain[cfr], t0
2251     btiz t2, .resolveScopeLoopEnd
2252
2253 .resolveScopeLoop:
2254     loadp JSScope::m_next[t0], t0              # hop outward one scope
2255     subi 1, t2
2256     btinz t2, .resolveScopeLoop
2257
2258 .resolveScopeLoopEnd:
2259     loadisFromInstruction(1, t1)
2260     storei CellTag, TagOffset[cfr, t1, 8]
2261     storei t0, PayloadOffset[cfr, t1, 8]
2262 end
2263
2264
2265 _llint_op_resolve_scope:
    # Resolve the scope holding a variable, dispatching on the resolve type
    # cached in operand 3. Global cases store the global object directly;
    # closure cases walk the scope chain; the *WithVarInjectionChecks cases
    # first verify the var-injection watchpoint; anything else is .rDynamic.
2266     traceExecution()
2267     loadisFromInstruction(3, t0)               # t0 = cached ResolveType
2268
2269 #rGlobalProperty:
2270     bineq t0, GlobalProperty, .rGlobalVar
2271     getGlobalObject(1)
2272     dispatch(6)
2273
2274 .rGlobalVar:
2275     bineq t0, GlobalVar, .rClosureVar
2276     getGlobalObject(1)
2277     dispatch(6)
2278
2279 .rClosureVar:
2280     bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
2281     resolveScope()
2282     dispatch(6)
2283
2284 .rGlobalPropertyWithVarInjectionChecks:
2285     bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
2286     varInjectionCheck(.rDynamic)
2287     getGlobalObject(1)
2288     dispatch(6)
2289
2290 .rGlobalVarWithVarInjectionChecks:
2291     bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
2292     varInjectionCheck(.rDynamic)
2293     getGlobalObject(1)
2294     dispatch(6)
2295
2296 .rClosureVarWithVarInjectionChecks:
2297     bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
2298     varInjectionCheck(.rDynamic)
2299     resolveScope()
2300     dispatch(6)
2301
2302 .rDynamic:
    # Unknown/dynamic resolve type, or a failed var-injection check.
2303     callSlowPath(_llint_slow_path_resolve_scope)
2304     dispatch(6)
2305
2306
2307 macro loadWithStructureCheck(operand, slowPath)
    # t0 = the cell stored in the given operand; branch to slowPath unless its
    # structure matches the structure cached in instruction slot 5.
    # Clobbers t0 and t1.
2308     loadisFromInstruction(operand, t0)
2309     loadp [cfr, t0, 8], t0                 # loads the cell word of the 8-byte JSValue slot
2310     loadpFromInstruction(5, t1)
2311     bpneq JSCell::m_structureID[t0], t1, slowPath
2312 end
2313
2314 macro getProperty()
    # Expects t0 = base object (established by loadWithStructureCheck). Loads
    # the property at the cached variable offset in operand 6 into t1:t2,
    # value-profiles it at pc[28], and stores it into operand 1.
2315     loadisFromInstruction(6, t3)
2316     loadPropertyAtVariableOffset(t3, t0, t1, t2)
2317     valueProfile(t1, t2, 28, t0)
2318     loadisFromInstruction(1, t0)
2319     storei t1, TagOffset[cfr, t0, 8]
2320     storei t2, PayloadOffset[cfr, t0, 8]
2322
2323 macro getGlobalVar()
    # Operand 6 holds a pointer directly to the global variable's JSValue
    # slot. Load tag/payload from it, value-profile at pc[28], and store into
    # operand 1. Clobbers t0, t1, t2.
2324     loadpFromInstruction(6, t0)
2325     loadp TagOffset[t0], t1
2326     loadp PayloadOffset[t0], t2
2327     valueProfile(t1, t2, 28, t0)
2328     loadisFromInstruction(1, t0)
2329     storei t1, TagOffset[cfr, t0, 8]
2330     storei t2, PayloadOffset[cfr, t0, 8]
2331 end
2332
2333 macro getClosureVar()
    # Expects t0 = the resolved scope (a JSVariableObject). Load the register
    # slot indexed by operand 6 from the scope's m_registers, value-profile at
    # pc[28], and store the value into operand 1. Clobbers t0-t3.
2334     loadp JSVariableObject::m_registers[t0], t0
2335     loadisFromInstruction(6, t3)
2336     loadp TagOffset[t0, t3, 8], t1
2337     loadp PayloadOffset[t0, t3, 8], t2
2338     valueProfile(t1, t2, 28, t0)
2339     loadisFromInstruction(1, t0)
2340     storei t1, TagOffset[cfr, t0, 8]
2341     storei t2, PayloadOffset[cfr, t0, 8]
2342 end
2343
2344 _llint_op_get_from_scope:
    # Load a variable from a scope, specialized on the resolve type cached in
    # the low ResolveModeMask bits of operand 4. Any failed structure or
    # var-injection check, or an unknown type, falls through to .gDynamic.
2345     traceExecution()
2346     loadisFromInstruction(4, t0)
2347     andi ResolveModeMask, t0               # t0 = cached resolve type
2348
2349 #gGlobalProperty:
2350     bineq t0, GlobalProperty, .gGlobalVar
2351     loadWithStructureCheck(2, .gDynamic)   # t0 = global object, structure-checked
2352     getProperty()
2353     dispatch(8)
2354
2355 .gGlobalVar:
2356     bineq t0, GlobalVar, .gClosureVar
2357     getGlobalVar()
2358     dispatch(8)
2359
2360 .gClosureVar:
2361     bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
2362     loadVariable(2, t2, t1, t0)            # t0 = scope payload from operand 2
2363     getClosureVar()
2364     dispatch(8)
2365
2366 .gGlobalPropertyWithVarInjectionChecks:
2367     bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
2368     loadWithStructureCheck(2, .gDynamic)   # the structure check subsumes the injection check here
2369     getProperty()
2370     dispatch(8)
2371
2372 .gGlobalVarWithVarInjectionChecks:
2373     bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
2374     varInjectionCheck(.gDynamic)
2375     loadVariable(2, t2, t1, t0)
2376     getGlobalVar()
2377     dispatch(8)
2378
2379 .gClosureVarWithVarInjectionChecks:
2380     bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
2381     varInjectionCheck(.gDynamic)
2382     loadVariable(2, t2, t1, t0)
2383     getClosureVar()
2384     dispatch(8)
2385
2386 .gDynamic:
2387     callSlowPath(_llint_slow_path_get_from_scope)
2388     dispatch(8)
2389
2390
2391 macro putProperty()
    # Expects t0 = base object (established by loadWithStructureCheck). Load
    # the value of operand 3 (constant or variable) into t2:t3 and store it at
    # the cached variable offset given by operand 6. Clobbers t1, t2, t3.
2392     loadisFromInstruction(3, t1)
2393     loadConstantOrVariable(t1, t2, t3)
2394     loadisFromInstruction(6, t1)
2395     storePropertyAtVariableOffset(t1, t0, t2, t3)
2396 end
2397
2398 macro putGlobalVar()
    # Store operand 3's value into the global variable slot pointed to by
    # operand 6, after notifying the watchpoint set cached in slot 5 of the
    # write. NOTE(review): notifyWrite bails to the .pDynamic label, so this
    # macro is only expandable inside _llint_op_put_to_scope.
2399     loadisFromInstruction(3, t0)
2400     loadConstantOrVariable(t0, t1, t2)     # t1:t2 = value to store
2401     loadpFromInstruction(5, t3)            # t3 = WatchpointSet* (per the notifyWrite call)
2402     notifyWrite(t3, t1, t2, t0, .pDynamic)
2403     loadpFromInstruction(6, t0)            # t0 = pointer to the variable's JSValue slot
2404     storei t1, TagOffset[t0]
2405     storei t2, PayloadOffset[t0]
2406 end
2407
2408 macro putClosureVar()
    # Expects t0 = the resolved scope (a JSVariableObject). Store operand 3's
    # value into the scope's register slot indexed by operand 6.
    # Clobbers t0-t3.
2409     loadisFromInstruction(3, t1)
2410     loadConstantOrVariable(t1, t2, t3)     # t2:t3 = value to store
2411     loadp JSVariableObject::m_registers[t0], t0
2412     loadisFromInstruction(6, t1)
2413     storei t2, TagOffset[t0, t1, 8]
2414     storei t3, PayloadOffset[t0, t1, 8]
2415 end
2416
2417
2418 _llint_op_put_to_scope:
    # Store a value into a scope variable, specialized on the resolve type
    # cached in the low ResolveModeMask bits of operand 4. Write barriers are
    # emitted before the cell checks; failures fall through to .pDynamic.
2419     traceExecution()
2420     loadisFromInstruction(4, t0)
2421     andi ResolveModeMask, t0               # t0 = cached resolve type
2422
2423 #pGlobalProperty:
2424     bineq t0, GlobalProperty, .pGlobalVar
2425     writeBarrierOnOperands(1, 3)           # barrier on base (operand 1) for the value (operand 3)
2426     loadWithStructureCheck(1, .pDynamic)   # t0 = scope object, structure-checked
2427     putProperty()
2428     dispatch(7)
2429
2430 .pGlobalVar:
2431     bineq t0, GlobalVar, .pClosureVar
2432     writeBarrierOnGlobalObject(3)
2433     putGlobalVar()
2434     dispatch(7)
2435
2436 .pClosureVar:
2437     bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
2438     writeBarrierOnOperands(1, 3)
2439     loadVariable(1, t2, t1, t0)            # t0 = scope payload from operand 1
2440     putClosureVar()
2441     dispatch(7)
2442
2443 .pGlobalPropertyWithVarInjectionChecks:
2444     bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
2445     writeBarrierOnOperands(1, 3)
2446     loadWithStructureCheck(1, .pDynamic)   # the structure check subsumes the injection check here
2447     putProperty()
2448     dispatch(7)
2449
2450 .pGlobalVarWithVarInjectionChecks:
2451     bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
2452     writeBarrierOnGlobalObject(3)
2453     varInjectionCheck(.pDynamic)
2454     putGlobalVar()
2455     dispatch(7)
2456
2457 .pClosureVarWithVarInjectionChecks:
2458     bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
2459     writeBarrierOnOperands(1, 3)
2460     varInjectionCheck(.pDynamic)
2461     loadVariable(1, t2, t1, t0)
2462     putClosureVar()
2463     dispatch(7)
2464
2465 .pDynamic:
2466     callSlowPath(_llint_slow_path_put_to_scope)
2467     dispatch(7)