# llint/LowLevelInterpreter32_64.asm
# From: apple/javascriptcore (JavaScriptCore-7601.1.46.3)
1 # Copyright (C) 2011-2015 Apple Inc. All rights reserved.
2 #
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions
5 # are met:
6 # 1. Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer.
8 # 2. Redistributions in binary form must reproduce the above copyright
9 # notice, this list of conditions and the following disclaimer in the
10 # documentation and/or other materials provided with the distribution.
11 #
12 # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
13 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
14 # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
15 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
16 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
22 # THE POSSIBILITY OF SUCH DAMAGE.
23
24
25 # Crash course on the language that this is written in (which I just call
26 # "assembly" even though it's more than that):
27 #
28 # - Mostly gas-style operand ordering. The last operand tends to be the
29 # destination. So "a := b" is written as "mov b, a". But unlike gas,
30 # comparisons are in-order, so "if (a < b)" is written as
31 # "bilt a, b, ...".
32 #
33 # - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
34 # Currently this is just 32-bit so "i" and "p" are interchangeable
35 # except when an op supports one but not the other.
36 #
37 # - In general, valid operands for macro invocations and instructions are
38 # registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
39 # (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
40 # (eg "_foo" or ".foo"). Macro invocations can also take anonymous
41 # macros as operands. Instructions cannot take anonymous macros.
42 #
43 # - Labels must have names that begin with either "_" or ".". A "." label
44 # is local and gets renamed before code gen to minimize namespace
45 # pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
46 # may or may not be removed during code gen depending on whether the asm
47 # conventions for C name mangling on the target platform mandate a "_"
48 # prefix.
49 #
50 # - A "macro" is a lambda expression, which may be either anonymous or
51 # named. But this has caveats. "macro" can take zero or more arguments,
52 # which may be macros or any valid operands, but it can only return
53 # code. But you can do Turing-complete things via continuation passing
54 # style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
55 # that, since you'll just crash the assembler.
56 #
57 # - An "if" is a conditional on settings. Any identifier supplied in the
58 # predicate of an "if" is assumed to be a #define that is available
59 # during code gen. So you can't use "if" for computation in a macro, but
60 # you can use it to select different pieces of code for different
61 # platforms.
62 #
63 # - Arguments to macros follow lexical scoping rather than dynamic scoping.
64 # Const's also follow lexical scoping and may override (hide) arguments
65 # or other consts. All variables (arguments and constants) can be bound
66 # to operands. Additionally, arguments (but not constants) can be bound
67 # to macros.
68
69
70 # Below we have a bunch of constant declarations. Each constant must have
71 # a corresponding ASSERT() in LLIntData.cpp.
72
73 # Utilities
74 macro dispatch(advance)
75 addp advance * 4, PC
76 jmp [PC]
77 end
78
79 macro dispatchBranchWithOffset(pcOffset)
80 lshifti 2, pcOffset
81 addp pcOffset, PC
82 jmp [PC]
83 end
84
85 macro dispatchBranch(pcOffset)
86 loadi pcOffset, t0
87 dispatchBranchWithOffset(t0)
88 end
89
90 macro dispatchAfterCall()
91 loadi ArgumentCount + TagOffset[cfr], PC
92 loadi 4[PC], t2
93 storei t1, TagOffset[cfr, t2, 8]
94 storei t0, PayloadOffset[cfr, t2, 8]
95 valueProfile(t1, t0, 4 * (CallOpCodeSize - 1), t3)
96 dispatch(CallOpCodeSize)
97 end
98
99 macro cCall2(function, arg1, arg2)
100 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
101 move arg1, a0
102 move arg2, a1
103 call function
104 elsif X86 or X86_WIN
105 subp 8, sp
106 push arg2
107 push arg1
108 call function
109 addp 16, sp
110 elsif SH4
111 setargs arg1, arg2
112 call function
113 elsif C_LOOP
114 cloopCallSlowPath function, arg1, arg2
115 else
116 error
117 end
118 end
119
120 macro cCall2Void(function, arg1, arg2)
121 if C_LOOP
122 cloopCallSlowPathVoid function, arg1, arg2
123 else
124 cCall2(function, arg1, arg2)
125 end
126 end
127
128 # This barely works. arg3 and arg4 should probably be immediates.
129 macro cCall4(function, arg1, arg2, arg3, arg4)
130 if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
131 move arg1, a0
132 move arg2, a1
133 move arg3, a2
134 move arg4, a3
135 call function
136 elsif X86 or X86_WIN
137 push arg4
138 push arg3
139 push arg2
140 push arg1
141 call function
142 addp 16, sp
143 elsif SH4
144 setargs arg1, arg2, arg3, arg4
145 call function
146 elsif C_LOOP
147 error
148 else
149 error
150 end
151 end
152
153 macro callSlowPath(slowPath)
154 cCall2(slowPath, cfr, PC)
155 move t0, PC
156 end
157
158 macro doVMEntry(makeCall)
159 if X86 or X86_WIN
160 const entry = t4
161 const vm = t3
162 const protoCallFrame = t5
163
164 const temp1 = t0
165 const temp2 = t1
166 const temp3 = t2
167 const temp4 = t3 # same as vm
168 elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP
169 const entry = a0
170 const vm = a1
171 const protoCallFrame = a2
172
173 const temp1 = t3
174 const temp2 = t4
175 const temp3 = t5
176 const temp4 = t4 # Same as temp2
177 elsif MIPS
178 const entry = a0
179 const vm = a1
180 const protoCallFrame = a2
181
182 const temp1 = t3
183 const temp2 = t5
184 const temp3 = t4
185 const temp4 = t6
186 elsif SH4
187 const entry = a0
188 const vm = a1
189 const protoCallFrame = a2
190
191 const temp1 = t3
192 const temp2 = a3
193 const temp3 = t8
194 const temp4 = t9
195 end
196
197 functionPrologue()
198 pushCalleeSaves()
199
200 if X86 or X86_WIN
201 loadp 12[cfr], vm
202 loadp 8[cfr], entry
203 end
204
205 if ARMv7
206 vmEntryRecord(cfr, temp1)
207 move temp1, sp
208 else
209 vmEntryRecord(cfr, sp)
210 end
211
212 storep vm, VMEntryRecord::m_vm[sp]
213 loadp VM::topCallFrame[vm], temp2
214 storep temp2, VMEntryRecord::m_prevTopCallFrame[sp]
215 loadp VM::topVMEntryFrame[vm], temp2
216 storep temp2, VMEntryRecord::m_prevTopVMEntryFrame[sp]
217
218 # Align stack pointer
219 if X86_WIN
220 addp CallFrameAlignSlots * SlotSize, sp, temp1
221 andp ~StackAlignmentMask, temp1
222 subp temp1, CallFrameAlignSlots * SlotSize, sp
223 elsif ARM or ARMv7 or ARMv7_TRADITIONAL
224 addp CallFrameAlignSlots * SlotSize, sp, temp1
225 clrbp temp1, StackAlignmentMask, temp1
226 if ARMv7
227 subp temp1, CallFrameAlignSlots * SlotSize, temp1
228 move temp1, sp
229 else
230 subp temp1, CallFrameAlignSlots * SlotSize, sp
231 end
232 end
233
234 if X86 or X86_WIN
235 loadp 16[cfr], protoCallFrame
236 end
237
238 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
239 addp CallFrameHeaderSlots, temp2, temp2
240 lshiftp 3, temp2
241 subp sp, temp2, temp1
242
243 # Ensure that we have enough additional stack capacity for the incoming args,
244 # and the frame for the JS code we're executing. We need to do this check
245 # before we start copying the args from the protoCallFrame below.
246 bpaeq temp1, VM::m_jsStackLimit[vm], .stackHeightOK
247
248 if C_LOOP
249 move entry, temp2
250 move vm, temp3
251 cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, temp1
252 bpeq t0, 0, .stackCheckFailed
253 move temp2, entry
254 move temp3, vm
255 jmp .stackHeightOK
256
257 .stackCheckFailed:
258 move temp2, entry
259 move temp3, vm
260 end
261
262 subp 8, sp # Align stack for cCall2() to make a call.
263 cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame)
264
265 if ARMv7
266 vmEntryRecord(cfr, temp1)
267 move temp1, sp
268 else
269 vmEntryRecord(cfr, sp)
270 end
271
272 loadp VMEntryRecord::m_vm[sp], temp3
273 loadp VMEntryRecord::m_prevTopCallFrame[sp], temp4
274 storep temp4, VM::topCallFrame[temp3]
275 loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], temp4
276 storep temp4, VM::topVMEntryFrame[temp3]
277
278 if ARMv7
279 subp cfr, CalleeRegisterSaveSize, temp3
280 move temp3, sp
281 else
282 subp cfr, CalleeRegisterSaveSize, sp
283 end
284
285 popCalleeSaves()
286 functionEpilogue()
287 ret
288
289 .stackHeightOK:
290 move temp1, sp
291 move 4, temp1
292
293 .copyHeaderLoop:
294 subi 1, temp1
295 loadi TagOffset[protoCallFrame, temp1, 8], temp3
296 storei temp3, TagOffset + CodeBlock[sp, temp1, 8]
297 loadi PayloadOffset[protoCallFrame, temp1, 8], temp3
298 storei temp3, PayloadOffset + CodeBlock[sp, temp1, 8]
299 btinz temp1, .copyHeaderLoop
300
301 loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
302 subi 1, temp2
303 loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3
304 subi 1, temp3
305
306 bieq temp2, temp3, .copyArgs
307 .fillExtraArgsLoop:
308 subi 1, temp3
309 storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, temp3, 8]
310 storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, temp3, 8]
311 bineq temp2, temp3, .fillExtraArgsLoop
312
313 .copyArgs:
314 loadp ProtoCallFrame::args[protoCallFrame], temp1
315
316 .copyArgsLoop:
317 btiz temp2, .copyArgsDone
318 subi 1, temp2
319 loadi TagOffset[temp1, temp2, 8], temp3
320 storei temp3, ThisArgumentOffset + 8 + TagOffset[sp, temp2, 8]
321 loadi PayloadOffset[temp1, temp2, 8], temp3
322 storei temp3, ThisArgumentOffset + 8 + PayloadOffset[sp, temp2, 8]
323 jmp .copyArgsLoop
324
325 .copyArgsDone:
326 storep sp, VM::topCallFrame[vm]
327 storep cfr, VM::topVMEntryFrame[vm]
328
329 makeCall(entry, temp1, temp2)
330
331 if ARMv7
332 vmEntryRecord(cfr, temp1)
333 move temp1, sp
334 else
335 vmEntryRecord(cfr, sp)
336 end
337
338 loadp VMEntryRecord::m_vm[sp], temp3
339 loadp VMEntryRecord::m_prevTopCallFrame[sp], temp4
340 storep temp4, VM::topCallFrame[temp3]
341 loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], temp4
342 storep temp4, VM::topVMEntryFrame[temp3]
343
344 if ARMv7
345 subp cfr, CalleeRegisterSaveSize, temp3
346 move temp3, sp
347 else
348 subp cfr, CalleeRegisterSaveSize, sp
349 end
350
351 popCalleeSaves()
352 functionEpilogue()
353 ret
354 end
355
356 macro makeJavaScriptCall(entry, temp, unused)
357 addp CallerFrameAndPCSize, sp
358 checkStackPointerAlignment(t2, 0xbad0dc02)
359 if C_LOOP
360 cloopCallJSFunction entry
361 else
362 call entry
363 end
364 checkStackPointerAlignment(t2, 0xbad0dc03)
365 subp CallerFrameAndPCSize, sp
366 end
367
368 macro makeHostFunctionCall(entry, temp1, temp2)
369 move entry, temp1
370 storep cfr, [sp]
371 if C_LOOP
372 move sp, a0
373 storep lr, PtrSize[sp]
374 cloopCallNative temp1
375 elsif X86 or X86_WIN
376 # Put callee frame pointer on stack as arg0, also put it in ecx for "fastcall" targets
377 move 0, temp2
378 move temp2, 4[sp] # put 0 in ReturnPC
379 move sp, t2 # t2 is ecx
380 push temp2 # Push dummy arg1
381 push t2
382 call temp1
383 addp 8, sp
384 else
385 move sp, a0
386 call temp1
387 end
388 end
389
390 _handleUncaughtException:
391 loadp Callee + PayloadOffset[cfr], t3
392 andp MarkedBlockMask, t3
393 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
394 loadp VM::callFrameForThrow[t3], cfr
395
396 loadp CallerFrame[cfr], cfr
397
398 if ARMv7
399 vmEntryRecord(cfr, t3)
400 move t3, sp
401 else
402 vmEntryRecord(cfr, sp)
403 end
404
405 loadp VMEntryRecord::m_vm[sp], t3
406 loadp VMEntryRecord::m_prevTopCallFrame[sp], t5
407 storep t5, VM::topCallFrame[t3]
408 loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t5
409 storep t5, VM::topVMEntryFrame[t3]
410
411 if ARMv7
412 subp cfr, CalleeRegisterSaveSize, t3
413 move t3, sp
414 else
415 subp cfr, CalleeRegisterSaveSize, sp
416 end
417
418 popCalleeSaves()
419 functionEpilogue()
420 ret
421
422 macro doReturnFromHostFunction(extraStackSpace)
423 functionEpilogue(extraStackSpace)
424 ret
425 end
426
427 # Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
428 # should be an immediate integer - any integer you like; use it to identify the place you're
429 # debugging from. operand should likewise be an immediate, and should identify the operand
430 # in the instruction stream you'd like to print out.
431 macro traceOperand(fromWhere, operand)
432 cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
433 move t0, PC
434 move t1, cfr
435 end
436
437 # Debugging operation if you'd like to print the value of an operand in the instruction
438 # stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
439 # value.
440 macro traceValue(fromWhere, operand)
441 cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
442 move t0, PC
443 move t1, cfr
444 end
445
446 # Call a slowPath for call opcodes.
447 macro callCallSlowPath(slowPath, action)
448 storep PC, ArgumentCount + TagOffset[cfr]
449 cCall2(slowPath, cfr, PC)
450 action(t0)
451 end
452
453 macro callWatchdogTimerHandler(throwHandler)
454 storei PC, ArgumentCount + TagOffset[cfr]
455 cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
456 btpnz t0, throwHandler
457 loadi ArgumentCount + TagOffset[cfr], PC
458 end
459
460 macro checkSwitchToJITForLoop()
461 checkSwitchToJIT(
462 1,
463 macro ()
464 storei PC, ArgumentCount + TagOffset[cfr]
465 cCall2(_llint_loop_osr, cfr, PC)
466 btpz t0, .recover
467 move t1, sp
468 jmp t0
469 .recover:
470 loadi ArgumentCount + TagOffset[cfr], PC
471 end)
472 end
473
474 macro loadVariable(operand, index, tag, payload)
475 loadisFromInstruction(operand, index)
476 loadi TagOffset[cfr, index, 8], tag
477 loadi PayloadOffset[cfr, index, 8], payload
478 end
479
480 # Index, tag, and payload must be different registers. Index is not
481 # changed.
482 macro loadConstantOrVariable(index, tag, payload)
483 bigteq index, FirstConstantRegisterIndex, .constant
484 loadi TagOffset[cfr, index, 8], tag
485 loadi PayloadOffset[cfr, index, 8], payload
486 jmp .done
487 .constant:
488 loadp CodeBlock[cfr], payload
489 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
490 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
491 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
492 loadp TagOffset[payload, index, 8], tag
493 loadp PayloadOffset[payload, index, 8], payload
494 .done:
495 end
496
497 macro loadConstantOrVariableTag(index, tag)
498 bigteq index, FirstConstantRegisterIndex, .constant
499 loadi TagOffset[cfr, index, 8], tag
500 jmp .done
501 .constant:
502 loadp CodeBlock[cfr], tag
503 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
504 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
505 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
506 loadp TagOffset[tag, index, 8], tag
507 .done:
508 end
509
510 # Index and payload may be the same register. Index may be clobbered.
511 macro loadConstantOrVariable2Reg(index, tag, payload)
512 bigteq index, FirstConstantRegisterIndex, .constant
513 loadi TagOffset[cfr, index, 8], tag
514 loadi PayloadOffset[cfr, index, 8], payload
515 jmp .done
516 .constant:
517 loadp CodeBlock[cfr], tag
518 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
519 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
520 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
521 lshifti 3, index
522 addp index, tag
523 loadp PayloadOffset[tag], payload
524 loadp TagOffset[tag], tag
525 .done:
526 end
527
528 macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
529 bigteq index, FirstConstantRegisterIndex, .constant
530 tagCheck(TagOffset[cfr, index, 8])
531 loadi PayloadOffset[cfr, index, 8], payload
532 jmp .done
533 .constant:
534 loadp CodeBlock[cfr], payload
535 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
536 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
537 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
538 tagCheck(TagOffset[payload, index, 8])
539 loadp PayloadOffset[payload, index, 8], payload
540 .done:
541 end
542
543 # Index and payload must be different registers. Index is not mutated. Use
544 # this if you know what the tag of the variable should be. Doing the tag
545 # test as part of loading the variable reduces register use, but may not
546 # be faster than doing loadConstantOrVariable followed by a branch on the
547 # tag.
548 macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
549 loadConstantOrVariablePayloadTagCustom(
550 index,
551 macro (actualTag) bineq actualTag, expectedTag, slow end,
552 payload)
553 end
554
555 macro loadConstantOrVariablePayloadUnchecked(index, payload)
556 loadConstantOrVariablePayloadTagCustom(
557 index,
558 macro (actualTag) end,
559 payload)
560 end
561
562 macro storeStructureWithTypeInfo(cell, structure, scratch)
563 storep structure, JSCell::m_structureID[cell]
564
565 loadi Structure::m_blob + StructureIDBlob::u.words.word2[structure], scratch
566 storei scratch, JSCell::m_indexingType[cell]
567 end
568
569 macro writeBarrierOnOperand(cellOperand)
570 if GGC
571 loadisFromInstruction(cellOperand, t1)
572 loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
573 skipIfIsRememberedOrInEden(t2, t1, t3,
574 macro(gcData)
575 btbnz gcData, .writeBarrierDone
576 push cfr, PC
577 # We make two extra slots because cCall2 will poke.
578 subp 8, sp
579 cCall2Void(_llint_write_barrier_slow, cfr, t2)
580 addp 8, sp
581 pop PC, cfr
582 end
583 )
584 .writeBarrierDone:
585 end
586 end
587
588 macro writeBarrierOnOperands(cellOperand, valueOperand)
589 if GGC
590 loadisFromInstruction(valueOperand, t1)
591 loadConstantOrVariableTag(t1, t0)
592 bineq t0, CellTag, .writeBarrierDone
593
594 writeBarrierOnOperand(cellOperand)
595 .writeBarrierDone:
596 end
597 end
598
599 macro writeBarrierOnGlobalObject(valueOperand)
600 if GGC
601 loadisFromInstruction(valueOperand, t1)
602 loadConstantOrVariableTag(t1, t0)
603 bineq t0, CellTag, .writeBarrierDone
604
605 loadp CodeBlock[cfr], t3
606 loadp CodeBlock::m_globalObject[t3], t3
607 skipIfIsRememberedOrInEden(t3, t1, t2,
608 macro(gcData)
609 btbnz gcData, .writeBarrierDone
610 push cfr, PC
611 # We make two extra slots because cCall2 will poke.
612 subp 8, sp
613 cCall2Void(_llint_write_barrier_slow, cfr, t3)
614 addp 8, sp
615 pop PC, cfr
616 end
617 )
618 .writeBarrierDone:
619 end
620 end
621
622 macro valueProfile(tag, payload, operand, scratch)
623 loadp operand[PC], scratch
624 storei tag, ValueProfile::m_buckets + TagOffset[scratch]
625 storei payload, ValueProfile::m_buckets + PayloadOffset[scratch]
626 end
627
628
629 # Entrypoints into the interpreter
630
631 # Expects that CodeBlock is in t1, which is what prologue() leaves behind.
632 macro functionArityCheck(doneLabel, slowPath)
633 loadi PayloadOffset + ArgumentCount[cfr], t0
634 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
635 cCall2(slowPath, cfr, PC) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error
636 btiz t0, .noError
637 move t1, cfr # t1 contains caller frame
638 jmp _llint_throw_from_slow_path_trampoline
639
640 .noError:
641 # t1 points to ArityCheckData.
642 loadp CommonSlowPaths::ArityCheckData::thunkToCall[t1], t2
643 btpz t2, .proceedInline
644
645 loadp CommonSlowPaths::ArityCheckData::returnPC[t1], t5
646 loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t0
647 call t2
648 if ASSERT_ENABLED
649 loadp ReturnPC[cfr], t0
650 loadp [t0], t0
651 end
652 jmp .continue
653
654 .proceedInline:
655 loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t1
656 btiz t1, .continue
657
658 // Move frame up "t1 * 2" slots
659 lshiftp 1, t1
660 negi t1
661 move cfr, t3
662 loadi PayloadOffset + ArgumentCount[cfr], t2
663 addi CallFrameHeaderSlots, t2
664 .copyLoop:
665 loadi PayloadOffset[t3], t0
666 storei t0, PayloadOffset[t3, t1, 8]
667 loadi TagOffset[t3], t0
668 storei t0, TagOffset[t3, t1, 8]
669 addp 8, t3
670 bsubinz 1, t2, .copyLoop
671
672 // Fill new slots with JSUndefined
673 move t1, t2
674 .fillLoop:
675 move 0, t0
676 storei t0, PayloadOffset[t3, t1, 8]
677 move UndefinedTag, t0
678 storei t0, TagOffset[t3, t1, 8]
679 addp 8, t3
680 baddinz 1, t2, .fillLoop
681
682 lshiftp 3, t1
683 addp t1, cfr
684 addp t1, sp
685 .continue:
686 # Reload CodeBlock and PC, since the slow_path clobbered it.
687 loadp CodeBlock[cfr], t1
688 loadp CodeBlock::m_instructions[t1], PC
689 jmp doneLabel
690 end
691
692 macro branchIfException(label)
693 loadp Callee + PayloadOffset[cfr], t3
694 andp MarkedBlockMask, t3
695 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
696 btiz VM::m_exception[t3], .noException
697 jmp label
698 .noException:
699 end
700
701
702 # Instruction implementations
703
704 _llint_op_enter:
705 traceExecution()
706 checkStackPointerAlignment(t2, 0xdead00e1)
707 loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
708 loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
709 btiz t2, .opEnterDone
710 move UndefinedTag, t0
711 move 0, t1
712 negi t2
713 .opEnterLoop:
714 storei t0, TagOffset[cfr, t2, 8]
715 storei t1, PayloadOffset[cfr, t2, 8]
716 addi 1, t2
717 btinz t2, .opEnterLoop
718 .opEnterDone:
719 callSlowPath(_slow_path_enter)
720 dispatch(1)
721
722
723 _llint_op_create_lexical_environment:
724 traceExecution()
725 callSlowPath(_llint_slow_path_create_lexical_environment)
726 dispatch(3)
727
728
729 _llint_op_get_scope:
730 traceExecution()
731 loadi Callee + PayloadOffset[cfr], t0
732 loadi JSCallee::m_scope[t0], t0
733 loadisFromInstruction(1, t1)
734 storei CellTag, TagOffset[cfr, t1, 8]
735 storei t0, PayloadOffset[cfr, t1, 8]
736 dispatch(2)
737
738
739 _llint_op_create_this:
740 traceExecution()
741 loadi 8[PC], t0
742 loadp PayloadOffset[cfr, t0, 8], t0
743 loadp JSFunction::m_rareData[t0], t4
744 btpz t4, .opCreateThisSlow
745 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_allocator[t4], t1
746 loadp FunctionRareData::m_allocationProfile + ObjectAllocationProfile::m_structure[t4], t2
747 btpz t1, .opCreateThisSlow
748 loadpFromInstruction(4, t4)
749 bpeq t4, 1, .hasSeenMultipleCallee
750 bpneq t4, t0, .opCreateThisSlow
751 .hasSeenMultipleCallee:
752 allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
753 loadi 4[PC], t1
754 storei CellTag, TagOffset[cfr, t1, 8]
755 storei t0, PayloadOffset[cfr, t1, 8]
756 dispatch(5)
757
758 .opCreateThisSlow:
759 callSlowPath(_slow_path_create_this)
760 dispatch(5)
761
762
763 _llint_op_to_this:
764 traceExecution()
765 loadi 4[PC], t0
766 bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow
767 loadi PayloadOffset[cfr, t0, 8], t0
768 bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
769 loadpFromInstruction(2, t2)
770 bpneq JSCell::m_structureID[t0], t2, .opToThisSlow
771 dispatch(4)
772
773 .opToThisSlow:
774 callSlowPath(_slow_path_to_this)
775 dispatch(4)
776
777
778 _llint_op_new_object:
779 traceExecution()
780 loadpFromInstruction(3, t0)
781 loadp ObjectAllocationProfile::m_allocator[t0], t1
782 loadp ObjectAllocationProfile::m_structure[t0], t2
783 allocateJSObject(t1, t2, t0, t3, .opNewObjectSlow)
784 loadi 4[PC], t1
785 storei CellTag, TagOffset[cfr, t1, 8]
786 storei t0, PayloadOffset[cfr, t1, 8]
787 dispatch(4)
788
789 .opNewObjectSlow:
790 callSlowPath(_llint_slow_path_new_object)
791 dispatch(4)
792
793
794 _llint_op_check_tdz:
795 traceExecution()
796 loadpFromInstruction(1, t0)
797 bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opNotTDZ
798 callSlowPath(_slow_path_throw_tdz_error)
799
800 .opNotTDZ:
801 dispatch(2)
802
803
804 _llint_op_mov:
805 traceExecution()
806 loadi 8[PC], t1
807 loadi 4[PC], t0
808 loadConstantOrVariable(t1, t2, t3)
809 storei t2, TagOffset[cfr, t0, 8]
810 storei t3, PayloadOffset[cfr, t0, 8]
811 dispatch(3)
812
813
814 _llint_op_not:
815 traceExecution()
816 loadi 8[PC], t0
817 loadi 4[PC], t1
818 loadConstantOrVariable(t0, t2, t3)
819 bineq t2, BooleanTag, .opNotSlow
820 xori 1, t3
821 storei t2, TagOffset[cfr, t1, 8]
822 storei t3, PayloadOffset[cfr, t1, 8]
823 dispatch(3)
824
825 .opNotSlow:
826 callSlowPath(_slow_path_not)
827 dispatch(3)
828
829
830 _llint_op_eq:
831 traceExecution()
832 loadi 12[PC], t2
833 loadi 8[PC], t0
834 loadConstantOrVariable(t2, t3, t1)
835 loadConstantOrVariable2Reg(t0, t2, t0)
836 bineq t2, t3, .opEqSlow
837 bieq t2, CellTag, .opEqSlow
838 bib t2, LowestTag, .opEqSlow
839 loadi 4[PC], t2
840 cieq t0, t1, t0
841 storei BooleanTag, TagOffset[cfr, t2, 8]
842 storei t0, PayloadOffset[cfr, t2, 8]
843 dispatch(4)
844
845 .opEqSlow:
846 callSlowPath(_slow_path_eq)
847 dispatch(4)
848
849
850 _llint_op_eq_null:
851 traceExecution()
852 loadi 8[PC], t0
853 loadi 4[PC], t3
854 assertNotConstant(t0)
855 loadi TagOffset[cfr, t0, 8], t1
856 loadi PayloadOffset[cfr, t0, 8], t0
857 bineq t1, CellTag, .opEqNullImmediate
858 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
859 move 0, t1
860 jmp .opEqNullNotImmediate
861 .opEqNullMasqueradesAsUndefined:
862 loadp JSCell::m_structureID[t0], t1
863 loadp CodeBlock[cfr], t0
864 loadp CodeBlock::m_globalObject[t0], t0
865 cpeq Structure::m_globalObject[t1], t0, t1
866 jmp .opEqNullNotImmediate
867 .opEqNullImmediate:
868 cieq t1, NullTag, t2
869 cieq t1, UndefinedTag, t1
870 ori t2, t1
871 .opEqNullNotImmediate:
872 storei BooleanTag, TagOffset[cfr, t3, 8]
873 storei t1, PayloadOffset[cfr, t3, 8]
874 dispatch(3)
875
876
877 _llint_op_neq:
878 traceExecution()
879 loadi 12[PC], t2
880 loadi 8[PC], t0
881 loadConstantOrVariable(t2, t3, t1)
882 loadConstantOrVariable2Reg(t0, t2, t0)
883 bineq t2, t3, .opNeqSlow
884 bieq t2, CellTag, .opNeqSlow
885 bib t2, LowestTag, .opNeqSlow
886 loadi 4[PC], t2
887 cineq t0, t1, t0
888 storei BooleanTag, TagOffset[cfr, t2, 8]
889 storei t0, PayloadOffset[cfr, t2, 8]
890 dispatch(4)
891
892 .opNeqSlow:
893 callSlowPath(_slow_path_neq)
894 dispatch(4)
895
896
897 _llint_op_neq_null:
898 traceExecution()
899 loadi 8[PC], t0
900 loadi 4[PC], t3
901 assertNotConstant(t0)
902 loadi TagOffset[cfr, t0, 8], t1
903 loadi PayloadOffset[cfr, t0, 8], t0
904 bineq t1, CellTag, .opNeqNullImmediate
905 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
906 move 1, t1
907 jmp .opNeqNullNotImmediate
908 .opNeqNullMasqueradesAsUndefined:
909 loadp JSCell::m_structureID[t0], t1
910 loadp CodeBlock[cfr], t0
911 loadp CodeBlock::m_globalObject[t0], t0
912 cpneq Structure::m_globalObject[t1], t0, t1
913 jmp .opNeqNullNotImmediate
914 .opNeqNullImmediate:
915 cineq t1, NullTag, t2
916 cineq t1, UndefinedTag, t1
917 andi t2, t1
918 .opNeqNullNotImmediate:
919 storei BooleanTag, TagOffset[cfr, t3, 8]
920 storei t1, PayloadOffset[cfr, t3, 8]
921 dispatch(3)
922
923
924 macro strictEq(equalityOperation, slowPath)
925 loadi 12[PC], t2
926 loadi 8[PC], t0
927 loadConstantOrVariable(t2, t3, t1)
928 loadConstantOrVariable2Reg(t0, t2, t0)
929 bineq t2, t3, .slow
930 bib t2, LowestTag, .slow
931 bineq t2, CellTag, .notStringOrSymbol
932 bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol
933 bbb JSCell::m_type[t1], ObjectType, .slow
934 .notStringOrSymbol:
935 loadi 4[PC], t2
936 equalityOperation(t0, t1, t0)
937 storei BooleanTag, TagOffset[cfr, t2, 8]
938 storei t0, PayloadOffset[cfr, t2, 8]
939 dispatch(4)
940
941 .slow:
942 callSlowPath(slowPath)
943 dispatch(4)
944 end
945
946 _llint_op_stricteq:
947 traceExecution()
948 strictEq(macro (left, right, result) cieq left, right, result end, _slow_path_stricteq)
949
950
951 _llint_op_nstricteq:
952 traceExecution()
953 strictEq(macro (left, right, result) cineq left, right, result end, _slow_path_nstricteq)
954
955
956 _llint_op_inc:
957 traceExecution()
958 loadi 4[PC], t0
959 bineq TagOffset[cfr, t0, 8], Int32Tag, .opIncSlow
960 loadi PayloadOffset[cfr, t0, 8], t1
961 baddio 1, t1, .opIncSlow
962 storei t1, PayloadOffset[cfr, t0, 8]
963 dispatch(2)
964
965 .opIncSlow:
966 callSlowPath(_slow_path_inc)
967 dispatch(2)
968
969
970 _llint_op_dec:
971 traceExecution()
972 loadi 4[PC], t0
973 bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow
974 loadi PayloadOffset[cfr, t0, 8], t1
975 bsubio 1, t1, .opDecSlow
976 storei t1, PayloadOffset[cfr, t0, 8]
977 dispatch(2)
978
979 .opDecSlow:
980 callSlowPath(_slow_path_dec)
981 dispatch(2)
982
983
984 _llint_op_to_number:
985 traceExecution()
986 loadi 8[PC], t0
987 loadi 4[PC], t1
988 loadConstantOrVariable(t0, t2, t3)
989 bieq t2, Int32Tag, .opToNumberIsInt
990 biaeq t2, LowestTag, .opToNumberSlow
991 .opToNumberIsInt:
992 storei t2, TagOffset[cfr, t1, 8]
993 storei t3, PayloadOffset[cfr, t1, 8]
994 dispatch(3)
995
996 .opToNumberSlow:
997 callSlowPath(_slow_path_to_number)
998 dispatch(3)
999
1000
1001 _llint_op_to_string:
1002 traceExecution()
1003 loadi 8[PC], t0
1004 loadi 4[PC], t1
1005 loadConstantOrVariable(t0, t2, t3)
1006 bineq t2, CellTag, .opToStringSlow
1007 bbneq JSCell::m_type[t3], StringType, .opToStringSlow
1008 .opToStringIsString:
1009 storei t2, TagOffset[cfr, t1, 8]
1010 storei t3, PayloadOffset[cfr, t1, 8]
1011 dispatch(3)
1012
1013 .opToStringSlow:
1014 callSlowPath(_slow_path_to_string)
1015 dispatch(3)
1016
1017
1018 _llint_op_negate:
1019 traceExecution()
1020 loadi 8[PC], t0
1021 loadi 4[PC], t3
1022 loadConstantOrVariable(t0, t1, t2)
1023 bineq t1, Int32Tag, .opNegateSrcNotInt
1024 btiz t2, 0x7fffffff, .opNegateSlow
1025 negi t2
1026 storei Int32Tag, TagOffset[cfr, t3, 8]
1027 storei t2, PayloadOffset[cfr, t3, 8]
1028 dispatch(3)
1029 .opNegateSrcNotInt:
1030 bia t1, LowestTag, .opNegateSlow
1031 xori 0x80000000, t1
1032 storei t1, TagOffset[cfr, t3, 8]
1033 storei t2, PayloadOffset[cfr, t3, 8]
1034 dispatch(3)
1035
1036 .opNegateSlow:
1037 callSlowPath(_slow_path_negate)
1038 dispatch(3)
1039
1040
1041 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath)
1042 loadi 12[PC], t2
1043 loadi 8[PC], t0
1044 loadConstantOrVariable(t2, t3, t1)
1045 loadConstantOrVariable2Reg(t0, t2, t0)
1046 bineq t2, Int32Tag, .op1NotInt
1047 bineq t3, Int32Tag, .op2NotInt
1048 loadi 4[PC], t2
1049 integerOperationAndStore(t3, t1, t0, .slow, t2)
1050 dispatch(5)
1051
1052 .op1NotInt:
1053 # First operand is definitely not an int, the second operand could be anything.
1054 bia t2, LowestTag, .slow
1055 bib t3, LowestTag, .op1NotIntOp2Double
1056 bineq t3, Int32Tag, .slow
1057 ci2d t1, ft1
1058 jmp .op1NotIntReady
1059 .op1NotIntOp2Double:
1060 fii2d t1, t3, ft1
1061 .op1NotIntReady:
1062 loadi 4[PC], t1
1063 fii2d t0, t2, ft0
1064 doubleOperation(ft1, ft0)
1065 stored ft0, [cfr, t1, 8]
1066 dispatch(5)
1067
1068 .op2NotInt:
1069 # First operand is definitely an int, the second operand is definitely not.
1070 loadi 4[PC], t2
1071 bia t3, LowestTag, .slow
1072 ci2d t0, ft0
1073 fii2d t1, t3, ft1
1074 doubleOperation(ft1, ft0)
1075 stored ft0, [cfr, t2, 8]
1076 dispatch(5)
1077
1078 .slow:
1079 callSlowPath(slowPath)
1080 dispatch(5)
1081 end
1082
# Convenience wrapper around binaryOpCustomStore for ops whose int32 result
# can be stored with the generic tag+payload store (add/sub). The integer
# operation leaves its result in the "right" register.
macro binaryOp(integerOperation, doubleOperation, slowPath)
    binaryOpCustomStore(
        macro (int32Tag, left, right, slow, index)
            integerOperation(left, right, slow)
            storei int32Tag, TagOffset[cfr, index, 8]
            storei right, PayloadOffset[cfr, index, 8]
        end,
        doubleOperation, slowPath)
end
1092
# op_add: dst = op1 + op2. baddio branches to slow on int32 overflow.
_llint_op_add:
    traceExecution()
    binaryOp(
        macro (left, right, slow) baddio left, right, slow end,
        macro (left, right) addd left, right end,
        _slow_path_add)
1099
1100
# op_mul: dst = op1 * op2. Needs a custom store because a zero result with a
# negative operand must be -0.0 (a double), so that case goes to the slow path.
_llint_op_mul:
    traceExecution()
    binaryOpCustomStore(
        macro (int32Tag, left, right, slow, index)
            const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant.
            move right, scratch
            bmulio left, scratch, slow      # slow on int32 overflow
            btinz scratch, .done            # non-zero result: no -0 hazard
            bilt left, 0, slow              # 0 * negative => -0.0, go slow
            bilt right, 0, slow             # negative * 0 => -0.0, go slow
        .done:
            storei Int32Tag, TagOffset[cfr, index, 8]
            storei scratch, PayloadOffset[cfr, index, 8]
        end,
        macro (left, right) muld left, right end,
        _slow_path_mul)
1117
1118
# op_sub: dst = op1 - op2. bsubio branches to slow on int32 overflow.
_llint_op_sub:
    traceExecution()
    binaryOp(
        macro (left, right, slow) bsubio left, right, slow end,
        macro (left, right) subd left, right end,
        _slow_path_sub)
1125
1126
# op_div: dst = op1 / op2. Even for int32 operands the division is done in
# double; bcd2i converts back to int32 and branches to .notInt when the
# quotient is not exactly representable as an int32 (fraction, -0, etc.),
# in which case the raw double is stored instead.
_llint_op_div:
    traceExecution()
    binaryOpCustomStore(
        macro (int32Tag, left, right, slow, index)
            ci2d left, ft0
            ci2d right, ft1
            divd ft0, ft1                   # ft1 = op1 / op2
            bcd2i ft1, right, .notInt       # right = (int32)ft1, or branch
            storei int32Tag, TagOffset[cfr, index, 8]
            storei right, PayloadOffset[cfr, index, 8]
            jmp .done
        .notInt:
            stored ft1, [cfr, index, 8]
        .done:
        end,
        macro (left, right) divd left, right end,
        _slow_path_div)
1144
1145
# Shared fast path for bitwise/shift opcodes: both operands must be int32,
# otherwise fall to the slow path. operation(op2Payload, op1Payload) leaves
# the result in its second argument (t0). `advance` is the opcode length
# (4 for shifts, 5 for and/or/xor which carry profiling slots).
# Bytecode layout: [opcode, dst(4), op1(8), op2(12), ...].
macro bitOp(operation, slowPath, advance)
    loadi 12[PC], t2
    loadi 8[PC], t0
    loadConstantOrVariable(t2, t3, t1)      # t3 = op2 tag, t1 = op2 payload
    loadConstantOrVariable2Reg(t0, t2, t0)  # t2 = op1 tag, t0 = op1 payload
    bineq t3, Int32Tag, .slow
    bineq t2, Int32Tag, .slow
    loadi 4[PC], t2                         # t2 = dst operand index
    operation(t1, t0)                       # t0 = op1 OP op2
    storei t3, TagOffset[cfr, t2, 8]        # t3 still holds Int32Tag
    storei t0, PayloadOffset[cfr, t2, 8]
    dispatch(advance)

.slow:
    callSlowPath(slowPath)
    dispatch(advance)
end
1163
# Shift opcodes: dst = op1 SHIFT op2. Instruction length 4.
_llint_op_lshift:
    traceExecution()
    bitOp(
        macro (left, right) lshifti left, right end,
        _slow_path_lshift,
        4)


_llint_op_rshift:
    traceExecution()
    bitOp(
        macro (left, right) rshifti left, right end,
        _slow_path_rshift,
        4)


# Note: an unsigned shift whose result doesn't fit in int32 is handled by
# op_unsigned below; here the result is stored as int32 like the others.
_llint_op_urshift:
    traceExecution()
    bitOp(
        macro (left, right) urshifti left, right end,
        _slow_path_urshift,
        4)
1186
1187
# op_unsigned: reinterpret src(8) as unsigned and store to dst(4). The fast
# path only handles values whose sign bit is clear (representable as int32);
# a negative payload needs boxing as a double, so it goes slow.
_llint_op_unsigned:
    traceExecution()
    loadi 4[PC], t0                     # t0 = dst operand index
    loadi 8[PC], t1                     # t1 = src operand index
    loadConstantOrVariablePayload(t1, Int32Tag, t2, .opUnsignedSlow)
    bilt t2, 0, .opUnsignedSlow         # would be > INT32_MAX as unsigned
    storei t2, PayloadOffset[cfr, t0, 8]
    storei Int32Tag, TagOffset[cfr, t0, 8]
    dispatch(3)
.opUnsignedSlow:
    callSlowPath(_slow_path_unsigned)
    dispatch(3)
1200
1201
# Bitwise opcodes: dst = op1 OP op2. Instruction length 5.
_llint_op_bitand:
    traceExecution()
    bitOp(
        macro (left, right) andi left, right end,
        _slow_path_bitand,
        5)


_llint_op_bitxor:
    traceExecution()
    bitOp(
        macro (left, right) xori left, right end,
        _slow_path_bitxor,
        5)


_llint_op_bitor:
    traceExecution()
    bitOp(
        macro (left, right) ori left, right end,
        _slow_path_bitor,
        5)
1224
1225
# op_check_has_instance: verify that the constructor (operand 12) is a cell
# with default hasInstance behavior; otherwise the slow path takes over.
# The slow path may branch (it sets PC itself), hence dispatch(0) there.
_llint_op_check_has_instance:
    traceExecution()
    loadi 12[PC], t1
    loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
    btbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
    dispatch(5)

.opCheckHasInstanceSlow:
    callSlowPath(_llint_slow_path_check_has_instance)
    dispatch(0)
1236
1237
# op_instanceof: dst(4) = value(8) instanceof prototype(12). Walks the
# value's prototype chain looking for the prototype object.
_llint_op_instanceof:
    traceExecution()
    # Actually do the work.
    loadi 12[PC], t0
    loadi 4[PC], t3                     # t3 = dst operand index
    loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
    bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow   # prototype must be an object
    loadi 8[PC], t0
    loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)

    # Register state: t1 = prototype, t2 = value
    move 1, t0                          # optimistically assume true
.opInstanceofLoop:
    loadp JSCell::m_structureID[t2], t2
    loadi Structure::m_prototype + PayloadOffset[t2], t2    # t2 = next proto
    bpeq t2, t1, .opInstanceofDone      # found it: result stays 1
    btinz t2, .opInstanceofLoop         # null proto payload terminates the chain

    move 0, t0                          # reached end of chain: false
.opInstanceofDone:
    storei BooleanTag, TagOffset[cfr, t3, 8]
    storei t0, PayloadOffset[cfr, t3, 8]
    dispatch(4)

.opInstanceofSlow:
    callSlowPath(_llint_slow_path_instanceof)
    dispatch(4)
1265
1266
# op_is_undefined: dst(4) = (src(8) === undefined), accounting for cells
# that masquerade as undefined (document.all-style objects), which only
# count as undefined when observed from their own global object.
_llint_op_is_undefined:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t0                     # t0 = dst operand index
    loadConstantOrVariable(t1, t2, t3)  # t2 = tag, t3 = payload
    storei BooleanTag, TagOffset[cfr, t0, 8]
    bieq t2, CellTag, .opIsUndefinedCell
    cieq t2, UndefinedTag, t3           # non-cell: simple tag compare
    storei t3, PayloadOffset[cfr, t0, 8]
    dispatch(3)
.opIsUndefinedCell:
    btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
    move 0, t1                          # ordinary cell: never undefined
    storei t1, PayloadOffset[cfr, t0, 8]
    dispatch(3)
.opIsUndefinedMasqueradesAsUndefined:
    # Masquerader is "undefined" only within its own global object.
    loadp JSCell::m_structureID[t3], t1
    loadp CodeBlock[cfr], t3
    loadp CodeBlock::m_globalObject[t3], t3
    cpeq Structure::m_globalObject[t1], t3, t1
    storei t1, PayloadOffset[cfr, t0, 8]
    dispatch(3)
1289
1290
# op_is_boolean: dst(4) = (tag of src(8) == BooleanTag).
_llint_op_is_boolean:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t2                     # t2 = dst operand index
    loadConstantOrVariableTag(t1, t0)   # t0 = src tag
    cieq t0, BooleanTag, t0
    storei BooleanTag, TagOffset[cfr, t2, 8]
    storei t0, PayloadOffset[cfr, t2, 8]
    dispatch(3)
1300
1301
# op_is_number: dst(4) = src(8) is int32 or double.
# Trick: a number's tag is either Int32Tag (0xffffffff) or a double high word
# (unsigned value <= LowestTag). Adding 1 wraps Int32Tag to 0, so both number
# cases — and only those — become unsigned-below LowestTag + 1.
_llint_op_is_number:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t2                     # t2 = dst operand index
    loadConstantOrVariableTag(t1, t0)   # t0 = src tag
    storei BooleanTag, TagOffset[cfr, t2, 8]
    addi 1, t0
    cib t0, LowestTag + 1, t1
    storei t1, PayloadOffset[cfr, t2, 8]
    dispatch(3)
1312
1313
# op_is_string: dst(4) = (src(8) is a cell with StringType).
_llint_op_is_string:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t2                     # t2 = dst operand index
    loadConstantOrVariable(t1, t0, t3)  # t0 = tag, t3 = payload
    storei BooleanTag, TagOffset[cfr, t2, 8]
    bineq t0, CellTag, .opIsStringNotCell
    cbeq JSCell::m_type[t3], StringType, t1
    storei t1, PayloadOffset[cfr, t2, 8]
    dispatch(3)
.opIsStringNotCell:
    storep 0, PayloadOffset[cfr, t2, 8] # non-cells are never strings
    dispatch(3)
1327
1328
# op_is_object: dst(4) = (src(8) is a cell with type >= ObjectType).
_llint_op_is_object:
    traceExecution()
    loadi 8[PC], t1
    loadi 4[PC], t2                     # t2 = dst operand index
    loadConstantOrVariable(t1, t0, t3)  # t0 = tag, t3 = payload
    storei BooleanTag, TagOffset[cfr, t2, 8]
    bineq t0, CellTag, .opIsObjectNotCell
    cbaeq JSCell::m_type[t3], ObjectType, t1
    storei t1, PayloadOffset[cfr, t2, 8]
    dispatch(3)
.opIsObjectNotCell:
    storep 0, PayloadOffset[cfr, t2, 8] # non-cells are never objects
    dispatch(3)
1342
1343
# Load a JSValue (tag + payload) from out-of-line (butterfly) property
# storage. Caller guarantees propertyOffset >= firstOutOfLineOffset.
# Out-of-line offsets are addressed by negated index off the butterfly.
# Clobbers objectAndStorage (becomes the butterfly pointer).
macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload)
    assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end)
    negi propertyOffset
    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
    loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
    loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
end
1351
# Load a JSValue from either inline storage (offset < firstOutOfLineOffset,
# addressed past the JSObject header) or out-of-line butterfly storage
# (negated offset). Both paths funnel into one addressing expression.
# Clobbers objectAndStorage and propertyOffset.
macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload)
    bilt propertyOffset, firstOutOfLineOffset, .isInline
    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
    negi propertyOffset
    jmp .ready
.isInline:
    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
.ready:
    loadi TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], tag
    loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
end
1363
# Store a JSValue to inline or out-of-line property storage; mirror of
# loadPropertyAtVariableOffset. Clobbers objectAndStorage and
# propertyOffsetAsInt.
macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag, payload)
    bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
    loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
    negi propertyOffsetAsInt
    jmp .ready
.isInline:
    addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
.ready:
    storei tag, TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
    storei payload, PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
end
1375
1376
# op_init_global_const: write value(8) into a global variable slot. Operand 4
# is used directly as the slot's address (note the [t0] addressing, not a
# frame-relative index). Fires the write barrier on the global object first.
_llint_op_init_global_const:
    traceExecution()
    writeBarrierOnGlobalObject(2)
    loadi 8[PC], t1
    loadi 4[PC], t0                     # t0 = pointer to the variable slot
    loadConstantOrVariable(t1, t2, t3)  # t2 = tag, t3 = payload
    storei t2, TagOffset[t0]
    storei t3, PayloadOffset[t0]
    dispatch(5)
1386
1387
1388 # We only do monomorphic get_by_id caching for now, and we do not modify the
# opcode. We do, however, allow for the cache to change anytime it fails, since
1390 # ping-ponging is free. At best we get lucky and the get_by_id will continue
1391 # to take fast path on the new cache. At worst we take slow path, which is what
1392 # we would have been doing anyway.
1393
# Monomorphic get_by_id cache fast path.
# Bytecode: [op, dst(4), base(8), id(12), cachedStructure(16), cachedOffset(20), ...];
# length 9. getPropertyStorage selects inline vs out-of-line storage and
# invokes the callback with the storage pointer and a scratch register.
macro getById(getPropertyStorage)
    traceExecution()
    loadi 8[PC], t0
    loadi 16[PC], t1                    # t1 = cached StructureID
    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
    loadi 20[PC], t2                    # t2 = cached byte offset into storage
    getPropertyStorage(
        t3,
        t0,
        macro (propertyStorage, scratch)
            # Cache hit only when the base's structure matches the cache.
            bpneq JSCell::m_structureID[t3], t1, .opGetByIdSlow
            loadi 4[PC], t1             # t1 = dst operand index
            loadi TagOffset[propertyStorage, t2], scratch
            loadi PayloadOffset[propertyStorage, t2], t2
            storei scratch, TagOffset[cfr, t1, 8]
            storei t2, PayloadOffset[cfr, t1, 8]
            valueProfile(scratch, t2, 32, t1)
            dispatch(9)
        end)

    .opGetByIdSlow:
        callSlowPath(_llint_slow_path_get_by_id)
        dispatch(9)
end
1418
# The two get_by_id variants differ only in where the property lives.
_llint_op_get_by_id:
    getById(withInlineStorage)


_llint_op_get_by_id_out_of_line:
    getById(withOutOfLineStorage)
1425
1426
# Specialized get_by_id for "length" on arrays. Reads publicLength out of the
# butterfly's IndexingHeader. Lengths >= 2^31 don't fit in int32, so they go
# to the generic get_by_id slow path.
_llint_op_get_array_length:
    traceExecution()
    loadi 8[PC], t0
    loadp 16[PC], t1                    # t1 = ArrayProfile*
    loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
    move t3, t2
    arrayProfile(t2, t1, t0)            # t2 becomes the indexing type
    btiz t2, IsArray, .opGetArrayLengthSlow
    btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
    loadi 4[PC], t1                     # t1 = dst operand index
    loadp JSObject::m_butterfly[t3], t0
    loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
    bilt t0, 0, .opGetArrayLengthSlow   # length doesn't fit in int32
    valueProfile(Int32Tag, t0, 32, t2)
    storep t0, PayloadOffset[cfr, t1, 8]
    storep Int32Tag, TagOffset[cfr, t1, 8]
    dispatch(9)

.opGetArrayLengthSlow:
    callSlowPath(_llint_slow_path_get_by_id)
    dispatch(9)
1448
1449
# Monomorphic put_by_id cache fast path (non-transition: structure unchanged).
# Bytecode: [op, base(4), id(8), value(12), cachedStructure(16), cachedOffset(20), ...];
# length 9. Write barrier runs before the store.
macro putById(getPropertyStorage)
    traceExecution()
    writeBarrierOnOperands(1, 3)
    loadi 4[PC], t3
    loadi 16[PC], t1                    # t1 = cached StructureID
    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
    loadi 12[PC], t2                    # t2 = value operand index
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            bpneq JSCell::m_structureID[t0], t1, .opPutByIdSlow
            loadi 20[PC], t1            # t1 = cached byte offset
            loadConstantOrVariable2Reg(t2, scratch, t2)  # scratch = tag, t2 = payload
            storei scratch, TagOffset[propertyStorage, t1]
            storei t2, PayloadOffset[propertyStorage, t1]
            dispatch(9)
        end)

    .opPutByIdSlow:
        callSlowPath(_llint_slow_path_put_by_id)
        dispatch(9)
end
1473
# The two non-transition put_by_id variants differ only in storage location.
_llint_op_put_by_id:
    putById(withInlineStorage)


_llint_op_put_by_id_out_of_line:
    putById(withOutOfLineStorage)
1480
1481
# put_by_id fast path for a cached structure *transition* (adding a new
# property). Checks the old structure (16), optionally validates the
# prototype chain via additionalChecks, stores the value at the cached
# offset (20), then installs the new structure (24).
macro putByIdTransition(additionalChecks, getPropertyStorage)
    traceExecution()
    writeBarrierOnOperand(1)
    loadi 4[PC], t3
    loadi 16[PC], t1                    # t1 = old (cached) StructureID
    loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
    loadi 12[PC], t2                    # t2 = value operand index
    bpneq JSCell::m_structureID[t0], t1, .opPutByIdSlow
    additionalChecks(t1, t3, .opPutByIdSlow)
    loadi 20[PC], t1                    # t1 = cached byte offset
    getPropertyStorage(
        t0,
        t3,
        macro (propertyStorage, scratch)
            addp t1, propertyStorage, t3        # t3 = address of the slot
            loadConstantOrVariable2Reg(t2, t1, t2)  # t1 = tag, t2 = payload
            storei t1, TagOffset[t3]
            loadi 24[PC], t1            # t1 = new StructureID
            storei t2, PayloadOffset[t3]
            storep t1, JSCell::m_structureID[t0]    # commit the transition
            dispatch(9)
        end)

    .opPutByIdSlow:
        callSlowPath(_llint_slow_path_put_by_id)
        dispatch(9)
end
1509
# Used by direct transitions: no prototype-chain validation required.
macro noAdditionalChecks(oldStructure, scratch, slowPath)
end
1512
# Used by normal (non-direct) transitions: walk the prototype chain and
# verify each prototype's structure against the cached StructureChain at
# operand 28; any mismatch bails to slowPath. Clobbers oldStructure.
macro structureChainChecks(oldStructure, scratch, slowPath)
    const protoCell = oldStructure # Reusing the oldStructure register for the proto

    loadp 28[PC], scratch               # scratch = cached StructureChain*
    assert(macro (ok) btpnz scratch, ok end)
    loadp StructureChain::m_vector[scratch], scratch
    assert(macro (ok) btpnz scratch, ok end)
    bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
.loop:
    loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
    loadp JSCell::m_structureID[protoCell], oldStructure
    bpneq oldStructure, [scratch], slowPath     # structure changed since caching
    addp 4, scratch                     # next entry in the chain vector
    bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
.done:
end
1529
# The four transition variants: direct vs normal (prototype-chain-checked),
# each with inline or out-of-line storage.
_llint_op_put_by_id_transition_direct:
    putByIdTransition(noAdditionalChecks, withInlineStorage)


_llint_op_put_by_id_transition_direct_out_of_line:
    putByIdTransition(noAdditionalChecks, withOutOfLineStorage)


_llint_op_put_by_id_transition_normal:
    putByIdTransition(structureChainChecks, withInlineStorage)


_llint_op_put_by_id_transition_normal_out_of_line:
    putByIdTransition(structureChainChecks, withOutOfLineStorage)
1544
1545
# op_get_by_val: dst(4) = base(8)[subscript(12)], with ArrayProfile at 16 and
# value profile at 20. Fast paths for Int32/Contiguous, Double, and
# ArrayStorage shapes; everything else (or out-of-bounds / holes) is slow.
# Instruction length 6.
_llint_op_get_by_val:
    traceExecution()
    loadi 8[PC], t2
    loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
    move t0, t2
    loadp 16[PC], t3                    # t3 = ArrayProfile*
    arrayProfile(t2, t3, t1)            # t2 becomes the indexing type
    loadi 12[PC], t3
    loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)    # t1 = index
    loadp JSObject::m_butterfly[t0], t3
    andi IndexingShapeMask, t2
    bieq t2, Int32Shape, .opGetByValIsContiguous
    bineq t2, ContiguousShape, .opGetByValNotContiguous
.opGetByValIsContiguous:

    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
    loadi TagOffset[t3, t1, 8], t2      # t2 = tag, t1 = payload
    loadi PayloadOffset[t3, t1, 8], t1
    jmp .opGetByValDone

.opGetByValNotContiguous:
    bineq t2, DoubleShape, .opGetByValNotDouble
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValOutOfBounds
    loadd [t3, t1, 8], ft0
    bdnequn ft0, ft0, .opGetByValSlow   # NaN marks a hole in a double array
    # FIXME: This could be massively optimized.
    fd2ii ft0, t1, t2                   # split double into payload(t1)/tag(t2)
    loadi 4[PC], t0                     # t0 = dst operand index
    jmp .opGetByValNotEmpty             # doubles can't be "empty": skip that check

.opGetByValNotDouble:
    subi ArrayStorageShape, t2
    bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValSlow
    biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValOutOfBounds
    loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
    loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1

.opGetByValDone:
    loadi 4[PC], t0                     # t0 = dst operand index
    bieq t2, EmptyValueTag, .opGetByValOutOfBounds  # hole counts as OOB
.opGetByValNotEmpty:
    storei t2, TagOffset[cfr, t0, 8]
    storei t1, PayloadOffset[cfr, t0, 8]
    valueProfile(t2, t1, 20, t0)
    dispatch(6)

.opGetByValOutOfBounds:
    loadpFromInstruction(4, t0)
    storeb 1, ArrayProfile::m_outOfBounds[t0]   # remember OOB access for JIT
.opGetByValSlow:
    callSlowPath(_llint_slow_path_get_by_val)
    dispatch(6)
1598
1599
# Store helper for contiguous-shaped put_by_val fast paths. On entry:
# t0 = butterfly, t1 = scratch, t3 = index. In-bounds stores go straight to
# storeCallback; a store just past publicLength (but within vectorLength)
# grows publicLength, flags the ArrayProfile (16) as may-store-to-hole, and
# retries; beyond vectorLength it's a real out-of-bounds.
macro contiguousPutByVal(storeCallback)
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
    loadi 12[PC], t2                    # t2 = value operand index
    storeCallback(t2, t1, t0, t3)
    dispatch(5)

.outOfBounds:
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
    loadp 16[PC], t2                    # t2 = ArrayProfile*
    storeb 1, ArrayProfile::m_mayStoreToHole[t2]
    addi 1, t3, t2
    storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
    jmp .storeResult
end
1615
# op_put_by_val / op_put_by_val_direct: base(4)[subscript(8)] = value(12),
# ArrayProfile at 16. Fast paths per indexing shape (Int32, Double,
# Contiguous, ArrayStorage); each store callback receives
# (valueOperand, scratch, butterfly, index). Instruction length 5.
macro putByVal(slowPath)
    traceExecution()
    writeBarrierOnOperands(1, 3)
    loadi 4[PC], t0
    loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
    move t1, t2
    loadp 16[PC], t3                    # t3 = ArrayProfile*
    arrayProfile(t2, t3, t0)            # t2 becomes the indexing type
    loadi 8[PC], t0
    loadConstantOrVariablePayload(t0, Int32Tag, t3, .opPutByValSlow)    # t3 = index
    loadp JSObject::m_butterfly[t1], t0
    andi IndexingShapeMask, t2
    bineq t2, Int32Shape, .opPutByValNotInt32
    # Int32 shape: value must also be an int32.
    contiguousPutByVal(
        macro (operand, scratch, base, index)
            loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow)
            storei Int32Tag, TagOffset[base, index, 8]
            storei scratch, PayloadOffset[base, index, 8]
        end)

.opPutByValNotInt32:
    bineq t2, DoubleShape, .opPutByValNotDouble
    # Double shape: store the value as a raw double; ints get converted,
    # NaN is rejected (it would look like a hole marker).
    contiguousPutByVal(
        macro (operand, scratch, base, index)
            const tag = scratch
            const payload = operand
            loadConstantOrVariable2Reg(operand, tag, payload)
            bineq tag, Int32Tag, .notInt
            ci2d payload, ft0
            jmp .ready
        .notInt:
            fii2d payload, tag, ft0
            bdnequn ft0, ft0, .opPutByValSlow   # NaN can't be stored here
        .ready:
            stored ft0, [base, index, 8]
        end)

.opPutByValNotDouble:
    bineq t2, ContiguousShape, .opPutByValNotContiguous
    # Contiguous shape: any JSValue may be stored as tag + payload.
    contiguousPutByVal(
        macro (operand, scratch, base, index)
            const tag = scratch
            const payload = operand
            loadConstantOrVariable2Reg(operand, tag, payload)
            storei tag, TagOffset[base, index, 8]
            storei payload, PayloadOffset[base, index, 8]
        end)

.opPutByValNotContiguous:
    bineq t2, ArrayStorageShape, .opPutByValSlow
    biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
    bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty
.opPutByValArrayStorageStoreResult:
    loadi 12[PC], t2
    loadConstantOrVariable2Reg(t2, t1, t2)      # t1 = tag, t2 = payload
    storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8]
    storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8]
    dispatch(5)

.opPutByValArrayStorageEmpty:
    # Filling a hole: bump m_numValuesInVector, record may-store-to-hole,
    # and extend publicLength if we're writing at/after the current end.
    loadp 16[PC], t1
    storeb 1, ArrayProfile::m_mayStoreToHole[t1]
    addi 1, ArrayStorage::m_numValuesInVector[t0]
    bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
    addi 1, t3, t1
    storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
    jmp .opPutByValArrayStorageStoreResult

.opPutByValOutOfBounds:
    loadpFromInstruction(4, t0)
    storeb 1, ArrayProfile::m_outOfBounds[t0]   # remember OOB store for JIT
.opPutByValSlow:
    callSlowPath(slowPath)
    dispatch(5)
end
1691
# Two put_by_val variants sharing putByVal; "direct" bypasses setters/proto.
_llint_op_put_by_val:
    putByVal(_llint_slow_path_put_by_val)

_llint_op_put_by_val_direct:
    putByVal(_llint_slow_path_put_by_val_direct)
1697
# op_jmp: unconditional branch by the offset in operand 4.
_llint_op_jmp:
    traceExecution()
    dispatchBranch(4[PC])
1701
1702
# Shared body of jtrue/jfalse: operand 4 is the condition (must be a boolean
# on the fast path), operand 8 the branch offset. conditionOp jumps to
# .target when the branch should be taken. Slow path dispatches 0 because it
# sets PC itself (it may take the branch).
macro jumpTrueOrFalse(conditionOp, slow)
    loadi 4[PC], t1
    loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
    conditionOp(t0, .target)
    dispatch(3)

.target:
    dispatchBranch(8[PC])

.slow:
    callSlowPath(slow)
    dispatch(0)
end
1716
1717
# Shared body of jeq_null/jneq_null: operand 4 is the value (never a
# constant), operand 8 the branch offset. Cells go to cellHandler
# (structure + flags, for MasqueradesAsUndefined handling); non-cells have
# their tag normalized with `ori 1` so UndefinedTag collapses onto NullTag,
# letting immediateHandler test a single tag.
macro equalNull(cellHandler, immediateHandler)
    loadi 4[PC], t0
    assertNotConstant(t0)
    loadi TagOffset[cfr, t0, 8], t1     # t1 = tag
    loadi PayloadOffset[cfr, t0, 8], t0 # t0 = payload
    bineq t1, CellTag, .immediate
    loadp JSCell::m_structureID[t0], t2
    cellHandler(t2, JSCell::m_flags[t0], .target)
    dispatch(3)

.target:
    dispatchBranch(8[PC])

.immediate:
    ori 1, t1                           # UndefinedTag | 1 == NullTag | 1 == NullTag
    immediateHandler(t1, .target)
    dispatch(3)
end
1736
# op_jeq_null: branch when the value == null/undefined. A masquerading cell
# counts as undefined only when its structure's global object matches ours.
_llint_op_jeq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target)
            btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpeq Structure::m_globalObject[structure], t0, target
        .opJeqNullNotMasqueradesAsUndefined:
        end,
        macro (value, target) bieq value, NullTag, target end)
1748
1749
# op_jneq_null: branch when the value != null/undefined (inverse of above).
_llint_op_jneq_null:
    traceExecution()
    equalNull(
        macro (structure, value, target)
            btbz value, MasqueradesAsUndefined, target  # ordinary cell: not null-like
            loadp CodeBlock[cfr], t0
            loadp CodeBlock::m_globalObject[t0], t0
            bpneq Structure::m_globalObject[structure], t0, target
        end,
        macro (value, target) bineq value, NullTag, target end)
1760
1761
# op_jneq_ptr: branch (offset at 12) unless the local at operand 4 is the
# cell identified by special-pointer index at operand 8 in the current
# global object's m_specialPointers table.
_llint_op_jneq_ptr:
    traceExecution()
    loadi 4[PC], t0
    loadi 8[PC], t1                     # t1 = special pointer index
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_globalObject[t2], t2
    bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
    loadp JSGlobalObject::m_specialPointers[t2, t1, 4], t1
    bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
.opJneqPtrBranch:
    dispatchBranch(12[PC])
.opJneqPtrFallThrough:
    dispatch(4)
1775
1776
# Shared body for relational branch opcodes (jless, jlesseq, ...).
# Bytecode: [op, op1(4), op2(8), target(12)]; length 4. Both-int case uses
# integerCompare; mixed/double cases reassemble doubles and use
# doubleCompare. Slow path dispatches 0 because it may take the branch.
macro compare(integerCompare, doubleCompare, slowPath)
    loadi 4[PC], t2
    loadi 8[PC], t3
    loadConstantOrVariable(t2, t0, t1)      # t0 = op1 tag, t1 = op1 payload
    loadConstantOrVariable2Reg(t3, t2, t3)  # t2 = op2 tag, t3 = op2 payload
    bineq t0, Int32Tag, .op1NotInt
    bineq t2, Int32Tag, .op2NotInt
    integerCompare(t1, t3, .jumpTarget)
    dispatch(4)

.op1NotInt:
    bia t0, LowestTag, .slow            # op1 is not a number
    bib t2, LowestTag, .op1NotIntOp2Double
    bineq t2, Int32Tag, .slow
    ci2d t3, ft1                        # op2: int32 -> double
    jmp .op1NotIntReady
.op1NotIntOp2Double:
    fii2d t3, t2, ft1                   # op2: reassemble double
.op1NotIntReady:
    fii2d t1, t0, ft0                   # op1: reassemble double
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.op2NotInt:
    ci2d t1, ft0                        # op1: int32 -> double
    bia t2, LowestTag, .slow            # op2 is not a number
    fii2d t3, t2, ft1
    doubleCompare(ft0, ft1, .jumpTarget)
    dispatch(4)

.jumpTarget:
    dispatchBranch(12[PC])

.slow:
    callSlowPath(slowPath)
    dispatch(0)
end
1814
1815
# op_switch_imm: table-driven switch on an int32 scrutinee.
# Bytecode: [op, tableIndex(4), defaultOffset(8), scrutinee(12)].
# A zero entry in branchOffsets means "no case": fall through to default.
# Doubles go to the slow path (they may equal an int32 case).
_llint_op_switch_imm:
    traceExecution()
    loadi 12[PC], t2
    loadi 4[PC], t3                     # t3 = jump table index
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                         # t2 = &table[tableIndex]
    bineq t1, Int32Tag, .opSwitchImmNotInt
    subi SimpleJumpTable::min[t2], t0   # rebase scrutinee to table start
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
    loadi [t3, t0, 4], t1
    btiz t1, .opSwitchImmFallThrough    # zero offset = no case entry
    dispatchBranchWithOffset(t1)

.opSwitchImmNotInt:
    bib t1, LowestTag, .opSwitchImmSlow  # Go to slow path if it's a double.
.opSwitchImmFallThrough:
    dispatchBranch(8[PC])

.opSwitchImmSlow:
    callSlowPath(_llint_slow_path_switch_imm)
    dispatch(0)
1842
1843
# op_switch_char: table-driven switch on a single-character string.
# Bytecode: [op, tableIndex(4), defaultOffset(8), scrutinee(12)].
# Only length-1, non-rope strings hit the fast path; ropes must be resolved
# by the slow path. Handles both 8-bit and 16-bit string buffers.
_llint_op_switch_char:
    traceExecution()
    loadi 12[PC], t2
    loadi 4[PC], t3                     # t3 = jump table index
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    loadp CodeBlock[cfr], t2
    loadp CodeBlock::m_rareData[t2], t2
    muli sizeof SimpleJumpTable, t3
    loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
    addp t3, t2                         # t2 = &table[tableIndex]
    bineq t1, CellTag, .opSwitchCharFallThrough
    bbneq JSCell::m_type[t0], StringType, .opSwitchCharFallThrough
    bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
    loadp JSString::m_value[t0], t0
    btpz t0, .opSwitchOnRope            # null StringImpl => rope, needs slow path
    loadp StringImpl::m_data8[t0], t1
    btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
    loadh [t1], t0                      # 16-bit character
    jmp .opSwitchCharReady
.opSwitchChar8Bit:
    loadb [t1], t0                      # 8-bit character
.opSwitchCharReady:
    subi SimpleJumpTable::min[t2], t0   # rebase char code to table start
    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
    loadi [t2, t0, 4], t1
    btiz t1, .opSwitchCharFallThrough   # zero offset = no case entry
    dispatchBranchWithOffset(t1)

.opSwitchCharFallThrough:
    dispatchBranch(8[PC])

.opSwitchOnRope:
    callSlowPath(_llint_slow_path_switch_char)
    dispatch(0)
1879
1880
# Record the structure of the 'this' argument of a call into the call's
# ArrayProfile (at instruction slot CallOpCodeSize - 2). Operand 16 is the
# argument-base register index; the index is negated because arguments live
# below the frame pointer. Cells only; non-cell 'this' is skipped.
macro arrayProfileForCall()
    loadi 16[PC], t3
    negi t3
    bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
    loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
    loadp JSCell::m_structureID[t0], t0
    loadpFromInstruction(CallOpCodeSize - 2, t1)
    storep t0, ArrayProfile::m_lastSeenStructureID[t1]
.done:
end
1891
# Shared body of op_call/op_construct. Fast path requires the callee
# (operand 8) to match the cell cached in the LLIntCallLinkInfo (operand 20);
# otherwise the slow path links the call. Builds the callee frame:
# operand 16 = register offset of the new frame, operand 12 = argument count.
macro doCall(slowPath)
    loadi 8[PC], t0
    loadi 20[PC], t1                    # t1 = LLIntCallLinkInfo*
    loadp LLIntCallLinkInfo::callee[t1], t2     # t2 = cached callee
    loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
    bineq t3, t2, .opCallSlow           # cache miss -> link via slow path
    loadi 16[PC], t3
    lshifti 3, t3                       # register index -> byte offset
    negi t3
    addp cfr, t3 # t3 contains the new value of cfr
    storei t2, Callee + PayloadOffset[t3]
    loadi 12[PC], t2                    # t2 = argument count
    storei PC, ArgumentCount + TagOffset[cfr]   # save our PC for return
    storei t2, ArgumentCount + PayloadOffset[t3]
    storei CellTag, Callee + TagOffset[t3]
    addp CallerFrameAndPCSize, t3
    callTargetFunction(t1, t3)

.opCallSlow:
    slowPathForCall(slowPath)
end
1913
1914
# op_ret: return the value at operand 4. doReturn() expects the return
# value in t1 (tag) / t0 (payload).
_llint_op_ret:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t2
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    doReturn()
1921
1922
# op_to_primitive: dst(4) = ToPrimitive(src(8)). Non-cells and non-object
# cells (e.g. strings) are already primitive and pass through unchanged;
# objects need the slow path.
_llint_op_to_primitive:
    traceExecution()
    loadi 8[PC], t2
    loadi 4[PC], t3                     # t3 = dst operand index
    loadConstantOrVariable(t2, t1, t0)  # t1 = tag, t0 = payload
    bineq t1, CellTag, .opToPrimitiveIsImm
    bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
    storei t1, TagOffset[cfr, t3, 8]
    storei t0, PayloadOffset[cfr, t3, 8]
    dispatch(3)

.opToPrimitiveSlowCase:
    callSlowPath(_slow_path_to_primitive)
    dispatch(3)
1938
1939
# op_catch: landing pad for thrown exceptions. Recovers the VM pointer from
# the callee cell, restores cfr/PC/top frame from the VM's *ForThrow fields,
# clears the pending exception, and stores the Exception object (operand 4)
# and its wrapped value (operand 8) into the frame.
_llint_op_catch:
    # This is where we end up from the JIT's throw trampoline (because the
    # machine code return address will be set to _llint_op_catch), and from
    # the interpreter's throw trampoline (see _llint_throw_trampoline).
    # The throwing code must have known that we were throwing to the interpreter,
    # and have set VM::targetInterpreterPCForThrow.
    loadp Callee + PayloadOffset[cfr], t3
    andp MarkedBlockMask, t3            # cell -> its MarkedBlock
    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3    # t3 = VM*
    loadp VM::callFrameForThrow[t3], cfr
    loadp VM::vmEntryFrameForThrow[t3], t0
    storep t0, VM::topVMEntryFrame[t3]
    restoreStackPointerAfterCall()

    loadi VM::targetInterpreterPCForThrow[t3], PC
    loadi VM::m_exception[t3], t0       # t0 = Exception*
    storei 0, VM::m_exception[t3]       # clear the pending exception
    loadi 4[PC], t2
    storei t0, PayloadOffset[cfr, t2, 8]
    storei CellTag, TagOffset[cfr, t2, 8]

    loadi Exception::m_value + TagOffset[t0], t1
    loadi Exception::m_value + PayloadOffset[t0], t0
    loadi 8[PC], t2
    storei t0, PayloadOffset[cfr, t2, 8]
    storei t1, TagOffset[cfr, t2, 8]

    traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
    dispatch(3)
1969
# op_end: terminate the program-level code block, returning the value at
# operand 4 (always a local, never a constant).
_llint_op_end:
    traceExecution()
    checkSwitchToJITForEpilogue()
    loadi 4[PC], t0
    assertNotConstant(t0)
    loadi TagOffset[cfr, t0, 8], t1     # t1 = return value tag
    loadi PayloadOffset[cfr, t0, 8], t0 # t0 = return value payload
    doReturn()
1978
1979
# Trampoline taken when a slow path signals an exception: let the handler
# slow path unwind, then jump to the machine-code throw target recorded in
# the VM (recovered from the callee cell's MarkedBlock).
_llint_throw_from_slow_path_trampoline:
    callSlowPath(_llint_slow_path_handle_exception)

    # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
    # the throw target is not necessarily interpreted code, we come to here.
    # This essentially emulates the JIT's throwing protocol.
    loadp Callee[cfr], t1
    andp MarkedBlockMask, t1
    loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1    # t1 = VM*
    jmp VM::targetMachinePCForThrow[t1]
1990
1991
# Throw raised mid-call: re-save the return address, then share the normal
# slow-path throw trampoline.
_llint_throw_during_call_trampoline:
    preserveReturnAddressAfterCall(t2)
    jmp _llint_throw_from_slow_path_trampoline
1995
1996
# Trampoline for calling a host (native C) function. Recovers the VM from
# the callee cell, publishes the call frame as VM::topCallFrame, passes the
# ExecState as the first argument per the platform's convention, calls the
# native function at executableOffsetToFunction within the executable, and
# checks for a pending exception on return.
macro nativeCallTrampoline(executableOffsetToFunction)

    functionPrologue()
    storep 0, CodeBlock[cfr]            # native frames have no CodeBlock
    loadi Callee + PayloadOffset[cfr], t1
    # Callee is still in t1 for the code below.
    if X86 or X86_WIN
        subp 8, sp # align stack pointer
        andp MarkedBlockMask, t1
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t3    # t3 = VM*
        storep cfr, VM::topCallFrame[t3]
        move cfr, t2 # t2 = ecx
        storep t2, [sp]                 # exec passed on the stack
        loadi Callee + PayloadOffset[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        checkStackPointerAlignment(t3, 0xdead0001)
        call executableOffsetToFunction[t1]
        # Re-derive the VM: the native call clobbered t3.
        loadp Callee + PayloadOffset[cfr], t3
        andp MarkedBlockMask, t3
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
        addp 8, sp
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP or MIPS or SH4
        subp 8, sp # align stack pointer
        # t1 already contains the Callee.
        andp MarkedBlockMask, t1
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1    # t1 = VM*
        storep cfr, VM::topCallFrame[t1]
        if MIPS or SH4
            move cfr, a0                # exec in the first argument register
        else
            move cfr, t0
        end
        loadi Callee + PayloadOffset[cfr], t1
        loadp JSFunction::m_executable[t1], t1
        checkStackPointerAlignment(t3, 0xdead0001)
        if C_LOOP
            cloopCallNative executableOffsetToFunction[t1]
        else
            call executableOffsetToFunction[t1]
        end
        # Re-derive the VM: the native call clobbered our registers.
        loadp Callee + PayloadOffset[cfr], t3
        andp MarkedBlockMask, t3
        loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
        addp 8, sp
    else
        error
    end

    functionEpilogue()
    btinz VM::m_exception[t3], .handleException
    ret

.handleException:
    storep cfr, VM::topCallFrame[t3]
    restoreStackPointerAfterCall()
    jmp _llint_throw_from_slow_path_trampoline
end
2054
2055
# Store the current CodeBlock's global object (as a cell) into the frame
# slot given by instruction operand `dst`.
macro getGlobalObject(dst)
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadisFromInstruction(dst, t1)
    storei CellTag, TagOffset[cfr, t1, 8]
    storei t0, PayloadOffset[cfr, t1, 8]
end
2063
# Bail to slowPath if the global object's var-injection watchpoint has
# fired (i.e. an eval/with may have injected variables).
macro varInjectionCheck(slowPath)
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_globalObject[t0], t0
    loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
    bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
end
2070
# Resolve a closure scope: start at the scope in operand 2 and follow
# JSScope::m_next for the depth given in operand 5, storing the resulting
# scope cell into the dst slot (operand 1).
macro resolveScope()
    loadp CodeBlock[cfr], t0
    loadisFromInstruction(5, t2)        # t2 = scope chain depth

    loadisFromInstruction(2, t0)
    loadp PayloadOffset[cfr, t0, 8], t0 # t0 = starting scope
    btiz t2, .resolveScopeLoopEnd

.resolveScopeLoop:
    loadp JSScope::m_next[t0], t0       # walk one link up the scope chain
    subi 1, t2
    btinz t2, .resolveScopeLoop

.resolveScopeLoopEnd:
    loadisFromInstruction(1, t1)        # t1 = dst operand index
    storei CellTag, TagOffset[cfr, t1, 8]
    storei t0, PayloadOffset[cfr, t1, 8]
end
2089
2090
# op_resolve_scope: dispatch on the cached ResolveType (operand 4).
# Global-ish types resolve to the global object; closure types walk the
# scope chain; the *WithVarInjectionChecks types first verify the
# var-injection watchpoint. Unknown/dynamic types go to the slow path.
# Instruction length 7.
_llint_op_resolve_scope:
    traceExecution()
    loadisFromInstruction(4, t0)        # t0 = ResolveType

#rGlobalProperty:
    bineq t0, GlobalProperty, .rGlobalVar
    getGlobalObject(1)
    dispatch(7)

.rGlobalVar:
    bineq t0, GlobalVar, .rClosureVar
    getGlobalObject(1)
    dispatch(7)

.rClosureVar:
    bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks
    resolveScope()
    dispatch(7)

.rGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
    varInjectionCheck(.rDynamic)
    getGlobalObject(1)
    dispatch(7)

.rGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
    varInjectionCheck(.rDynamic)
    getGlobalObject(1)
    dispatch(7)

.rClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
    varInjectionCheck(.rDynamic)
    resolveScope()
    dispatch(7)

.rDynamic:
    callSlowPath(_llint_slow_path_resolve_scope)
    dispatch(7)
2131
2132
# Load the cell at instruction operand `operand` into t0 and bail to
# slowPath unless its structure matches the one cached at operand 5.
macro loadWithStructureCheck(operand, slowPath)
    loadisFromInstruction(operand, t0)
    loadp PayloadOffset[cfr, t0, 8], t0
    loadpFromInstruction(5, t1)         # t1 = cached StructureID
    bpneq JSCell::m_structureID[t0], t1, slowPath
end
2139
# Expects t0 = base object (set up by loadWithStructureCheck). Loads the
# property at the cached variable offset in operand 6 into t1 (tag) /
# t2 (payload), records it in the value profile at instruction byte
# offset 28, and stores it into dst (operand 1). Clobbers t0-t3.
macro getProperty()
    loadisFromInstruction(6, t3)            # t3 = cached property offset
    loadPropertyAtVariableOffset(t3, t0, t1, t2)
    valueProfile(t1, t2, 28, t0)
    loadisFromInstruction(1, t0)            # t0 = dst virtual register
    storei t1, TagOffset[cfr, t0, 8]
    storei t2, PayloadOffset[cfr, t0, 8]
end
2148
# Operand 6 is a direct pointer to the global variable's JSValue slot.
# Load its tag/payload, profile the value, and store it into dst
# (operand 1). Clobbers t0-t2.
# NOTE(review): the loads use loadp, which on these 32-bit ports is a
# 32-bit load — equivalent to loadi here.
macro getGlobalVar()
    loadpFromInstruction(6, t0)             # t0 = &variable slot
    loadp TagOffset[t0], t1
    loadp PayloadOffset[t0], t2
    valueProfile(t1, t2, 28, t0)
    loadisFromInstruction(1, t0)            # t0 = dst virtual register
    storei t1, TagOffset[cfr, t0, 8]
    storei t2, PayloadOffset[cfr, t0, 8]
end
2158
# Expects t0 = scope (environment record) cell. Operand 6 is the
# variable's index into the environment record's variable storage.
# Loads the variable, profiles it, and stores it into dst (operand 1).
# Clobbers t0-t3.
macro getClosureVar()
    loadisFromInstruction(6, t3)            # t3 = variable index within the scope
    loadp JSEnvironmentRecord_variables + TagOffset[t0, t3, 8], t1
    loadp JSEnvironmentRecord_variables + PayloadOffset[t0, t3, 8], t2
    valueProfile(t1, t2, 28, t0)
    loadisFromInstruction(1, t0)            # t0 = dst virtual register
    storei t1, TagOffset[cfr, t0, 8]
    storei t2, PayloadOffset[cfr, t0, 8]
end
2168
# op_get_from_scope (8 instruction slots): read a variable out of a
# resolved scope (operand 2) into dst (operand 1), dispatching on the
# ResolveType extracted from operand 4. Each fast path's guard failure
# (structure mismatch or fired var-injection watchpoint) falls through
# to the generic slow path at .gDynamic.
_llint_op_get_from_scope:
    traceExecution()
    loadisFromInstruction(4, t0)
    # Mask extracts the ResolveType bits (compared against GlobalProperty
    # etc. below) from the packed mode/type operand.
    andi ResolveModeMask, t0

#gGlobalProperty:
    bineq t0, GlobalProperty, .gGlobalVar
    loadWithStructureCheck(2, .gDynamic)    # guard cached structure; t0 = base
    getProperty()
    dispatch(8)

.gGlobalVar:
    bineq t0, GlobalVar, .gClosureVar
    getGlobalVar()                          # read through the cached slot pointer
    dispatch(8)

.gClosureVar:
    bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
    loadVariable(2, t2, t1, t0)             # t0 = scope cell payload
    getClosureVar()
    dispatch(8)

.gGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
    loadWithStructureCheck(2, .gDynamic)
    getProperty()
    dispatch(8)

.gGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
    varInjectionCheck(.gDynamic)
    getGlobalVar()
    dispatch(8)

.gClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
    varInjectionCheck(.gDynamic)
    loadVariable(2, t2, t1, t0)
    getClosureVar()
    dispatch(8)

.gDynamic:
    callSlowPath(_llint_slow_path_get_from_scope)
    dispatch(8)
2213
2214
# Expects t0 = base object (set up by loadWithStructureCheck). Loads the
# value from operand 3 (constant or virtual register) and stores it at
# the cached property offset in operand 6. Clobbers t1-t3.
macro putProperty()
    loadisFromInstruction(3, t1)            # t1 = value operand
    loadConstantOrVariable(t1, t2, t3)      # t2 = tag, t3 = payload
    loadisFromInstruction(6, t1)            # t1 = cached property offset
    storePropertyAtVariableOffset(t1, t0, t2, t3)
end
2221
# Store the value from operand 3 into the global variable slot pointed
# to directly by operand 6, after running notifyWrite on the variable's
# watchpoint set (operand 5). notifyWrite branches to .pDynamic when the
# write cannot be handled on the fast path — NOTE(review): .pDynamic is
# a label inside _llint_op_put_to_scope, so this macro is only usable
# from that opcode. Clobbers t0-t3.
macro putGlobalVar()
    loadisFromInstruction(3, t0)            # t0 = value operand
    loadConstantOrVariable(t0, t1, t2)      # t1 = tag, t2 = payload
    loadpFromInstruction(5, t3)             # t3 = variable's WatchpointSet
    notifyWrite(t3, .pDynamic)
    loadpFromInstruction(6, t0)             # t0 = &variable slot
    storei t1, TagOffset[t0]
    storei t2, PayloadOffset[t0]
end
2231
# Expects t0 = scope (environment record) cell. Stores the value from
# operand 3 into the scope's variable storage at the index in operand 6.
# Clobbers t1-t3.
macro putClosureVar()
    loadisFromInstruction(3, t1)            # t1 = value operand
    loadConstantOrVariable(t1, t2, t3)      # t2 = tag, t3 = payload
    loadisFromInstruction(6, t1)            # t1 = variable index within the scope
    storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
    storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
end
2239
# Like putClosureVar (t0 = scope cell, value from operand 3, index from
# operand 6), but operand 5 may hold a per-variable WatchpointSet; when
# non-null, notifyWrite must approve the fast-path store, else we branch
# to .pDynamic (label inside _llint_op_put_to_scope — this macro is only
# usable from there). Clobbers t1-t4.
macro putLocalClosureVar()
    loadisFromInstruction(3, t1)            # t1 = value operand
    loadConstantOrVariable(t1, t2, t3)      # t2 = tag, t3 = payload
    loadpFromInstruction(5, t4)             # t4 = WatchpointSet or null
    btpz t4, .noVariableWatchpointSet
    notifyWrite(t4, .pDynamic)
.noVariableWatchpointSet:
    loadisFromInstruction(6, t1)            # t1 = variable index within the scope
    storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
    storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
end
2251
2252
# op_put_to_scope (7 instruction slots): store a value (operand 3) into
# a variable in a resolved scope (operand 1), dispatching on the
# ResolveType extracted from operand 4. Write barriers run before the
# store on every path; guard failures fall through to .pDynamic.
_llint_op_put_to_scope:
    traceExecution()
    loadisFromInstruction(4, t0)
    # Mask extracts the ResolveType bits from the packed mode/type operand.
    andi ResolveModeMask, t0

#pLocalClosureVar:
    bineq t0, LocalClosureVar, .pGlobalProperty
    writeBarrierOnOperands(1, 3)            # barrier: scope(1) may now point at value(3)
    loadVariable(1, t2, t1, t0)             # t0 = scope cell payload
    putLocalClosureVar()
    dispatch(7)

.pGlobalProperty:
    bineq t0, GlobalProperty, .pGlobalVar
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic)    # guard cached structure; t0 = base
    putProperty()
    dispatch(7)

.pGlobalVar:
    bineq t0, GlobalVar, .pClosureVar
    writeBarrierOnGlobalObject(3)           # slot lives on the global object
    putGlobalVar()
    dispatch(7)

.pClosureVar:
    bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadVariable(1, t2, t1, t0)
    putClosureVar()
    dispatch(7)

.pGlobalPropertyWithVarInjectionChecks:
    bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
    writeBarrierOnOperands(1, 3)
    loadWithStructureCheck(1, .pDynamic)
    putProperty()
    dispatch(7)

.pGlobalVarWithVarInjectionChecks:
    bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
    writeBarrierOnGlobalObject(3)
    varInjectionCheck(.pDynamic)
    putGlobalVar()
    dispatch(7)

.pClosureVarWithVarInjectionChecks:
    bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic
    writeBarrierOnOperands(1, 3)
    varInjectionCheck(.pDynamic)
    loadVariable(1, t2, t1, t0)
    putClosureVar()
    dispatch(7)

.pDynamic:
    callSlowPath(_llint_slow_path_put_to_scope)
    dispatch(7)
2310
2311
# op_get_from_arguments (5 instruction slots):
#   dst(1) = arguments(2).storage[index(3)], with value profiling
#   (profile slot at instruction byte offset 16).
# The base in operand 2 is a DirectArguments object.
#
# Fix: replaced the raw "loadi 12[PC], t1" with the file-wide
# loadisFromInstruction idiom (operand 3; 12 == 3 * 4, and loadis is a
# plain 32-bit load on these ports) so the operand access is consistent
# and self-describing.
_llint_op_get_from_arguments:
    traceExecution()
    loadisFromInstruction(2, t0)            # t0 = arguments virtual register
    loadi PayloadOffset[cfr, t0, 8], t0     # t0 = DirectArguments cell
    loadisFromInstruction(3, t1)            # t1 = argument index
    loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2
    loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3
    loadisFromInstruction(1, t1)            # t1 = dst virtual register
    valueProfile(t2, t3, 16, t0)
    storei t2, TagOffset[cfr, t1, 8]
    storei t3, PayloadOffset[cfr, t1, 8]
    dispatch(5)
2324
2325
# op_put_to_arguments (4 instruction slots):
#   arguments(1).storage[index(2)] = value(3)
# The base in operand 1 is a DirectArguments object; the write barrier
# runs before the store.
#
# Fix: replaced the raw "loadi 8[PC], t1" with the file-wide
# loadisFromInstruction idiom (operand 2; 8 == 2 * 4, and loadis is a
# plain 32-bit load on these ports) so the operand access is consistent
# and self-describing.
_llint_op_put_to_arguments:
    traceExecution()
    writeBarrierOnOperands(1, 3)            # barrier: arguments(1) may now point at value(3)
    loadisFromInstruction(1, t0)            # t0 = arguments virtual register
    loadi PayloadOffset[cfr, t0, 8], t0     # t0 = DirectArguments cell
    loadisFromInstruction(3, t1)            # t1 = value operand
    loadConstantOrVariable(t1, t2, t3)      # t2 = tag, t3 = payload
    loadisFromInstruction(2, t1)            # t1 = argument index
    storei t2, DirectArguments_storage + TagOffset[t0, t1, 8]
    storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8]
    dispatch(4)
2337
2338
# op_profile_type (6 instruction slots): append an entry (value,
# TypeLocation, structureID) for the value in operand 1 to the VM's
# type profiler log, flushing the log via the slow path when it fills.
_llint_op_profile_type:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    # t1 is holding the pointer to the typeProfilerLog.
    loadp VM::m_typeProfilerLog[t1], t1

    # t0 is holding the payload, t4 is holding the tag.
    loadisFromInstruction(1, t2)
    loadConstantOrVariable(t2, t4, t0)

    # t2 is holding the pointer to the current log entry.
    loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2

    # Store the JSValue onto the log entry.
    storei t4, TypeProfilerLog::LogEntry::value + TagOffset[t2]
    storei t0, TypeProfilerLog::LogEntry::value + PayloadOffset[t2]

    # Store the TypeLocation onto the log entry.
    loadpFromInstruction(2, t3)
    storep t3, TypeProfilerLog::LogEntry::location[t2]

    # Record the cell's structureID, or 0 for non-cell values.
    bieq t4, CellTag, .opProfileTypeIsCell
    storei 0, TypeProfilerLog::LogEntry::structureID[t2]
    jmp .opProfileTypeSkipIsCell
.opProfileTypeIsCell:
    loadi JSCell::m_structureID[t0], t3
    storei t3, TypeProfilerLog::LogEntry::structureID[t2]
.opProfileTypeSkipIsCell:

    # Increment the current log entry.
    addp sizeof TypeProfilerLog::LogEntry, t2
    storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]

    # If the log is now full, have the slow path process (and clear) it.
    loadp TypeProfilerLog::m_logEndPtr[t1], t1
    bpneq t2, t1, .opProfileTypeDone
    callSlowPath(_slow_path_profile_type_clear_log)

.opProfileTypeDone:
    dispatch(6)