# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp

# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
# results in a separate slab in the fat binary, yet the offlineasm doesn't know
# to expect it.
if ARMv7k
end
if ARMv7s
end

# These declarations must match interpreter/JSStack.h.

if JSVALUE64
    const PtrSize = 8
    const CallFrameHeaderSlots = 5
else
    const PtrSize = 4
    const CallFrameHeaderSlots = 4
    const CallFrameAlignSlots = 1
end
const SlotSize = 8

const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1)
const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)

const StackAlignment = 16
const StackAlignmentMask = StackAlignment - 1

const CallerFrameAndPCSize = 2 * PtrSize

const CallerFrame = 0
const ReturnPC = CallerFrame + PtrSize
const CodeBlock = ReturnPC + PtrSize
const Callee = CodeBlock + SlotSize
const ArgumentCount = Callee + SlotSize
const ThisArgumentOffset = ArgumentCount + SlotSize
const FirstArgumentOffset = ThisArgumentOffset + SlotSize
const CallFrameHeaderSize = ThisArgumentOffset
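# As a worked example, on a 64-bit build (PtrSize == SlotSize == 8) the
# header layout above comes out to: CallerFrame at 0, ReturnPC at 8,
# CodeBlock at 16, Callee at 24, ArgumentCount at 32, and the this-argument
# at 40, so CallFrameHeaderSize == 40 == CallFrameHeaderSlots * SlotSize.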

# Some value representation constants.
if JSVALUE64
    const TagBitTypeOther = 0x2
    const TagBitBool = 0x4
    const TagBitUndefined = 0x8
    const ValueEmpty = 0x0
    const ValueFalse = TagBitTypeOther | TagBitBool
    const ValueTrue = TagBitTypeOther | TagBitBool | 1
    const ValueUndefined = TagBitTypeOther | TagBitUndefined
    const ValueNull = TagBitTypeOther
    const TagTypeNumber = 0xffff000000000000
    const TagMask = TagTypeNumber | TagBitTypeOther
else
    const Int32Tag = -1
    const BooleanTag = -2
    const NullTag = -3
    const UndefinedTag = -4
    const CellTag = -5
    const EmptyValueTag = -6
    const DeletedValueTag = -7
    const LowestTag = DeletedValueTag
end
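# A few worked instances of the 64-bit encoding above: ValueNull == 0x2,
# ValueFalse == 0x6, ValueTrue == 0x7, ValueUndefined == 0xa. An int32 such
# as 5 is boxed as TagTypeNumber | 5 == 0xffff000000000005, while a cell
# (object) pointer has all TagMask bits clear, which is how the cell checks
# in LowLevelInterpreter64 recognize object pointers.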

const CallOpCodeSize = 9

if X86_64 or ARM64 or C_LOOP
    const maxFrameExtentForSlowPathCall = 0
elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
    const maxFrameExtentForSlowPathCall = 24
elsif X86 or X86_WIN
    const maxFrameExtentForSlowPathCall = 40
elsif MIPS
    const maxFrameExtentForSlowPathCall = 40
elsif X86_64_WIN
    const maxFrameExtentForSlowPathCall = 64
end

# Watchpoint states
const ClearWatchpoint = 0
const IsWatched = 1
const IsInvalidated = 2

# Some register conventions.
if JSVALUE64
    # - Use a pair of registers to represent the PC: one register for the
    #   base of the bytecodes, and one register for the index.
    # - The PC base (or PB for short) should be stored in the csr. It will
    #   get clobbered on calls to other JS code, but will get saved on calls
    #   to C functions.
    # - C calls are still given the Instruction* rather than the PC index.
    #   This requires an add before the call, and a sub after.
    const PC = t5
    const PB = t6
    const tagTypeNumber = csr1
    const tagMask = csr2

    macro loadisFromInstruction(offset, dest)
        loadis offset * 8[PB, PC, 8], dest
    end

    macro loadpFromInstruction(offset, dest)
        loadp offset * 8[PB, PC, 8], dest
    end

    macro storepToInstruction(value, offset)
        storep value, offset * 8[PB, PC, 8]
    end

else
    const PC = t5
    macro loadisFromInstruction(offset, dest)
        loadis offset * 4[PC], dest
    end

    macro loadpFromInstruction(offset, dest)
        loadp offset * 4[PC], dest
    end
end
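# For example, loadisFromInstruction(1, t0) loads the 32-bit operand in
# slot 1 of the current bytecode: it expands to "loadis 8[PB, PC, 8], t0"
# on 64-bit builds (PB holds the Instruction* base, PC the slot index) and
# to "loadis 4[PC], t0" on 32-bit builds, where PC itself holds the
# Instruction*.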

# Constants for reasoning about value representation.
if BIG_ENDIAN
    const TagOffset = 0
    const PayloadOffset = 4
else
    const TagOffset = 4
    const PayloadOffset = 0
end
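# e.g. on a little-endian 32-bit build each 8-byte JSValue slot holds the
# payload in its low word and the tag in its high word, so the tag of a
# virtual register indexed by t0 can be fetched with something like
# "loadi TagOffset[cfr, t0, 8]".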

# Constants for reasoning about butterflies.
const IsArray = 1
const IndexingShapeMask = 30
const NoIndexingShape = 0
const Int32Shape = 20
const DoubleShape = 22
const ContiguousShape = 26
const ArrayStorageShape = 28
const SlowPutArrayStorageShape = 30
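# Fast paths classify an object's storage by masking its indexing type down
# to these shape bits, roughly like this (with "cell" standing for the
# object register and "scratch" for a temporary; a sketch, not a verbatim
# excerpt of any one opcode):
#     loadb JSCell::m_indexingType[cell], scratch
#     andi IndexingShapeMask, scratch
#     bineq scratch, ContiguousShape, .slowCase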

# Type constants.
const StringType = 6
const ObjectType = 18
const FinalObjectType = 19

# Type flags constants.
const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8

# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000
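# Virtual register indices at or above FirstConstantRegisterIndex refer to
# entries in the CodeBlock's constant pool rather than to stack slots; see
# assertNotConstant() below.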

# Code type constants.
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2

# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset

# String flags.
const HashFlags8BitBuffer = 8

# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100

# ResolveType
const GlobalProperty = 0
const GlobalVar = 1
const ClosureVar = 2
const LocalClosureVar = 3
const GlobalPropertyWithVarInjectionChecks = 4
const GlobalVarWithVarInjectionChecks = 5
const ClosureVarWithVarInjectionChecks = 6
const Dynamic = 7

const ResolveModeMask = 0xffff

const MarkedBlockSize = 16 * 1024
const MarkedBlockMask = ~(MarkedBlockSize - 1)
# Constants for checking mark bits.
const AtomNumberShift = 3
const BitMapWordShift = 4

# Allocation constants
if JSVALUE64
    const JSFinalObjectSizeClassIndex = 1
else
    const JSFinalObjectSizeClassIndex = 3
end

# This must match wtf/Vector.h
const VectorBufferOffset = 0
if JSVALUE64
    const VectorSizeOffset = 12
else
    const VectorSizeOffset = 8
end

# Some common utilities.
macro crash()
    if C_LOOP
        cloopCrash
    else
        call _llint_crash
    end
end

macro assert(assertion)
    if ASSERT_ENABLED
        assertion(.ok)
        crash()
    .ok:
    end
end
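# assert() above is written in continuation-passing style: the macro passed
# in must branch to its label argument when the assertion holds, so control
# falls through to crash() only on failure. For example,
#     assert(macro (ok) bpgteq t0, 0, ok end)
# crashes only if t0 is negative.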

macro checkStackPointerAlignment(tempReg, location)
    if ARM64 or C_LOOP or SH4
        # ARM64 will check for us!
        # C_LOOP does not need the alignment, and can use a little perf
        # improvement from avoiding useless work.
        # SH4 does not need specific alignment (4 bytes).
    else
        if ARM or ARMv7 or ARMv7_TRADITIONAL
            # ARM can't do logical ops with the sp as a source
            move sp, tempReg
            andp StackAlignmentMask, tempReg
        else
            andp sp, StackAlignmentMask, tempReg
        end
        btpz tempReg, .stackPointerOkay
        move location, tempReg
        break
    .stackPointerOkay:
    end
end

if C_LOOP
    const CalleeSaveRegisterCount = 0
elsif ARM or ARMv7_TRADITIONAL or ARMv7
    const CalleeSaveRegisterCount = 7
elsif ARM64
    const CalleeSaveRegisterCount = 10
elsif SH4 or X86_64 or MIPS
    const CalleeSaveRegisterCount = 5
elsif X86 or X86_WIN
    const CalleeSaveRegisterCount = 3
elsif X86_64_WIN
    const CalleeSaveRegisterCount = 7
end

const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize

# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
# callee save registers rounded up to keep the stack aligned
const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
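# The expression above is the usual round-up-to-alignment idiom; e.g. a
# 44-byte total becomes (44 + 15) & ~15 == 48, the next 16-byte boundary.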

macro pushCalleeSaves()
    if C_LOOP
    elsif ARM or ARMv7_TRADITIONAL
        emit "push {r4-r10}"
    elsif ARMv7
        emit "push {r4-r6, r8-r11}"
    elsif ARM64
        emit "stp x20, x19, [sp, #-16]!"
        emit "stp x22, x21, [sp, #-16]!"
        emit "stp x24, x23, [sp, #-16]!"
        emit "stp x26, x25, [sp, #-16]!"
        emit "stp x28, x27, [sp, #-16]!"
    elsif MIPS
        emit "addiu $sp, $sp, -20"
        emit "sw $20, 16($sp)"
        emit "sw $19, 12($sp)"
        emit "sw $18, 8($sp)"
        emit "sw $17, 4($sp)"
        emit "sw $16, 0($sp)"
    elsif SH4
        emit "mov.l r13, @-r15"
        emit "mov.l r11, @-r15"
        emit "mov.l r10, @-r15"
        emit "mov.l r9, @-r15"
        emit "mov.l r8, @-r15"
    elsif X86
        emit "push %esi"
        emit "push %edi"
        emit "push %ebx"
    elsif X86_WIN
        emit "push esi"
        emit "push edi"
        emit "push ebx"
    elsif X86_64
        emit "push %r12"
        emit "push %r13"
        emit "push %r14"
        emit "push %r15"
        emit "push %rbx"
    elsif X86_64_WIN
        emit "push r12"
        emit "push r13"
        emit "push r14"
        emit "push r15"
        emit "push rbx"
        emit "push rdi"
        emit "push rsi"
    end
end

macro popCalleeSaves()
    if C_LOOP
    elsif ARM or ARMv7_TRADITIONAL
        emit "pop {r4-r10}"
    elsif ARMv7
        emit "pop {r4-r6, r8-r11}"
    elsif ARM64
        emit "ldp x28, x27, [sp], #16"
        emit "ldp x26, x25, [sp], #16"
        emit "ldp x24, x23, [sp], #16"
        emit "ldp x22, x21, [sp], #16"
        emit "ldp x20, x19, [sp], #16"
    elsif MIPS
        emit "lw $16, 0($sp)"
        emit "lw $17, 4($sp)"
        emit "lw $18, 8($sp)"
        emit "lw $19, 12($sp)"
        emit "lw $20, 16($sp)"
        emit "addiu $sp, $sp, 20"
    elsif SH4
        emit "mov.l @r15+, r8"
        emit "mov.l @r15+, r9"
        emit "mov.l @r15+, r10"
        emit "mov.l @r15+, r11"
        emit "mov.l @r15+, r13"
    elsif X86
        emit "pop %ebx"
        emit "pop %edi"
        emit "pop %esi"
    elsif X86_WIN
        emit "pop ebx"
        emit "pop edi"
        emit "pop esi"
    elsif X86_64
        emit "pop %rbx"
        emit "pop %r15"
        emit "pop %r14"
        emit "pop %r13"
        emit "pop %r12"
    elsif X86_64_WIN
        emit "pop rsi"
        emit "pop rdi"
        emit "pop rbx"
        emit "pop r15"
        emit "pop r14"
        emit "pop r13"
        emit "pop r12"
    end
end

macro preserveCallerPCAndCFR()
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        push lr
        push cfr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        push cfr
    elsif ARM64
        push cfr, lr
    else
        error
    end
    move sp, cfr
end

macro restoreCallerPCAndCFR()
    move cfr, sp
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop cfr
    elsif ARM64
        pop lr, cfr
    end
end

macro preserveReturnAddressAfterCall(destinationRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only preserving the bytecode vPC.
        move lr, destinationRegister
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop destinationRegister
    else
        error
    end
end

macro restoreReturnAddressBeforeReturn(sourceRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only restoring the bytecode vPC.
        move sourceRegister, lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        push sourceRegister
    else
        error
    end
end

macro functionPrologue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
        push cfr
    elsif ARM64
        push cfr, lr
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        push lr
        push cfr
    end
    move sp, cfr
end

macro functionEpilogue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
        pop cfr
    elsif ARM64
        pop lr, cfr
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    end
end

macro vmEntryRecord(entryFramePointer, resultReg)
    subp entryFramePointer, VMEntryTotalFrameSize, resultReg
end

macro getFrameRegisterSizeForCodeBlock(codeBlock, size)
    loadi CodeBlock::m_numCalleeRegisters[codeBlock], size
    lshiftp 3, size
    addp maxFrameExtentForSlowPathCall, size
end

macro restoreStackPointerAfterCall()
    loadp CodeBlock[cfr], t2
    getFrameRegisterSizeForCodeBlock(t2, t4)
    if ARMv7
        subp cfr, t4, t4
        move t4, sp
    else
        subp cfr, t4, sp
    end
end

macro traceExecution()
    if EXECUTION_TRACING
        callSlowPath(_llint_trace)
    end
end

macro callTargetFunction(callLinkInfo, calleeFramePtr)
    move calleeFramePtr, sp
    if C_LOOP
        cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    else
        call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    end
    restoreStackPointerAfterCall()
    dispatchAfterCall()
end

macro slowPathForCall(slowPath)
    callCallSlowPath(
        slowPath,
        macro (callee)
            btpz t1, .dontUpdateSP
            if ARMv7
                addp CallerFrameAndPCSize, t1, t1
                move t1, sp
            else
                addp CallerFrameAndPCSize, t1, sp
            end
        .dontUpdateSP:
            if C_LOOP
                cloopCallJSFunction callee
            else
                call callee
            end
            restoreStackPointerAfterCall()
            dispatchAfterCall()
        end)
end
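# Note the convention the macro above relies on: the call slow path hands
# the machine code target to the continuation as "callee" and leaves the
# new callee frame pointer in t1, with a zero t1 meaning that sp is already
# correct.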

macro arrayProfile(cellAndIndexingType, profile, scratch)
    const cell = cellAndIndexingType
    const indexingType = cellAndIndexingType
    loadi JSCell::m_structureID[cell], scratch
    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
    loadb JSCell::m_indexingType[cell], indexingType
end

macro skipIfIsRememberedOrInEden(cell, scratch1, scratch2, continuation)
    loadb JSCell::m_gcData[cell], scratch1
    continuation(scratch1)
end

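# notifyWrite() guards stores to watched variables: unless the
# WatchpointSet is already invalidated, it branches to the slow path so the
# watchpoint can be fired there.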
macro notifyWrite(set, slow)
    bbneq WatchpointSet::m_state[set], IsInvalidated, slow
end

macro checkSwitchToJIT(increment, action)
    loadp CodeBlock[cfr], t0
    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
    action()
    .continue:
end

macro checkSwitchToJITForEpilogue()
    checkSwitchToJIT(
        10,
        macro ()
            callSlowPath(_llint_replace)
        end)
end

macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end

macro functionForCallCodeBlockGetter(targetRegister)
    if JSVALUE64
        loadp Callee[cfr], targetRegister
    else
        loadp Callee + PayloadOffset[cfr], targetRegister
    end
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
end

macro functionForConstructCodeBlockGetter(targetRegister)
    if JSVALUE64
        loadp Callee[cfr], targetRegister
    else
        loadp Callee + PayloadOffset[cfr], targetRegister
    end
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
end

macro notFunctionCodeBlockGetter(targetRegister)
    loadp CodeBlock[cfr], targetRegister
end

macro functionCodeBlockSetter(sourceRegister)
    storep sourceRegister, CodeBlock[cfr]
end

macro notFunctionCodeBlockSetter(sourceRegister)
    # Nothing to do!
end

# Do the bare minimum required to execute code. Sets up the PC and leaves the
# CodeBlock* in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    # Set up the call frame and check if we should OSR.
    preserveCallerPCAndCFR()

    if EXECUTION_TRACING
        subp maxFrameExtentForSlowPathCall, sp
        callSlowPath(traceSlowPath)
        addp maxFrameExtentForSlowPathCall, sp
    end
    codeBlockGetter(t1)
    if not C_LOOP
        baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
        if JSVALUE64
            cCall2(osrSlowPath, cfr, PC)
        else
            # We are after the function prologue, but before we have set up sp from the CodeBlock.
            # Temporarily align stack pointer for this call.
            subp 8, sp
            cCall2(osrSlowPath, cfr, PC)
            addp 8, sp
        end
        btpz t0, .recover
        move cfr, sp # restore the previous sp
        # pop the callerFrame since we will jump to a function that wants to save it
        if ARM64
            pop lr, cfr
        elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
            pop cfr
            pop lr
        else
            pop cfr
        end
        jmp t0
    .recover:
        codeBlockGetter(t1)
    .continue:
    end

    codeBlockSetter(t1)

    # Set up the PC.
    if JSVALUE64
        loadp CodeBlock::m_instructions[t1], PB
        move 0, PC
    else
        loadp CodeBlock::m_instructions[t1], PC
    end

    # Get new sp in t0 and check stack height.
    getFrameRegisterSizeForCodeBlock(t1, t0)
    subp cfr, t0, t0
    loadp CodeBlock::m_vm[t1], t2
    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK

    # Stack height check failed - need to call a slow_path.
    subp maxFrameExtentForSlowPathCall, sp # Set up temporary stack pointer for call
    callSlowPath(_llint_stack_check)
    bpeq t1, 0, .stackHeightOKGetCodeBlock
    move t1, cfr
    dispatch(0) # Go to exception handler in PC

.stackHeightOKGetCodeBlock:
    # The stack check slow path returned that the stack was ok, but it
    # clobbered our registers, so reload the CodeBlock and recompute the new sp.
    codeBlockGetter(t1)
    getFrameRegisterSizeForCodeBlock(t1, t0)
    subp cfr, t0, t0

.stackHeightOK:
    move t0, sp
end

# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    # Profile the arguments. Unfortunately, we have no choice but to do this. This
    # code is pretty horrendous because of the difference in ordering between
    # arguments and value profiles, the desire to have a simple loop-down-to-zero
    # loop, and the desire to use only three registers so as to preserve the PC and
    # the code block. It is likely that this code should be rewritten in a more
    # optimal way for architectures that have more than five registers available
    # for arbitrary use in the interpreter.
    loadi CodeBlock::m_numParameters[t1], t0
    addp -profileArgSkip, t0 # Use addi because that's what has the peephole
    assert(macro (ok) bpgteq t0, 0, ok end)
    btpz t0, .argumentProfileDone
    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
    lshiftp 3, t0
    addp t2, t3
.argumentProfileLoop:
    if JSVALUE64
        loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
    else
        loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
        loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
    end
    baddpnz -8, t0, .argumentProfileLoop
.argumentProfileDone:
end

macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
    const offsetOfFirstFreeCell =
        MarkedAllocator::m_freeList +
        MarkedBlock::FreeList::head

    # Get the object from the free list.
    loadp offsetOfFirstFreeCell[allocator], result
    btpz result, slowCase

    # Remove the object from the free list.
    loadp [result], scratch1
    storep scratch1, offsetOfFirstFreeCell[allocator]

    # Initialize the object.
    storep 0, JSObject::m_butterfly[result]
    storeStructureWithTypeInfo(result, structure, scratch1)
end

macro doReturn()
    restoreCallerPCAndCFR()
    ret
end

# stub to call into JavaScript or Native functions
# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame)
# EncodedJSValue vmEntryToNativeFunction(void* code, VM* vm, ProtoCallFrame* protoFrame)

if C_LOOP
_llint_vm_entry_to_javascript:
else
global _vmEntryToJavaScript
_vmEntryToJavaScript:
end
doVMEntry(makeJavaScriptCall)


if C_LOOP
_llint_vm_entry_to_native:
else
global _vmEntryToNative
_vmEntryToNative:
end
doVMEntry(makeHostFunctionCall)


if not C_LOOP
# void sanitizeStackForVMImpl(VM* vm)
global _sanitizeStackForVMImpl
_sanitizeStackForVMImpl:
    if X86_64
        const vm = t4
        const address = t1
        const zeroValue = t0
    elsif X86_64_WIN
        const vm = t2
        const address = t1
        const zeroValue = t0
    elsif X86 or X86_WIN
        const vm = t2
        const address = t1
        const zeroValue = t0
    else
        const vm = a0
        const address = t1
        const zeroValue = t2
    end

    if X86 or X86_WIN
        loadp 4[sp], vm
    end

    loadp VM::m_lastStackTop[vm], address
    bpbeq sp, address, .zeroFillDone

    move 0, zeroValue
.zeroFillLoop:
    storep zeroValue, [address]
    addp PtrSize, address
    bpa sp, address, .zeroFillLoop

.zeroFillDone:
    move sp, address
    storep address, VM::m_lastStackTop[vm]
    ret

# VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame)
global _vmEntryRecord
_vmEntryRecord:
    if X86_64
        const entryFrame = t4
        const result = t0
    elsif X86 or X86_WIN or X86_64_WIN
        const entryFrame = t2
        const result = t0
    else
        const entryFrame = a0
        const result = t0
    end

    if X86 or X86_WIN
        loadp 4[sp], entryFrame
    end

    vmEntryRecord(entryFrame, result)
    ret
end

if C_LOOP
# Dummy entry point the C Loop uses to initialize.
_llint_entry:
    crash()
else
macro initPCRelative(pcBase)
    if X86_64 or X86_64_WIN
        call _relativePCBase
    _relativePCBase:
        pop pcBase
    elsif X86 or X86_WIN
        call _relativePCBase
    _relativePCBase:
        pop pcBase
        loadp 20[sp], t4
    elsif ARM64
    elsif ARMv7
    _relativePCBase:
        move pc, pcBase
        subp 3, pcBase # Need to back up the PC and set the Thumb2 bit
    elsif ARM or ARMv7_TRADITIONAL
    _relativePCBase:
        move pc, pcBase
        subp 8, pcBase
    elsif MIPS
        la _relativePCBase, pcBase
    _relativePCBase:
    elsif SH4
        mova _relativePCBase, t0
        move t0, pcBase
        alignformova
    _relativePCBase:
    end
end
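# initPCRelative() materializes a PC-relative base address. x86 has no
# architectural way to read the PC, so it uses the classic call/pop trick:
# the call pushes the address of _relativePCBase, which pop then leaves in
# pcBase. The ARM variants read pc directly and back out the pipeline
# offset.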

macro setEntryAddress(index, label)
    if X86_64
        leap (label - _relativePCBase)[t1], t0
        move index, t2
        storep t0, [t4, t2, 8]
    elsif X86_64_WIN
        leap (label - _relativePCBase)[t1], t0
        move index, t4
        storep t0, [t2, t4, 8]
    elsif X86 or X86_WIN
        leap (label - _relativePCBase)[t1], t0
        move index, t2
        storep t0, [t4, t2, 4]
    elsif ARM64
        pcrtoaddr label, t1
        move index, t2
        storep t1, [a0, t2, 8]
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
        mvlbl (label - _relativePCBase), t2
        addp t2, t1, t2
        move index, t3
        storep t2, [a0, t3, 4]
    elsif SH4
        move (label - _relativePCBase), t2
        addp t2, t1, t2
        move index, t3
        storep t2, [a0, t3, 4]
        flushcp # Force constant pool flush to avoid "pcrel too far" link error.
    elsif MIPS
        la label, t2
        la _relativePCBase, t3
        subp t3, t2
        addp t2, t1, t2
        move index, t3
        storep t2, [a0, t3, 4]
    end
end
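# The generated InitBytecodes file included from _llint_entry below
# consists of one setEntryAddress(index, label) call per opcode, filling
# the entry-point table (whose base arrives in a0, or in t4/t2 on the x86
# variants) with the absolute address of each instruction's implementation.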

global _llint_entry
# Entry point for the llint to initialize.
_llint_entry:
    functionPrologue()
    pushCalleeSaves()
    initPCRelative(t1)

    # Include generated bytecode initialization file.
    include InitBytecodes

    popCalleeSaves()
    functionEpilogue()
    ret
end

_llint_program_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_eval_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_function_for_call_prologue:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_prologue:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
    functionInitialization(1)
    dispatch(0)


_llint_function_for_call_arity_check:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
    functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
.functionForCallBegin:
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_arity_check:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
    functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
.functionForConstructBegin:
    functionInitialization(1)
    dispatch(0)


# Value-representation-specific code.
if JSVALUE64
    include LowLevelInterpreter64
else
    include LowLevelInterpreter32_64
end


# Value-representation-agnostic code.
_llint_op_create_direct_arguments:
    traceExecution()
    callSlowPath(_slow_path_create_direct_arguments)
    dispatch(2)


_llint_op_create_scoped_arguments:
    traceExecution()
    callSlowPath(_slow_path_create_scoped_arguments)
    dispatch(3)


_llint_op_create_out_of_band_arguments:
    traceExecution()
    callSlowPath(_slow_path_create_out_of_band_arguments)
    dispatch(2)


_llint_op_new_func:
    traceExecution()
    callSlowPath(_llint_slow_path_new_func)
    dispatch(4)


_llint_op_new_array:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array)
    dispatch(5)


_llint_op_new_array_with_size:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_with_size)
    dispatch(4)


_llint_op_new_array_buffer:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_buffer)
    dispatch(5)


_llint_op_new_regexp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_regexp)
    dispatch(3)


_llint_op_less:
    traceExecution()
    callSlowPath(_slow_path_less)
    dispatch(4)


_llint_op_lesseq:
    traceExecution()
    callSlowPath(_slow_path_lesseq)
    dispatch(4)


_llint_op_greater:
    traceExecution()
    callSlowPath(_slow_path_greater)
    dispatch(4)


_llint_op_greatereq:
    traceExecution()
    callSlowPath(_slow_path_greatereq)
    dispatch(4)


_llint_op_mod:
    traceExecution()
    callSlowPath(_slow_path_mod)
    dispatch(4)


_llint_op_typeof:
    traceExecution()
    callSlowPath(_slow_path_typeof)
    dispatch(3)


_llint_op_is_object_or_null:
    traceExecution()
    callSlowPath(_slow_path_is_object_or_null)
    dispatch(3)

_llint_op_is_function:
    traceExecution()
    callSlowPath(_slow_path_is_function)
    dispatch(3)


_llint_op_in:
    traceExecution()
    callSlowPath(_slow_path_in)
    dispatch(4)

macro withInlineStorage(object, propertyStorage, continuation)
    # Indicate that the object is the property storage, and that the
    # property storage register is unused.
    continuation(object, propertyStorage)
end

macro withOutOfLineStorage(object, propertyStorage, continuation)
    loadp JSObject::m_butterfly[object], propertyStorage
    # Indicate that the propertyStorage register now points to the
    # property storage, and that the object register may be reused
    # if the object pointer is not needed anymore.
    continuation(propertyStorage, object)
end


_llint_op_del_by_id:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_id)
    dispatch(4)


_llint_op_del_by_val:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_val)
    dispatch(4)


_llint_op_put_by_index:
    traceExecution()
    callSlowPath(_llint_slow_path_put_by_index)
    dispatch(4)


_llint_op_put_getter_by_id:
    traceExecution()
    callSlowPath(_llint_slow_path_put_getter_by_id)
    dispatch(4)


_llint_op_put_setter_by_id:
    traceExecution()
    callSlowPath(_llint_slow_path_put_setter_by_id)
    dispatch(4)


_llint_op_put_getter_setter:
    traceExecution()
    callSlowPath(_llint_slow_path_put_getter_setter)
    dispatch(5)


_llint_op_jtrue:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btinz value, target end,
        _llint_slow_path_jtrue)


_llint_op_jfalse:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btiz value, target end,
        _llint_slow_path_jfalse)


_llint_op_jless:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdlt left, right, target end,
        _llint_slow_path_jless)


_llint_op_jnless:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgtequn left, right, target end,
        _llint_slow_path_jnless)


_llint_op_jgreater:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgt left, right, target end,
        _llint_slow_path_jgreater)


_llint_op_jngreater:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdltequn left, right, target end,
        _llint_slow_path_jngreater)


_llint_op_jlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdlteq left, right, target end,
        _llint_slow_path_jlesseq)


_llint_op_jnlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgtun left, right, target end,
        _llint_slow_path_jnlesseq)


_llint_op_jgreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgteq left, right, target end,
        _llint_slow_path_jgreatereq)


_llint_op_jngreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdltun left, right, target end,
        _llint_slow_path_jngreatereq)


_llint_op_loop_hint:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    loadp VM::watchdog[t1], t0
    btpnz t0, .handleWatchdogTimer
.afterWatchdogTimerCheck:
    checkSwitchToJITForLoop()
    dispatch(1)
.handleWatchdogTimer:
    loadb Watchdog::m_timerDidFire[t0], t0
    btbz t0, .afterWatchdogTimerCheck
    callWatchdogTimerHandler(.throwHandler)
    jmp .afterWatchdogTimerCheck
.throwHandler:
    jmp _llint_throw_from_slow_path_trampoline

_llint_op_switch_string:
    traceExecution()
    callSlowPath(_llint_slow_path_switch_string)
    dispatch(0)


_llint_op_new_func_exp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_func_exp)
    dispatch(4)


_llint_op_call:
    traceExecution()
    arrayProfileForCall()
    doCall(_llint_slow_path_call)


_llint_op_construct:
    traceExecution()
    doCall(_llint_slow_path_construct)


_llint_op_call_varargs:
    traceExecution()
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # calleeFrame in t1
    if JSVALUE64
        move t1, sp
    else
        # The calleeFrame is not stack aligned; move down by CallerFrameAndPCSize to align it.
        if ARMv7
            subp t1, CallerFrameAndPCSize, t2
            move t2, sp
        else
            subp t1, CallerFrameAndPCSize, sp
        end
    end
    slowPathForCall(_llint_slow_path_call_varargs)

_llint_op_construct_varargs:
    traceExecution()
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # calleeFrame in t1
    if JSVALUE64
        move t1, sp
    else
        # The calleeFrame is not stack aligned; move down by CallerFrameAndPCSize to align it.
        if ARMv7
            subp t1, CallerFrameAndPCSize, t2
            move t2, sp
        else
            subp t1, CallerFrameAndPCSize, sp
        end
    end
    slowPathForCall(_llint_slow_path_construct_varargs)


_llint_op_call_eval:
    traceExecution()

    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval(), in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would otherwise require a
    # total of up to four pieces of state that cannot be easily packed into
    # two registers (C functions can easily return up to two registers):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #   to call.
    #
    # - Either:
    #   - The JS return value (two registers), or
    #   - The PC to call.
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.

    slowPathForCall(_llint_slow_path_call_eval)


_llint_generic_return_point:
    dispatchAfterCall()


_llint_op_strcat:
    traceExecution()
    callSlowPath(_slow_path_strcat)
    dispatch(4)


_llint_op_push_with_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_with_scope)
    dispatch(3)


_llint_op_pop_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_pop_scope)
    dispatch(2)


_llint_op_push_name_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_name_scope)
    dispatch(5)


_llint_op_throw:
    traceExecution()
    callSlowPath(_llint_slow_path_throw)
    dispatch(2)


_llint_op_throw_static_error:
    traceExecution()
    callSlowPath(_llint_slow_path_throw_static_error)
    dispatch(3)


_llint_op_profile_will_call:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerWillCallDone
    callSlowPath(_llint_slow_path_profile_will_call)
.opProfilerWillCallDone:
    dispatch(2)


_llint_op_profile_did_call:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerDidCallDone
    callSlowPath(_llint_slow_path_profile_did_call)
.opProfilerDidCallDone:
    dispatch(2)


_llint_op_debug:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadi CodeBlock::m_debuggerRequests[t0], t0
    btiz t0, .opDebugDone
    callSlowPath(_llint_slow_path_debug)
.opDebugDone:
    dispatch(3)


_llint_native_call_trampoline:
    nativeCallTrampoline(NativeExecutable::m_function)


_llint_native_construct_trampoline:
    nativeCallTrampoline(NativeExecutable::m_constructor)

_llint_op_get_enumerable_length:
    traceExecution()
    callSlowPath(_slow_path_get_enumerable_length)
    dispatch(3)

_llint_op_has_indexed_property:
    traceExecution()
    callSlowPath(_slow_path_has_indexed_property)
    dispatch(5)

_llint_op_has_structure_property:
    traceExecution()
    callSlowPath(_slow_path_has_structure_property)
    dispatch(5)

_llint_op_has_generic_property:
    traceExecution()
    callSlowPath(_slow_path_has_generic_property)
    dispatch(4)

_llint_op_get_direct_pname:
    traceExecution()
    callSlowPath(_slow_path_get_direct_pname)
    dispatch(7)

_llint_op_get_property_enumerator:
    traceExecution()
    callSlowPath(_slow_path_get_property_enumerator)
    dispatch(3)

_llint_op_enumerator_structure_pname:
    traceExecution()
    callSlowPath(_slow_path_next_structure_enumerator_pname)
    dispatch(4)

_llint_op_enumerator_generic_pname:
    traceExecution()
    callSlowPath(_slow_path_next_generic_enumerator_pname)
    dispatch(4)

_llint_op_to_index_string:
    traceExecution()
    callSlowPath(_slow_path_to_index_string)
    dispatch(3)

_llint_op_profile_control_flow:
    traceExecution()
    loadpFromInstruction(1, t0)
    storeb 1, BasicBlockLocation::m_hasExecuted[t0]
    dispatch(2)

# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
# as much.

macro notSupported()
    if ASSERT_ENABLED
        crash()
    else
        # We should use whatever the smallest possible instruction is, just to
        # ensure that there is a gap between instruction labels. If multiple
        # smallest instructions exist, we should pick the one that is most
        # likely to result in execution being halted. Currently that is the
        # break instruction on all architectures we're interested in. (Break
        # is int3 on Intel, which is 1 byte, and bkpt on ARMv7, which is 2
        # bytes.)
        break
    end
end

_llint_op_init_global_const_nop:
    dispatch(5)