/*
 * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "StackAlignment.h"
#include <wtf/StringPrintStream.h>

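// In JSVALUE32_64, a JSValue occupies one 8-byte Register as two 32-bit words:
// a tag and a payload (see the PayloadOffset/TagOffset uses below). Throughout
// this file the working value is kept with its tag in regT1 and its payload in
// regT0 unless a comment says otherwise.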
namespace JSC {

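// Store the call's return value (tag in regT1, payload in regT0) into the
// destination virtual register, recording it at this bytecode's value
// profiling site first.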
void JIT::emitPutCallResult(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite();
    emitStore(dst, regT1, regT0);
}

void JIT::emit_op_ret(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    emitLoad(dst, regT1, regT0);

    checkStackPointerAlignment();
    emitFunctionEpilogue();
    ret();
}

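// Constructor return semantics: if the explicitly returned value is an object
// cell, return it; otherwise fall through to returning the 'this' object.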
void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned thisReg = currentInstruction[2].u.operand;

    emitLoad(result, regT1, regT0);
    Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
    Jump notObject = emitJumpIfCellNotObject(regT0);

    checkStackPointerAlignment();
    emitFunctionEpilogue();
    ret();

    notJSCell.link(this);
    notObject.link(this);
    emitLoad(thisReg, regT1, regT0);

    checkStackPointerAlignment();
    emitFunctionEpilogue();
    ret();
}

void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
{
    compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}

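// Sets up the outgoing frame for op_call_varargs / op_construct_varargs. The
// fast path applies when the arguments object was never materialized (its
// register still holds the empty value) and no slow arguments are in use: the
// arguments are then copied directly out of the caller's frame. Otherwise we
// call into the runtime to size and fill the new frame.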
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        if (firstVarArgOffset) {
            Jump sufficientArguments = branch32(GreaterThan, regT2, TrustedImm32(firstVarArgOffset + 1));
            move(TrustedImm32(1), regT2);
            Jump endVarArgs = jump();
            sufficientArguments.link(this);
            sub32(TrustedImm32(firstVarArgOffset), regT2);
            endVarArgs.link(this);
        }
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        addPtr(TrustedImm32(-firstFreeRegister + JSStack::CallFrameHeaderSize), regT3);
        // regT3 now has the required frame size in Register units.
        // Round regT3 up to the next multiple of stackAlignmentRegisters().
        addPtr(TrustedImm32(stackAlignmentRegisters() - 1), regT3);
        andPtr(TrustedImm32(~(stackAlignmentRegisters() - 1)), regT3);
        neg32(regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + ((CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + ((CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitLoad(arguments, regT1, regT0);
    callOperation(operationSizeFrameForVarargs, regT1, regT0, firstFreeRegister, firstVarArgOffset);
    addPtr(TrustedImm32(-sizeof(CallerFrameAndPC)), returnValueGPR, stackPointerRegister);
    emitLoad(thisValue, regT1, regT4);
    emitLoad(arguments, regT3, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT4, regT3, regT2, firstVarArgOffset);
    move(returnValueGPR, regT3);

    if (canOptimize)
        end.link(this);

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT3, stackPointerRegister);
}

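// op_call_eval: compileOpCall has already built the outgoing frame. Call
// operationCallEval; a result with the empty value tag means the callee could
// not be handled as eval here, and the slow case performs an ordinary virtual
// call instead.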
void JIT::compileCallEval(Instruction* instruction)
{
    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);

    callOperationNoExceptionCheck(operationCallEval, regT1);

    Jump noException = emitExceptionCheck(InvertedExceptionCheck);
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    exceptionCheck(jump());

    noException.link(this);
    addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}

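// Slow path for op_call_eval: reload the callee from the frame we built and
// go through the virtual call thunk, passing the shared dummy CallLinkInfo
// since eval call sites are not linked.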
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    // Reload the callee of the frame we just built (it sits below the current
    // stack pointer) into regT1:regT0, as the virtual call thunk expects.
    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee + PayloadOffset - sizeof(CallerFrameAndPC)), regT0);
    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee + TagOffset - sizeof(CallerFrameAndPC)), regT1);
    move(TrustedImmPtr(&CallLinkInfo::dummy()), regT2);

    emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}

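// Common hot path for every call/construct opcode: build the outgoing frame
// just below the current one, store ArgumentCount and the callee into it, and
// (for non-eval calls) emit the patchable compare-and-direct-call sequence
// that the call-linking machinery repatches. Relative to the new stack
// pointer, each header slot X lives at
//     JSStack::X * sizeof(Register) - sizeof(CallerFrameAndPC).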
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
            storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));

    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT2);
    store32(regT2, Address(stackPointerRegister, JSStack::ScopeChain * sizeof(Register) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(TrustedImm32(JSValue::CellTag), Address(stackPointerRegister, JSStack::ScopeChain * sizeof(Register) + TagOffset - sizeof(CallerFrameAndPC)));

    checkStackPointerAlignment();
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}

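// Generic slow path: taken when the callee is not a cell or fails the linked
// function check. Call through the link thunk for calls or constructs, which
// links the call site to the callee on first use.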
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

    linkSlowCase(iter);
    linkSlowCase(iter);

    ThunkGenerator generator = linkThunkGeneratorFor(
        (opcodeID == op_construct || opcodeID == op_construct_varargs) ? CodeForConstruct : CodeForCall,
        RegisterPreservationNotRequired);

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(generator).code());

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}

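// Compile a closure call stub: guard on the callee's structure and executable,
// write the scope chain into the frame header, and near-call the known target;
// guard failures bounce to the virtual call thunk. The hot path's patchable
// branch is then repointed at this stub.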
void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
    JumpList slowCases;

    slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(expectedStructure)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));

    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);

    Call call = nearCall();
    Jump done = jump();

    slowCases.link(this);
    move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
    restoreReturnAddressBeforeReturn(regT2);
    Jump slow = jump();

    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);

    patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
    patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
    patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));

    RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline closure call stub for %s, return point %p, target %p (%s)",
                toCString(*m_codeBlock).data(),
                callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
                codePtr.executableAddress(),
                toCString(pointerDump(calleeCodeBlock)).data())),
        *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
        callLinkInfo->codeOrigin));

    RepatchBuffer repatchBuffer(m_codeBlock);

    repatchBuffer.replaceWithJump(
        RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
        CodeLocationLabel(stubRoutine->code().code()));
    repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());

    callLinkInfo->stub = stubRoutine.release();
}

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)