/*
 * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ThunkGenerators.h"

#include "CodeBlock.h"
#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>

#if ENABLE(JIT)

namespace JSC {

static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
{
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);

    // Also initialize ReturnPC and CodeBlock, like a JS function would.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callNotJSFunction = jit.call();
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.ret();

    return callNotJSFunction;
}

static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;
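
    // The callee is passed in regT0 (with its tag in regT1 on 32-bit builds); anything
    // that is not a JSFunction cell takes the slow path.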
#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
#endif // USE(JSVALUE64)

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    // Also initialize ReturnPC for use by lazy linking and exceptions.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callLazyLink = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callLazyLink, lazyLink);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
}

MacroAssemblerCodeRef linkCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
}

MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
}

MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
{
    return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
}

static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
{
    JSInterfaceJIT jit;

    JSInterfaceJIT::JumpList slowCase;

#if USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
#else // USE(JSVALUE64)
    slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
#endif // USE(JSVALUE64)
    slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));

    // Finish canonical initialization before JS function call.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
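    // The branch below treats a non-negative parameter count as "already has JIT code for this
    // kind"; otherwise we call the compile stub first and reload the executable afterwards.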
    JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    jit.restoreArgumentReference();
    JSInterfaceJIT::Call callCompile = jit.call();
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    hasCodeBlock1.link(&jit);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
    jit.jump(JSInterfaceJIT::regT0);

    slowCase.link(&jit);
    JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    patchBuffer.link(callCompile, compile);
    patchBuffer.link(callNotJSFunction, notJSFunction);

    return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
}

MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
}

MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
{
    return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
}

MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
{
    JSInterfaceJIT jit;

#if USE(JSVALUE64)
    // Check eax is a string
    JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
            JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the Ustring.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT0);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));

    // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
    jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);

#else // USE(JSVALUE64)
    // regT0 holds payload, regT1 holds tag

    JSInterfaceJIT::Jump failureCases1 = jit.branch32(
        JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
        JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
    JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
        JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));

    // Checks out okay! - get the length from the Ustring.
    jit.load32(
        JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
        JSInterfaceJIT::regT2);

    JSInterfaceJIT::Jump failureCases3 = jit.branch32(
        JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
#endif // USE(JSVALUE64)

    jit.ret();

    JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
    JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
    JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);

    patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));

    return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
}
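
// nativeForGenerator builds the trampoline used to call a host (C/C++) function: it finishes
// setting up the call frame header, moves the ExecState into the first argument register of the
// target ABI, calls the NativeFunction stored in the callee's executable, and checks
// vm->exception on the way out.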

static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
    int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);

    JSInterfaceJIT jit;

    jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);

#if CPU(X86)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

    // Calling convention:      f(ecx, edx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    // Call the function.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);

#elif CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.peek(JSInterfaceJIT::regT1);
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);

#if !OS(WINDOWS)
    // Calling convention:      f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);

    jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
    // Calling convention:      f(ecx, edx, r8, r9, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);

    // Leave space for the callee parameter home addresses and align the stack.
    jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
    jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));

    jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif

#elif CPU(ARM64)
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
    COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
    COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
    COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
    COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);

    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, ARM64Registers::x3);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(ARM64Registers::lr, JSStack::ReturnPC);

    // Calling convention:      f(x0, x1, x2, x3, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
    jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
    jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(SH4)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention: f(r0 == regT4, r1 == regT5, ...);
    // Host function signature: f(ExecState*);
    jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);

    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
    jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);

    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);

#elif CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
    jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
    jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);

    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
    jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);

    // Calling convention:      f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate stack space for 16 bytes (8-byte aligned)
    // 16 bytes (unused) for 4 arguments
    jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    // Setup arg0.
    jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);

    // Call the function.
    jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
    jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
    jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));

    // Restore stack space
    jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);

    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
    UNUSED_PARAM(executableOffsetToFunction);
#endif

    // Check for an exception
#if USE(JSVALUE64)
    jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
    JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
    JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
        JSInterfaceJIT::NotEqual,
        JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
        JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif

    // Return.
    jit.ret();

    // Handle an exception
    exceptionHandler.link(&jit);

    // Grab the return address.
    jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);

    jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
    jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
    jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
    // Set the return address.
    jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
    jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);

    jit.ret();

    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}

MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForCall);
}

MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
    return nativeForGenerator(vm, CodeForConstruct);
}

static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
    // load string
    jit.loadJSStringArgument(*vm, SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0));

    // load index
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
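
    // Strings store either 8-bit (Latin-1) or 16-bit characters; check the StringImpl flags and
    // use the matching load width below.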
    // Load the character
    SpecializedThunkJIT::JumpList is16Bit;
    SpecializedThunkJIT::JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::flagsOffset()), SpecializedThunkJIT::regT2);
    jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, StringImpl::dataOffset()), SpecializedThunkJIT::regT0);
    is16Bit.append(jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT2, MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    jit.load8(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesOne, 0), SpecializedThunkJIT::regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
    cont8Bit.link(&jit);
}
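
// charToString maps a character code in 'src' to the VM's cached single-character JSString in
// 'dst'; codes >= 0x100 or missing cache entries fail over to the generic path.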

static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
    jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
    jit.move(MacroAssembler::TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), scratch);
    jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
    jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}

MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}

MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    stringCharLoad(jit, vm);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
}

MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
    charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
    jit.returnJSCell(SpecializedThunkJIT::regT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}

MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
}

#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
extern "C" {
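
// jsRound matches Math.round: round to nearest, with halfway cases (fractional part exactly 0.5)
// rounded up toward +infinity.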
double jsRound(double) REFERENCED_FROM_ASM;
double jsRound(double d)
{
    double integer = ceil(d);
    return integer - (integer - d > 0.5);
}

}
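
// Each defineUnaryDoubleOpWrapper(function) below emits a tiny assembly thunk that adapts the
// JIT's register-based double call (used by callDoubleToDoublePreservingReturn) to the platform's
// C calling convention for the underlying math function; on platforms without such a wrapper the
// pointer is null and the thunk generators fall back to the generic native call.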

#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "subl $8, %esp\n" \
        "movsd %xmm0, (%esp) \n" \
        "call " GLOBAL_REFERENCE(function) "\n" \
        "fstpl (%esp) \n" \
        "movsd (%esp), %xmm0 \n" \
        "addl $8, %esp\n" \
        "ret\n" \
    );\
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        ".thumb\n" \
        ".thumb_func " THUMB_FUNC_PARAM(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "push {lr}\n" \
        "vmov r0, r1, d0\n" \
        "blx " GLOBAL_REFERENCE(function) "\n" \
        "vmov d0, r0, r1\n" \
        "pop {lr}\n" \
        "bx lr\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#elif CPU(ARM64)

#define defineUnaryDoubleOpWrapper(function) \
    asm( \
        ".text\n" \
        ".align 2\n" \
        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
        HIDE_SYMBOL(function##Thunk) "\n" \
        SYMBOL_STRING(function##Thunk) ":" "\n" \
        "b " GLOBAL_REFERENCE(function) "\n" \
    ); \
    extern "C" { \
        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
    } \
    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;

#else

#define defineUnaryDoubleOpWrapper(function) \
    static MathThunk UnaryDoubleOpWrapper(function) = 0

#endif

defineUnaryDoubleOpWrapper(jsRound);
defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);

static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
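
// floor, ceil and round share the same shape: integer arguments are returned unchanged, a cheap
// inline conversion is tried where the hardware supports it, and otherwise the thunk calls out to
// the C implementation through the wrappers above, classifying the result as an int32 or a double
// on the way back.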

MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    MacroAssembler::Jump nonIntJump;
    if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#else
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
}

MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
#if CPU(ARM64)
    jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
#else
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
#endif // CPU(ARM64)
    SpecializedThunkJIT::JumpList doubleResult;
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
}

MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    SpecializedThunkJIT::Jump intResult;
    SpecializedThunkJIT::JumpList doubleResult;
    if (jit.supportsFloatingPointTruncate()) {
        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        SpecializedThunkJIT::JumpList slowPath;
        // Handle the negative doubles in the slow path for now.
        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
        intResult = jit.jump();
        slowPath.link(&jit);
    }
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
    if (jit.supportsFloatingPointTruncate())
        intResult.link(&jit);
    jit.returnInt32(SpecializedThunkJIT::regT0);
    doubleResult.link(&jit);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
}

MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(exp))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
}

MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
    if (!UnaryDoubleOpWrapper(log))
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
}

MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(1);
    if (!jit.supportsFloatingPointAbs())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
    MacroAssembler::Jump nonIntJump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
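    // Branchless integer abs: regT1 = x >> 31 (0 or -1), then (x + regT1) ^ regT1 == |x|.
    // The one value this cannot represent, INT_MIN, is caught below and sent to the slow path.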
    jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
    jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
    jit.returnInt32(SpecializedThunkJIT::regT0);
    nonIntJump.link(&jit);
    // Shame about the double int conversion here.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
}

MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    if (!jit.supportsFloatingPoint())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    MacroAssembler::Jump nonIntExponent;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
    jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));
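
    // Non-negative integer exponent: compute the power by square-and-multiply, accumulating the
    // result in fpRegT1 (initialized to 1.0 above) while the base is repeatedly squared in fpRegT0.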
    MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
    MacroAssembler::Label startLoop(jit.label());

    MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
    exponentIsEven.link(&jit);
    jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
    jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);

    exponentIsZero.link(&jit);

    {
        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    }

    if (jit.supportsFloatingPointSqrt()) {
        nonIntExponent.link(&jit);
        jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
        jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
        jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
        jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);

        SpecializedThunkJIT::JumpList doubleResult;
        jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
        jit.returnInt32(SpecializedThunkJIT::regT0);
        doubleResult.link(&jit);
        jit.returnDouble(SpecializedThunkJIT::fpRegT1);
    } else
        jit.appendFailure(nonIntExponent);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
}

MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
    SpecializedThunkJIT jit(2);
    MacroAssembler::Jump nonIntArg0Jump;
    jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
    SpecializedThunkJIT::Label doneLoadingArg0(&jit);
    MacroAssembler::Jump nonIntArg1Jump;
    jit.loadInt32Argument(1, SpecializedThunkJIT::regT1, nonIntArg1Jump);
    SpecializedThunkJIT::Label doneLoadingArg1(&jit);
    jit.mul32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
    jit.returnInt32(SpecializedThunkJIT::regT0);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg0Jump.link(&jit);
        jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
        jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
        jit.jump(doneLoadingArg0);
    } else
        jit.appendFailure(nonIntArg0Jump);

    if (jit.supportsFloatingPointTruncate()) {
        nonIntArg1Jump.link(&jit);
        jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
        jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
        jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
        jit.jump(doneLoadingArg1);
    } else
        jit.appendFailure(nonIntArg1Jump);

    return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
}

} // namespace JSC

#endif // ENABLE(JIT)