diff --git a/assembler/MacroAssembler.h b/assembler/MacroAssembler.h
index 9d24653444b5bbd9b853e637c4b00850ef072db6..fd4c5bbf5e4938e7ff96f1429d8fb5659ea2a555 100644
--- a/assembler/MacroAssembler.h
+++ b/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #ifndef MacroAssembler_h
 #define MacroAssembler_h
 
-#include <wtf/Platform.h>
-
 #if ENABLE(ASSEMBLER)
 
-#include "X86Assembler.h"
-
-namespace JSC {
-
-class MacroAssembler {
-protected:
-    X86Assembler m_assembler;
-
-#if PLATFORM(X86_64)
-    static const X86::RegisterID scratchRegister = X86::r11;
-#endif
-
-public:
-    typedef X86::RegisterID RegisterID;
-
-    // Note: do not rely on values in this enum; these will change (to 0..3).
-    enum Scale {
-        TimesOne = 1,
-        TimesTwo = 2,
-        TimesFour = 4,
-        TimesEight = 8,
-#if PLATFORM(X86)
-        ScalePtr = TimesFour
-#endif
-#if PLATFORM(X86_64)
-        ScalePtr = TimesEight
-#endif
-    };
-
-    MacroAssembler()
-    {
-    }
-    
-    size_t size() { return m_assembler.size(); }
-    void* copyCode(ExecutablePool* allocator)
-    {
-        return m_assembler.executableCopy(allocator);
-    }
-
-
-    // Address:
-    //
-    // Describes a simple base-offset address.
-    struct Address {
-        explicit Address(RegisterID base, int32_t offset = 0)
-            : base(base)
-            , offset(offset)
-        {
-        }
-
-        RegisterID base;
-        int32_t offset;
-    };
-
-    // ImplicitAddress:
-    //
-    // This class is used for explicit 'load' and 'store' operations
-    // (as opposed to situations in which a memory operand is provided
-    // to a generic operation, such as an integer arithmetic instruction).
-    //
-    // In the case of a load (or store) operation we want to permit
-    // addresses to be implicitly constructed, e.g. the two calls:
-    //
-    //     load32(Address(addrReg), destReg);
-    //     load32(addrReg, destReg);
-    //
-    // Are equivalent, and the explicit wrapping of the Address in the former
-    // is unnecessary.
-    struct ImplicitAddress {
-        ImplicitAddress(RegisterID base)
-            : base(base)
-            , offset(0)
-        {
-        }
-
-        ImplicitAddress(Address address)
-            : base(address.base)
-            , offset(address.offset)
-        {
-        }
-
-        RegisterID base;
-        int32_t offset;
-    };
-
-    // BaseIndex:
-    //
-    // Describes a complex addressing mode.
-    struct BaseIndex {
-        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
-            : base(base)
-            , index(index)
-            , scale(scale)
-            , offset(offset)
-        {
-        }
-
-        RegisterID base;
-        RegisterID index;
-        Scale scale;
-        int32_t offset;
-    };
-
-    // AbsoluteAddress:
-    //
-    // Describes a memory operand given by a pointer.  For regular load & store
-    // operations an unwrapped void* will be used, rather than using this.
-    struct AbsoluteAddress {
-        explicit AbsoluteAddress(void* ptr)
-            : m_ptr(ptr)
-        {
-        }
-
-        void* m_ptr;
-    };
-
-
-    class Jump;
-    class PatchBuffer;
-
-    // DataLabelPtr:
-    //
-    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
-    // patched after the code has been generated.
-    class DataLabelPtr {
-        friend class MacroAssembler;
-        friend class PatchBuffer;
-
-    public:
-        DataLabelPtr()
-        {
-        }
-
-        DataLabelPtr(MacroAssembler* masm)
-            : m_label(masm->m_assembler.label())
-        {
-        }
-
-        static void patch(void* address, void* value)
-        {
-            X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
-        }
-        
-    private:
-        X86Assembler::JmpDst m_label;
-    };
-
-    // DataLabel32:
-    //
-    // A DataLabel32 is used to refer to a location in the code containing a 32-bit value to be
-    // patched after the code has been generated.
-    class DataLabel32 {
-        friend class MacroAssembler;
-        friend class PatchBuffer;
-
-    public:
-        DataLabel32()
-        {
-        }
-
-        DataLabel32(MacroAssembler* masm)
-            : m_label(masm->m_assembler.label())
-        {
-        }
-
-        static void patch(void* address, int32_t value)
-        {
-            X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address), value);
-        }
-
-    private:
-        X86Assembler::JmpDst m_label;
-    };
-
-    // Label:
-    //
-    // A Label records a point in the generated instruction stream, typically such that
-    // it may be used as a destination for a jump.
-    class Label {
-        friend class Jump;
-        friend class MacroAssembler;
-        friend class PatchBuffer;
-
-    public:
-        Label()
-        {
-        }
-
-        Label(MacroAssembler* masm)
-            : m_label(masm->m_assembler.label())
-        {
-        }
-        
-        // FIXME: transitionary method, while we replace JmpSrces with Jumps.
-        operator X86Assembler::JmpDst()
-        {
-            return m_label;
-        }
-
-    private:
-        X86Assembler::JmpDst m_label;
-    };
-
-
-    // Jump:
-    //
-    // A jump object is a reference to a jump instruction that has been planted
-    // into the code buffer - it is typically used to link the jump, setting the
-    // relative offset such that when executed it will jump to the desired
-    // destination.
-    //
-    // Jump objects retain a pointer to the assembler for syntactic purposes -
-    // to allow the jump object to be able to link itself, e.g.:
-    //
-    //     Jump forwardsBranch = jne32(Imm32(0), reg1);
-    //     // ...
-    //     forwardsBranch.link();
-    //
-    // Jumps may also be linked to a Label.
-    class Jump {
-        friend class PatchBuffer;
-        friend class MacroAssembler;
-
-    public:
-        Jump()
-        {
-        }
-        
-        // FIXME: transitionary method, while we replace JmpSrces with Jumps.
-        Jump(X86Assembler::JmpSrc jmp)
-            : m_jmp(jmp)
-        {
-        }
-        
-        void link(MacroAssembler* masm)
-        {
-            masm->m_assembler.link(m_jmp, masm->m_assembler.label());
-        }
-        
-        void linkTo(Label label, MacroAssembler* masm)
-        {
-            masm->m_assembler.link(m_jmp, label.m_label);
-        }
-        
-        // FIXME: transitionary method, while we replace JmpSrces with Jumps.
-        operator X86Assembler::JmpSrc()
-        {
-            return m_jmp;
-        }
-
-        static void patch(void* address, void* destination)
-        {
-            X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
-        }
-
-    private:
-        X86Assembler::JmpSrc m_jmp;
-    };
+#if CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
 
-    // JumpList:
-    //
-    // A JumpList is a set of Jump objects.
-    // All jumps in the set will be linked to the same destination.
-    class JumpList {
-        friend class PatchBuffer;
+#elif CPU(ARM64)
+#include "MacroAssemblerARM64.h"
+namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };
 
-    public:
-        void link(MacroAssembler* masm)
-        {
-            size_t size = m_jumps.size();
-            for (size_t i = 0; i < size; ++i)
-                m_jumps[i].link(masm);
-            m_jumps.clear();
-        }
-        
-        void linkTo(Label label, MacroAssembler* masm)
-        {
-            size_t size = m_jumps.size();
-            for (size_t i = 0; i < size; ++i)
-                m_jumps[i].linkTo(label, masm);
-            m_jumps.clear();
-        }
-        
-        void append(Jump jump)
-        {
-            m_jumps.append(jump);
-        }
-        
-        void append(JumpList& other)
-        {
-            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
-        }
-
-        bool empty()
-        {
-            return !m_jumps.size();
-        }
-
-    private:
-        Vector<Jump, 16> m_jumps;
-    };
+#elif CPU(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
 
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC {
+typedef MacroAssemblerMIPS MacroAssemblerBase;
+};
 
-    // PatchBuffer:
-    //
-    // This class assists in linking code generated by the macro assembler, once code generation
-    // has been completed, and the code has been copied to its final location in memory.  At this
-    // time pointers to labels within the code may be resolved, and relative offsets to external
-    // addresses may be fixed.
-    //
-    // Specifically:
-    //   * Jump objects may be linked to external targets,
-    //   * The address of a Jump object may be taken, such that it can later be relinked.
-    //   * The return address of a Jump object representing a call may be acquired.
-    //   * The address of a Label pointing into the code may be resolved.
-    //   * The value referenced by a DataLabel may be fixed.
-    //
-    // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
-    // address of calls, as opposed to a point that can be used to later relink a Jump -
-    // possibly wrap the latter up in an object that can do just that).
-    class PatchBuffer {
-    public:
-        PatchBuffer(void* code)
-            : m_code(code)
-        {
-        }
-
-        void link(Jump jump, void* target)
-        {
-            X86Assembler::link(m_code, jump.m_jmp, target);
-        }
-
-        void link(JumpList list, void* target)
-        {
-            for (unsigned i = 0; i < list.m_jumps.size(); ++i)
-                X86Assembler::link(m_code, list.m_jumps[i], target);
-        }
-
-        void* addressOf(Jump jump)
-        {
-            return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp);
-        }
-
-        void* addressOf(Label label)
-        {
-            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
-        }
-
-        void* addressOf(DataLabelPtr label)
-        {
-            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
-        }
-
-        void* addressOf(DataLabel32 label)
-        {
-            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
-        }
-
-        void setPtr(DataLabelPtr label, void* value)
-        {
-            X86Assembler::patchAddress(m_code, label.m_label, value);
-        }
-
-    private:
-        void* m_code;
-    };
-
-    // ImmPtr:
-    //
-    // A pointer sized immediate operand to an instruction - this is wrapped
-    // in a class requiring explicit construction in order to differentiate
-    // from pointers used as absolute addresses to memory operations
-    struct ImmPtr {
-        explicit ImmPtr(void* value)
-            : m_value(value)
-        {
-        }
-
-        intptr_t asIntptr()
-        {
-            return reinterpret_cast<intptr_t>(m_value);
-        }
-
-        void* m_value;
-    };
-
-
-    // Imm32:
-    //
-    // A 32bit immediate operand to an instruction - this is wrapped in a
-    // class requiring explicit construction in order to prevent RegisterIDs
-    // (which are implemented as an enum) from accidentally being passed as
-    // immediate values.
-    struct Imm32 {
-        explicit Imm32(int32_t value)
-            : m_value(value)
-        {
-        }
-
-#if PLATFORM(X86)
-        explicit Imm32(ImmPtr ptr)
-            : m_value(ptr.asIntptr())
-        {
-        }
-#endif
+#elif CPU(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
 
-        int32_t m_value;
-    };
+#elif CPU(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
 
-    // Integer arithmetic operations:
-    //
-    // Operations are typically two operand - operation(source, srcDst)
-    // For many operations the source may be an Imm32, the srcDst operand
-    // may often be a memory location (explicitly described using an Address
-    // object).
+#elif CPU(SH4)
+#include "MacroAssemblerSH4.h"
+namespace JSC {
+typedef MacroAssemblerSH4 MacroAssemblerBase;
+};
 
-    void addPtr(RegisterID src, RegisterID dest)
-    {
-#if PLATFORM(X86_64)
-        m_assembler.addq_rr(src, dest);
 #else
-        add32(src, dest);
+#error "The MacroAssembler is not supported on this platform."
 #endif
-    }
 
-    void addPtr(Imm32 imm, RegisterID srcDest)
-    {
-#if PLATFORM(X86_64)
-        m_assembler.addq_ir(imm.m_value, srcDest);
-#else
-        add32(imm, srcDest);
-#endif
-    }
+namespace JSC {
+
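+// A typical (illustrative) use of the macro assembler, independent of which
+// MacroAssemblerBase was selected above; someReg stands in for any available
+// RegisterID:
+//
+//     MacroAssembler masm;
+//     masm.add32(MacroAssembler::TrustedImm32(1), someReg);
+//     MacroAssembler::Jump done = masm.branchTest32(MacroAssembler::Zero, someReg);
+//     // ... emit more code ...
+//     done.link(&masm);
+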
+class MacroAssembler : public MacroAssemblerBase {
+public:
 
-    void addPtr(ImmPtr imm, RegisterID dest)
+    static RegisterID nextRegister(RegisterID reg)
     {
-#if PLATFORM(X86_64)
-        move(imm, scratchRegister);
-        m_assembler.addq_rr(scratchRegister, dest);
-#else
-        add32(Imm32(imm), dest);
-#endif
+        return static_cast<RegisterID>(reg + 1);
     }
-
-    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
+    
+    static FPRegisterID nextFPRegister(FPRegisterID reg)
     {
-        m_assembler.leal_mr(imm.m_value, src, dest);
+        return static_cast<FPRegisterID>(reg + 1);
     }
-
-    void add32(RegisterID src, RegisterID dest)
+    
+    static unsigned numberOfRegisters()
     {
-        m_assembler.addl_rr(src, dest);
+        return lastRegister() - firstRegister() + 1;
     }
-
-    void add32(Imm32 imm, Address address)
+    
+    static unsigned registerIndex(RegisterID reg)
     {
-        m_assembler.addl_im(imm.m_value, address.offset, address.base);
+        return reg - firstRegister();
     }
-
-    void add32(Imm32 imm, RegisterID dest)
+    
+    static unsigned numberOfFPRegisters()
     {
-        m_assembler.addl_ir(imm.m_value, dest);
+        return lastFPRegister() - firstFPRegister() + 1;
     }
     
-    void add32(Imm32 imm, AbsoluteAddress address)
+    static unsigned fpRegisterIndex(FPRegisterID reg)
     {
-#if PLATFORM(X86_64)
-        move(ImmPtr(address.m_ptr), scratchRegister);
-        add32(imm, Address(scratchRegister));
-#else
-        m_assembler.addl_im(imm.m_value, address.m_ptr);
-#endif
+        return reg - firstFPRegister();
     }
     
-    void add32(Address src, RegisterID dest)
+    static unsigned registerIndex(FPRegisterID reg)
     {
-        m_assembler.addl_mr(src.offset, src.base, dest);
+        return fpRegisterIndex(reg) + numberOfRegisters();
     }
     
-    void andPtr(RegisterID src, RegisterID dest)
+    static unsigned totalNumberOfRegisters()
     {
-#if PLATFORM(X86_64)
-        m_assembler.andq_rr(src, dest);
-#else
-        and32(src, dest);
-#endif
+        return numberOfRegisters() + numberOfFPRegisters();
     }
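+
+    // For example (illustrative), a pass recording uses of every register can
+    // size one flat table and index GPRs and FPRs uniformly:
+    //
+    //     Vector<unsigned> counts(totalNumberOfRegisters());
+    //     counts[registerIndex(gpr)]++; // gpr is any RegisterID
+    //     counts[registerIndex(fpr)]++; // fpr is any FPRegisterID; its index lands past the GPRs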
 
-    void andPtr(Imm32 imm, RegisterID srcDest)
-    {
-#if PLATFORM(X86_64)
-        m_assembler.andq_ir(imm.m_value, srcDest);
-#else
-        and32(imm, srcDest);
+    using MacroAssemblerBase::pop;
+    using MacroAssemblerBase::jump;
+    using MacroAssemblerBase::branch32;
+    using MacroAssemblerBase::move;
+    using MacroAssemblerBase::add32;
+    using MacroAssemblerBase::and32;
+    using MacroAssemblerBase::branchAdd32;
+    using MacroAssemblerBase::branchMul32;
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
+    using MacroAssemblerBase::branchPtr;
 #endif
-    }
+    using MacroAssemblerBase::branchSub32;
+    using MacroAssemblerBase::lshift32;
+    using MacroAssemblerBase::or32;
+    using MacroAssemblerBase::rshift32;
+    using MacroAssemblerBase::store32;
+    using MacroAssemblerBase::sub32;
+    using MacroAssemblerBase::urshift32;
+    using MacroAssemblerBase::xor32;
 
-    void and32(RegisterID src, RegisterID dest)
+    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
     {
-        m_assembler.andl_rr(src, dest);
+        return value == static_cast<int32_t>(value);
     }
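+    // (That is, the offset survives a round-trip through int32_t, so it can be
+    // encoded as a sign-extended 32-bit displacement.)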
 
-    void and32(Imm32 imm, RegisterID dest)
-    {
-        m_assembler.andl_ir(imm.m_value, dest);
-    }
+    static const double twoToThe32; // This is super useful for some double code.
 
-    void lshift32(Imm32 imm, RegisterID dest)
-    {
-        m_assembler.shll_i8r(imm.m_value, dest);
+    // Utilities used by the DFG JIT.
+#if ENABLE(DFG_JIT)
+    using MacroAssemblerBase::invert;
+    
+    static DoubleCondition invert(DoubleCondition cond)
+    {
+        switch (cond) {
+        case DoubleEqual:
+            return DoubleNotEqualOrUnordered;
+        case DoubleNotEqual:
+            return DoubleEqualOrUnordered;
+        case DoubleGreaterThan:
+            return DoubleLessThanOrEqualOrUnordered;
+        case DoubleGreaterThanOrEqual:
+            return DoubleLessThanOrUnordered;
+        case DoubleLessThan:
+            return DoubleGreaterThanOrEqualOrUnordered;
+        case DoubleLessThanOrEqual:
+            return DoubleGreaterThanOrUnordered;
+        case DoubleEqualOrUnordered:
+            return DoubleNotEqual;
+        case DoubleNotEqualOrUnordered:
+            return DoubleEqual;
+        case DoubleGreaterThanOrUnordered:
+            return DoubleLessThanOrEqual;
+        case DoubleGreaterThanOrEqualOrUnordered:
+            return DoubleLessThan;
+        case DoubleLessThanOrUnordered:
+            return DoubleGreaterThanOrEqual;
+        case DoubleLessThanOrEqualOrUnordered:
+            return DoubleGreaterThan;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return DoubleEqual; // make compiler happy
+        }
     }
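+
+    // Inverting a double condition lets generated code branch on the failure
+    // case while the fast path falls through, e.g. (illustrative; left and
+    // right are any FPRegisterIDs):
+    //
+    //     Jump slowCase = branchDouble(invert(DoubleLessThan), left, right);
+    //     // ... fast path for left < right ...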
     
-    void lshift32(RegisterID shift_amount, RegisterID dest)
-    {
-        // On x86 we can only shift by ecx; if asked to shift by another register we'll
-        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86::ecx) {
-            swap(shift_amount, X86::ecx);
-
-            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
-            if (dest == shift_amount)
-                m_assembler.shll_CLr(X86::ecx);
-            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86::ecx)
-                m_assembler.shll_CLr(shift_amount);
-            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
-            else
-                m_assembler.shll_CLr(dest);
-        
-            swap(shift_amount, X86::ecx);
-        } else
-            m_assembler.shll_CLr(dest);
+    static bool isInvertible(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+        case NonZero:
+            return true;
+        default:
+            return false;
+        }
     }
     
-    // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
-    // For now, this operation has specific register requirements, and the three registers must
-    // be unique.  It is unfortunate to expose this in the MacroAssembler interface; however,
-    // given the complexity to fix, the fact that it is not uncommon for processors to have
-    // specific register requirements on this operation (e.g. MIPS places the result in 'hi'),
-    // or to not support a hardware divide at all, it may not be worth fixing.
-    void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
-    {
-#ifdef NDEBUG
-#pragma unused(dividend,remainder)
-#else
-        ASSERT((dividend == X86::eax) && (remainder == X86::edx));
-        ASSERT((dividend != divisor) && (remainder != divisor));
-#endif
-
-        m_assembler.cdq();
-        m_assembler.idivl_r(divisor);
+    static ResultCondition invert(ResultCondition cond)
+    {
+        switch (cond) {
+        case Zero:
+            return NonZero;
+        case NonZero:
+            return Zero;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return Zero; // Make compiler happy for release builds.
+        }
     }
+#endif
 
-    void mul32(RegisterID src, RegisterID dest)
+    // Platform-agnostic convenience functions,
+    // described in terms of other macro assembler methods.
+    void pop()
     {
-        m_assembler.imull_rr(src, dest);
+        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
     }
     
-    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+    void peek(RegisterID dest, int index = 0)
     {
-        m_assembler.imull_i32r(src, imm.m_value, dest);
+        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
     }
-    
-    void not32(RegisterID srcDest)
+
+    Address addressForPoke(int index)
     {
-        m_assembler.notl_r(srcDest);
+        return Address(stackPointerRegister, (index * sizeof(void*)));
     }
     
-    void orPtr(RegisterID src, RegisterID dest)
+    void poke(RegisterID src, int index = 0)
     {
-#if PLATFORM(X86_64)
-        m_assembler.orq_rr(src, dest);
-#else
-        or32(src, dest);
-#endif
+        storePtr(src, addressForPoke(index));
     }
 
-    void orPtr(ImmPtr imm, RegisterID dest)
+    void poke(TrustedImm32 value, int index = 0)
     {
-#if PLATFORM(X86_64)
-        move(imm, scratchRegister);
-        m_assembler.orq_rr(scratchRegister, dest);
-#else
-        or32(Imm32(imm), dest);
-#endif
+        store32(value, addressForPoke(index));
     }
 
-    void orPtr(Imm32 imm, RegisterID dest)
+    void poke(TrustedImmPtr imm, int index = 0)
     {
-#if PLATFORM(X86_64)
-        m_assembler.orq_ir(imm.m_value, dest);
-#else
-        or32(imm, dest);
-#endif
+        storePtr(imm, addressForPoke(index));
     }
 
-    void or32(RegisterID src, RegisterID dest)
+#if !CPU(ARM64)
+    void pushToSave(RegisterID src)
     {
-        m_assembler.orl_rr(src, dest);
+        push(src);
     }
-
-    void or32(Imm32 imm, RegisterID dest)
+    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
     {
-        m_assembler.orl_ir(imm.m_value, dest);
+        push(imm);
     }
-
-    void rshiftPtr(RegisterID shift_amount, RegisterID dest)
+    void popToRestore(RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        // On x86 we can only shift by ecx; if asked to shift by another register we'll
-        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86::ecx) {
-            swap(shift_amount, X86::ecx);
-
-            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
-            if (dest == shift_amount)
-                m_assembler.sarq_CLr(X86::ecx);
-            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86::ecx)
-                m_assembler.sarq_CLr(shift_amount);
-            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
-            else
-                m_assembler.sarq_CLr(dest);
-        
-            swap(shift_amount, X86::ecx);
-        } else
-            m_assembler.sarq_CLr(dest);
-#else
-        rshift32(shift_amount, dest);
-#endif
+        pop(dest);
     }
-
-    void rshiftPtr(Imm32 imm, RegisterID dest)
+    void pushToSave(FPRegisterID src)
     {
-#if PLATFORM(X86_64)
-        m_assembler.sarq_i8r(imm.m_value, dest);
-#else
-        rshift32(imm, dest);
-#endif
+        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
+        storeDouble(src, stackPointerRegister);
     }
-
-    void rshift32(RegisterID shift_amount, RegisterID dest)
+    void popToRestore(FPRegisterID dest)
     {
-        // On x86 we can only shift by ecx; if asked to shift by another register we'll
-        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
-        if (shift_amount != X86::ecx) {
-            swap(shift_amount, X86::ecx);
-
-            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
-            if (dest == shift_amount)
-                m_assembler.sarl_CLr(X86::ecx);
-            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
-            else if (dest == X86::ecx)
-                m_assembler.sarl_CLr(shift_amount);
-            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
-            else
-                m_assembler.sarl_CLr(dest);
-        
-            swap(shift_amount, X86::ecx);
-        } else
-            m_assembler.sarl_CLr(dest);
+        loadDouble(stackPointerRegister, dest);
+        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
     }
+    
+    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
+#endif // !CPU(ARM64)
 
-    void rshift32(Imm32 imm, RegisterID dest)
+#if CPU(X86_64) || CPU(ARM64)
+    void peek64(RegisterID dest, int index = 0)
     {
-        m_assembler.sarl_i8r(imm.m_value, dest);
+        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
     }
 
-    void subPtr(RegisterID src, RegisterID dest)
+    void poke(TrustedImm64 value, int index = 0)
     {
-#if PLATFORM(X86_64)
-        m_assembler.subq_rr(src, dest);
-#else
-        sub32(src, dest);
-#endif
+        store64(value, addressForPoke(index));
     }
-    
-    void subPtr(Imm32 imm, RegisterID dest)
+
+    void poke64(RegisterID src, int index = 0)
     {
-#if PLATFORM(X86_64)
-        m_assembler.subq_ir(imm.m_value, dest);
-#else
-        sub32(imm, dest);
-#endif
+        store64(src, addressForPoke(index));
     }
+#endif
     
-    void subPtr(ImmPtr imm, RegisterID dest)
+#if CPU(MIPS)
+    void poke(FPRegisterID src, int index = 0)
     {
-#if PLATFORM(X86_64)
-        move(imm, scratchRegister);
-        m_assembler.subq_rr(scratchRegister, dest);
-#else
-        sub32(Imm32(imm), dest);
+        ASSERT(!(index & 1));
+        storeDouble(src, addressForPoke(index));
+    }
 #endif
+
+    // Immediate shifts only have 5 controllable bits
+    // so we'll consider them safe for now.
+    TrustedImm32 trustedImm32ForShift(Imm32 imm)
+    {
+        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
     }
 
-    void sub32(RegisterID src, RegisterID dest)
+    // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
+    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
     {
-        m_assembler.subl_rr(src, dest);
+        branchPtr(cond, op1, imm).linkTo(target, this);
     }
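+    // E.g. (illustrative) a counted loop, linking a later branch back to an
+    // earlier label; counterReg stands in for any RegisterID:
+    //
+    //     Label top = label();
+    //     sub32(TrustedImm32(1), counterReg);
+    //     branch32(NotEqual, counterReg, TrustedImm32(0), top);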
-    
-    void sub32(Imm32 imm, RegisterID dest)
+    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
     {
-        m_assembler.subl_ir(imm.m_value, dest);
+        branchPtr(cond, op1, imm).linkTo(target, this);
     }
-    
-    void sub32(Imm32 imm, Address address)
+
+    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
     {
-        m_assembler.subl_im(imm.m_value, address.offset, address.base);
+        branch32(cond, op1, op2).linkTo(target, this);
     }
 
-    void sub32(Imm32 imm, AbsoluteAddress address)
+    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
     {
-#if PLATFORM(X86_64)
-        move(ImmPtr(address.m_ptr), scratchRegister);
-        sub32(imm, Address(scratchRegister));
-#else
-        m_assembler.subl_im(imm.m_value, address.m_ptr);
-#endif
+        branch32(cond, op1, imm).linkTo(target, this);
+    }
+    
+    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
+    {
+        branch32(cond, op1, imm).linkTo(target, this);
     }
 
-    void sub32(Address src, RegisterID dest)
+    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
     {
-        m_assembler.subl_mr(src.offset, src.base, dest);
+        branch32(cond, left, right).linkTo(target, this);
     }
 
-    void xorPtr(RegisterID src, RegisterID dest)
+    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
     {
-#if PLATFORM(X86_64)
-        m_assembler.xorq_rr(src, dest);
-#else
-        xor32(src, dest);
-#endif
+        return branch32(commute(cond), right, left);
     }
 
-    void xorPtr(Imm32 imm, RegisterID srcDest)
+    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
     {
-#if PLATFORM(X86_64)
-        m_assembler.xorq_ir(imm.m_value, srcDest);
-#else
-        xor32(imm, srcDest);
-#endif
+        return branch32(commute(cond), right, left);
     }
 
-    void xor32(RegisterID src, RegisterID dest)
+    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
     {
-        m_assembler.xorl_rr(src, dest);
+        branchTestPtr(cond, reg).linkTo(target, this);
     }
 
-    void xor32(Imm32 imm, RegisterID srcDest)
+#if !CPU(ARM_THUMB2) && !CPU(ARM64)
+    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
     {
-        m_assembler.xorl_ir(imm.m_value, srcDest);
+        return PatchableJump(branchPtr(cond, left, right));
     }
     
-
-    // Memory access operations:
-    //
-    // Loads are of the form load(address, destination) and stores of the form
-    // store(source, address).  The source for a store may be an Imm32.  Address
-    // operand objects to loads and store will be implicitly constructed if a
-    // register is passed.
-
-    void loadPtr(ImplicitAddress address, RegisterID dest)
+    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_mr(address.offset, address.base, dest);
-#else
-        load32(address, dest);
-#endif
+        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
     }
 
-    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
-        return DataLabel32(this);
-#else
-        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
-        return DataLabel32(this);
-#endif
+        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
     }
 
-    void loadPtr(BaseIndex address, RegisterID dest)
+#if !CPU(ARM_TRADITIONAL)
+    PatchableJump patchableJump()
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
-#else
-        load32(address, dest);
-#endif
+        return PatchableJump(jump());
     }
 
-    void loadPtr(void* address, RegisterID dest)
+    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
-#if PLATFORM(X86_64)
-        if (dest == X86::eax)
-            m_assembler.movq_mEAX(address);
-        else {
-            move(X86::eax, dest);
-            m_assembler.movq_mEAX(address);
-            swap(X86::eax, dest);
-        }
-#else
-        load32(address, dest);
-#endif
+        return PatchableJump(branchTest32(cond, reg, mask));
     }
 
-    void load32(ImplicitAddress address, RegisterID dest)
+    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
     {
-        m_assembler.movl_mr(address.offset, address.base, dest);
+        return PatchableJump(branch32(cond, reg, imm));
     }
 
-    void load32(BaseIndex address, RegisterID dest)
+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
     {
-        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+        return PatchableJump(branch32(cond, address, imm));
     }
+#endif
+#endif
 
-    void load32(void* address, RegisterID dest)
+    void jump(Label target)
     {
-#if PLATFORM(X86_64)
-        if (dest == X86::eax)
-            m_assembler.movl_mEAX(address);
-        else {
-            move(X86::eax, dest);
-            m_assembler.movl_mEAX(address);
-            swap(X86::eax, dest);
+        jump().linkTo(target, this);
+    }
+
+    // Commute a relational condition; returns a new condition that will produce
+    // the same results given the same inputs but with their positions exchanged.
+    static RelationalCondition commute(RelationalCondition condition)
+    {
+        switch (condition) {
+        case Above:
+            return Below;
+        case AboveOrEqual:
+            return BelowOrEqual;
+        case Below:
+            return Above;
+        case BelowOrEqual:
+            return AboveOrEqual;
+        case GreaterThan:
+            return LessThan;
+        case GreaterThanOrEqual:
+            return LessThanOrEqual;
+        case LessThan:
+            return GreaterThan;
+        case LessThanOrEqual:
+            return GreaterThanOrEqual;
+        default:
+            break;
         }
-#else
-        m_assembler.movl_mr(address, dest);
-#endif
+
+        ASSERT(condition == Equal || condition == NotEqual);
+        return condition;
     }
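+    // E.g. commute(LessThan) == GreaterThan, which is how the
+    // branch32(cond, TrustedImm32, RegisterID) overloads above swap their
+    // operands into the canonical (register, immediate) order.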
 
-    void load16(BaseIndex address, RegisterID dest)
+    static const unsigned BlindingModulus = 64;
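+    // True roughly once per BlindingModulus calls: the low six bits of
+    // random() must all be zero.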
+    bool shouldConsiderBlinding()
     {
-        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+        return !(random() & (BlindingModulus - 1));
     }
 
-    void storePtr(RegisterID src, ImplicitAddress address)
+    // Ptr methods
+    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
+    // FIXME: should this use a test for 32-bitness instead of this specific exception?
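+    // For example, in the 32-bit branch below loadPtr(address, dest) is just
+    // load32(address, dest) and addPtr(imm, reg) is add32(imm, reg); the
+    // 64-bit branch maps the same calls onto load64, add64, and friends.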
+#if !CPU(X86_64) && !CPU(ARM64)
+    void addPtr(Address src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_rm(src, address.offset, address.base);
-#else
-        store32(src, address);
-#endif
+        add32(src, dest);
     }
 
-    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+    void addPtr(AbsoluteAddress src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_rm_disp32(src, address.offset, address.base);
-        return DataLabel32(this);
-#else
-        m_assembler.movl_rm_disp32(src, address.offset, address.base);
-        return DataLabel32(this);
-#endif
+        add32(src, dest);
     }
 
-    void storePtr(RegisterID src, BaseIndex address)
+    void addPtr(RegisterID src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
-#else
-        store32(src, address);
-#endif
+        add32(src, dest);
     }
 
-    void storePtr(ImmPtr imm, ImplicitAddress address)
+    void addPtr(TrustedImm32 imm, RegisterID srcDest)
     {
-#if PLATFORM(X86_64)
-        move(imm, scratchRegister);
-        storePtr(scratchRegister, address);
-#else
-        m_assembler.movl_i32m(imm.asIntptr(), address.offset, address.base);
-#endif
+        add32(imm, srcDest);
     }
 
-#if !PLATFORM(X86_64)
-    void storePtr(ImmPtr imm, void* address)
+    void addPtr(TrustedImmPtr imm, RegisterID dest)
     {
-        store32(Imm32(imm), address);
+        add32(TrustedImm32(imm), dest);
     }
-#endif
 
-    DataLabelPtr storePtrWithPatch(Address address)
+    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_i64r(0, scratchRegister);
-        DataLabelPtr label(this);
-        storePtr(scratchRegister, address);
-        return label;
-#else
-        m_assembler.movl_i32m(0, address.offset, address.base);
-        return DataLabelPtr(this);
-#endif
+        add32(imm, src, dest);
+    }
+
+    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        add32(imm, address);
+    }
+    
+    void andPtr(RegisterID src, RegisterID dest)
+    {
+        and32(src, dest);
     }
 
-    void store32(RegisterID src, ImplicitAddress address)
+    void andPtr(TrustedImm32 imm, RegisterID srcDest)
     {
-        m_assembler.movl_rm(src, address.offset, address.base);
+        and32(imm, srcDest);
     }
 
-    void store32(RegisterID src, BaseIndex address)
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
     {
-        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+        and32(TrustedImm32(imm), srcDest);
     }
 
-    void store32(Imm32 imm, ImplicitAddress address)
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
     {
-        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+        lshift32(trustedImm32ForShift(imm), srcDest);
     }
     
-    void store32(Imm32 imm, void* address)
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
     {
-#if PLATFORM(X86_64)
-        move(X86::eax, scratchRegister);
-        move(imm, X86::eax);
-        m_assembler.movl_EAXm(address);
-        move(scratchRegister, X86::eax);
-#else
-        m_assembler.movl_i32m(imm.m_value, address);
-#endif
+        rshift32(trustedImm32ForShift(imm), srcDest);
     }
 
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        urshift32(trustedImm32ForShift(imm), srcDest);
+    }
 
-    // Stack manipulation operations:
-    //
-    // The ABI is assumed to provide a stack abstraction to memory,
-    // containing machine word sized units of data.  Push and pop
-    // operations add and remove a single register sized unit of data
-    // to or from the stack.  Peek and poke operations read or write
-    // values on the stack, without moving the current stack position.
-    
-    void pop(RegisterID dest)
+    void negPtr(RegisterID dest)
     {
-        m_assembler.pop_r(dest);
+        neg32(dest);
     }
 
-    void push(RegisterID src)
+    void orPtr(RegisterID src, RegisterID dest)
     {
-        m_assembler.push_r(src);
+        or32(src, dest);
     }
 
-    void push(Address address)
+    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
     {
-        m_assembler.push_m(address.offset, address.base);
+        or32(op1, op2, dest);
     }
 
-    void push(Imm32 imm)
+    void orPtr(TrustedImmPtr imm, RegisterID dest)
     {
-        m_assembler.push_i32(imm.m_value);
+        or32(TrustedImm32(imm), dest);
     }
 
-    void pop()
+    void orPtr(TrustedImm32 imm, RegisterID dest)
     {
-        addPtr(Imm32(sizeof(void*)), X86::esp);
+        or32(imm, dest);
+    }
+
+    void subPtr(RegisterID src, RegisterID dest)
+    {
+        sub32(src, dest);
     }
     
-    void peek(RegisterID dest, int index = 0)
+    void subPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        sub32(imm, dest);
+    }
+    
+    void subPtr(TrustedImmPtr imm, RegisterID dest)
     {
-        loadPtr(Address(X86::esp, (index * sizeof(void *))), dest);
+        sub32(TrustedImm32(imm), dest);
     }
 
-    void poke(RegisterID src, int index = 0)
+    void xorPtr(RegisterID src, RegisterID dest)
     {
-        storePtr(src, Address(X86::esp, (index * sizeof(void *))));
+        xor32(src, dest);
     }
 
-    void poke(Imm32 value, int index = 0)
+    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
     {
-        store32(value, Address(X86::esp, (index * sizeof(void *))));
+        xor32(imm, srcDest);
     }
 
-    void poke(ImmPtr imm, int index = 0)
+
+    void loadPtr(ImplicitAddress address, RegisterID dest)
     {
-        storePtr(imm, Address(X86::esp, (index * sizeof(void *))));
+        load32(address, dest);
     }
 
-    // Register move operations:
-    //
-    // Move values in registers.
+    void loadPtr(BaseIndex address, RegisterID dest)
+    {
+        load32(address, dest);
+    }
 
-    void move(Imm32 imm, RegisterID dest)
+    void loadPtr(const void* address, RegisterID dest)
     {
-        // Note: on 64-bit the Imm32 value is zero extended into the register; it
-        // may be useful to have a separate version that sign extends the value?
-        if (!imm.m_value)
-            m_assembler.xorl_rr(dest, dest);
-        else
-            m_assembler.movl_i32r(imm.m_value, dest);
+        load32(address, dest);
     }
 
-    void move(RegisterID src, RegisterID dest)
+    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
     {
-        // Note: on 64-bit this is a full register move; perhaps it would be
-        // useful to have separate move32 & movePtr, with move32 zero extending?
-#if PLATFORM(X86_64)
-        m_assembler.movq_rr(src, dest);
-#else
-        m_assembler.movl_rr(src, dest);
-#endif
+        return load32WithAddressOffsetPatch(address, dest);
+    }
+    
+    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load32WithCompactAddressOffsetPatch(address, dest);
     }
 
     void move(ImmPtr imm, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
-            m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
-        else
-            m_assembler.movq_i64r(imm.asIntptr(), dest);
-#else
-        m_assembler.movl_i32r(imm.asIntptr(), dest);
-#endif
+        move(Imm32(imm.asTrustedImmPtr()), dest);
     }
 
-    void swap(RegisterID reg1, RegisterID reg2)
+    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.xchgq_rr(reg1, reg2);
-#else
-        m_assembler.xchgl_rr(reg1, reg2);
-#endif
+        compare32(cond, left, right, dest);
     }
 
-    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movsxd_rr(src, dest);
-#else
-        if (src != dest)
-            move(src, dest);
-#endif
+        compare32(cond, left, right, dest);
     }
-
-    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    
+    void storePtr(RegisterID src, ImplicitAddress address)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movl_rr(src, dest);
-#else
-        if (src != dest)
-            move(src, dest);
-#endif
+        store32(src, address);
     }
 
+    void storePtr(RegisterID src, BaseIndex address)
+    {
+        store32(src, address);
+    }
 
-    // Forwards / external control flow operations:
-    //
-    // This set of jump and conditional branch operations return a Jump
-    // object which may linked at a later point, allow forwards jump,
-    // or jumps that will require external linkage (after the code has been
-    // relocated).
-    //
-    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
-    // respectively; for unsigned comparisons the names b, a, be, and ae are
-    // used (representing the names 'below' and 'above').
-    //
-    // Operands to the comparison are provided in the expected order, e.g.
-    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
-    // treated as a signed 32bit value, is less than or equal to 5.
-    //
-    // jz and jnz test whether the first operand is equal to zero, and take
-    // an optional second operand of a mask under which to perform the test.
+    void storePtr(RegisterID src, void* address)
+    {
+        store32(src, address);
+    }
 
-private:
-    void compareImm32ForBranch(RegisterID left, int32_t right)
+    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
     {
-        m_assembler.cmpl_ir(right, left);
+        store32(TrustedImm32(imm), address);
     }
-
-    void compareImm32ForBranchEquality(RegisterID reg, int32_t imm)
+    
+    void storePtr(ImmPtr imm, Address address)
     {
-        if (!imm)
-            m_assembler.testl_rr(reg, reg);
-        else
-            m_assembler.cmpl_ir(imm, reg);
+        store32(Imm32(imm.asTrustedImmPtr()), address);
     }
 
-    void compareImm32ForBranchEquality(Address address, int32_t imm)
+    void storePtr(TrustedImmPtr imm, void* address)
     {
-        m_assembler.cmpl_im(imm, address.offset, address.base);
+        store32(TrustedImm32(imm), address);
     }
 
-    void testImm32(RegisterID reg, Imm32 mask)
+    void storePtr(TrustedImm32 imm, ImplicitAddress address)
     {
-        // if we are only interested in the low seven bits, this can be tested with a testb
-        if (mask.m_value == -1)
-            m_assembler.testl_rr(reg, reg);
-        else if ((mask.m_value & ~0x7f) == 0)
-            m_assembler.testb_i8r(mask.m_value, reg);
-        else
-            m_assembler.testl_i32r(mask.m_value, reg);
+        store32(imm, address);
     }
 
-    void testImm32(Address address, Imm32 mask)
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
     {
-        if (mask.m_value == -1)
-            m_assembler.cmpl_im(0, address.offset, address.base);
-        else
-            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+        store32(TrustedImm32(imm), address);
     }
 
-    void testImm32(BaseIndex address, Imm32 mask)
+    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
     {
-        if (mask.m_value == -1)
-            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
-        else
-            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+        return store32WithAddressOffsetPatch(src, address);
     }
 
-#if PLATFORM(X86_64)
-    void compareImm64ForBranch(RegisterID left, int32_t right)
+    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
     {
-        m_assembler.cmpq_ir(right, left);
+        return branch32(cond, left, right);
     }
 
-    void compareImm64ForBranchEquality(RegisterID reg, int32_t imm)
+    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
     {
-        if (!imm)
-            m_assembler.testq_rr(reg, reg);
-        else
-            m_assembler.cmpq_ir(imm, reg);
+        return branch32(cond, left, TrustedImm32(right));
+    }
+    
+    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+    {
+        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
     }
 
-    void testImm64(RegisterID reg, Imm32 mask)
+    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
     {
-        // if we are only interested in the low seven bits, this can be tested with a testb
-        if (mask.m_value == -1)
-            m_assembler.testq_rr(reg, reg);
-        else if ((mask.m_value & ~0x7f) == 0)
-            m_assembler.testb_i8r(mask.m_value, reg);
-        else
-            m_assembler.testq_i32r(mask.m_value, reg);
+        return branch32(cond, left, right);
     }
 
-    void testImm64(Address address, Imm32 mask)
+    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
     {
-        if (mask.m_value == -1)
-            m_assembler.cmpq_im(0, address.offset, address.base);
-        else
-            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
+        return branch32(cond, left, right);
     }
 
-    void testImm64(BaseIndex address, Imm32 mask)
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
     {
-        if (mask.m_value == -1)
-            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
-        else
-            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+        return branch32(cond, left, right);
     }
-#endif
 
-public:
-    Jump ja32(RegisterID left, Imm32 right)
+    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
     {
-        compareImm32ForBranch(left, right.m_value);
-        return Jump(m_assembler.ja());
+        return branch32(cond, left, TrustedImm32(right));
     }
     
-    Jump jaePtr(RegisterID left, RegisterID right)
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rr(right, left);
-        return Jump(m_assembler.jae());
-#else
-        return jae32(left, right);
-#endif
+        return branch32(cond, left, TrustedImm32(right));
     }
 
-    Jump jaePtr(RegisterID reg, ImmPtr ptr)
+    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        intptr_t imm = ptr.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            compareImm64ForBranch(reg, imm);
-            return Jump(m_assembler.jae());
-        } else {
-            move(ptr, scratchRegister);
-            return jaePtr(reg, scratchRegister);
-        }
-#else
-        return jae32(reg, Imm32(ptr));
-#endif
+        return branchSub32(cond, src, dest);
     }
 
-    Jump jae32(RegisterID left, RegisterID right)
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
-        m_assembler.cmpl_rr(right, left);
-        return Jump(m_assembler.jae());
+        return branchTest32(cond, reg, mask);
     }
 
-    Jump jae32(RegisterID left, Imm32 right)
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
-        compareImm32ForBranch(left, right.m_value);
-        return Jump(m_assembler.jae());
+        return branchTest32(cond, reg, mask);
     }
-    
-    Jump jae32(RegisterID left, Address right)
+
+    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        m_assembler.cmpl_mr(right.offset, right.base, left);
-        return Jump(m_assembler.jae());
+        return branchTest32(cond, address, mask);
     }
-    
-    Jump jae32(Address left, RegisterID right)
+
+    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        m_assembler.cmpl_rm(right, left.offset, left.base);
-        return Jump(m_assembler.jae());
+        return branchTest32(cond, address, mask);
     }
-    
-    Jump jbPtr(RegisterID left, RegisterID right)
+
+    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rr(right, left);
-        return Jump(m_assembler.jb());
-#else
-        return jb32(left, right);
-#endif
+        return branchAdd32(cond, src, dest);
     }
 
-    Jump jbPtr(RegisterID reg, ImmPtr ptr)
+    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        intptr_t imm = ptr.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            compareImm64ForBranch(reg, imm);
-            return Jump(m_assembler.jb());
-        } else {
-            move(ptr, scratchRegister);
-            return jbPtr(reg, scratchRegister);
-        }
-#else
-        return jb32(reg, Imm32(ptr));
-#endif
+        return branchSub32(cond, imm, dest);
     }
-
-    Jump jb32(RegisterID left, RegisterID right)
+    using MacroAssemblerBase::branchTest8;
+    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        m_assembler.cmpl_rr(right, left);
-        return Jump(m_assembler.jb());
+        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
     }
 
-    Jump jb32(RegisterID left, Imm32 right)
+#else // !CPU(X86_64) && !CPU(ARM64)
+
+    void addPtr(RegisterID src, RegisterID dest)
     {
-        compareImm32ForBranch(left, right.m_value);
-        return Jump(m_assembler.jb());
+        add64(src, dest);
     }
     
-    Jump jb32(RegisterID left, Address right)
+    void addPtr(Address src, RegisterID dest)
     {
-        m_assembler.cmpl_mr(right.offset, right.base, left);
-        return Jump(m_assembler.jb());
+        add64(src, dest);
     }
-    
-    Jump jePtr(RegisterID op1, RegisterID op2)
+
+    void addPtr(TrustedImm32 imm, RegisterID srcDest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rr(op1, op2);
-        return Jump(m_assembler.je());
-#else
-        return je32(op1, op2);
-#endif
+        add64(imm, srcDest);
     }
 
-    Jump jePtr(RegisterID reg, Address address)
+    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rm(reg, address.offset, address.base);
-#else
-        m_assembler.cmpl_rm(reg, address.offset, address.base);
-#endif
-        return Jump(m_assembler.je());
+        add64(imm, src, dest);
     }
 
-    Jump jePtr(RegisterID reg, ImmPtr ptr)
+    void addPtr(TrustedImm32 imm, Address address)
     {
-#if PLATFORM(X86_64)
-        intptr_t imm = ptr.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            compareImm64ForBranchEquality(reg, imm);
-            return Jump(m_assembler.je());
-        } else {
-            move(ptr, scratchRegister);
-            return jePtr(scratchRegister, reg);
-        }
-#else
-        return je32(reg, Imm32(ptr));
-#endif
+        add64(imm, address);
     }
 
-    Jump jePtr(Address address, ImmPtr imm)
+    void addPtr(AbsoluteAddress src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        move(imm, scratchRegister);
-        return jePtr(scratchRegister, address);
-#else
-        return je32(address, Imm32(imm));
-#endif
+        add64(src, dest);
     }
 
-    Jump je32(RegisterID op1, RegisterID op2)
+    void addPtr(TrustedImmPtr imm, RegisterID dest)
     {
-        m_assembler.cmpl_rr(op1, op2);
-        return Jump(m_assembler.je());
+        add64(TrustedImm64(imm), dest);
     }
-    
-    Jump je32(Address op1, RegisterID op2)
+
+    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
     {
-        m_assembler.cmpl_mr(op1.offset, op1.base, op2);
-        return Jump(m_assembler.je());
+        add64(imm, address);
     }
-    
-    Jump je32(RegisterID reg, Imm32 imm)
+
+    void andPtr(RegisterID src, RegisterID dest)
     {
-        compareImm32ForBranchEquality(reg, imm.m_value);
-        return Jump(m_assembler.je());
+        and64(src, dest);
     }
 
-    Jump je32(Address address, Imm32 imm)
+    void andPtr(TrustedImm32 imm, RegisterID srcDest)
     {
-        compareImm32ForBranchEquality(address, imm.m_value);
-        return Jump(m_assembler.je());
+        and64(imm, srcDest);
     }
     
-    Jump je16(RegisterID op1, BaseIndex op2)
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
     {
-        m_assembler.cmpw_rm(op1, op2.offset, op2.base, op2.index, op2.scale);
-        return Jump(m_assembler.je());
+        and64(imm, srcDest);
     }
     
-    Jump jg32(RegisterID left, RegisterID right)
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
     {
-        m_assembler.cmpl_rr(right, left);
-        return Jump(m_assembler.jg());
+        lshift64(trustedImm32ForShift(imm), srcDest);
     }
 
-    Jump jg32(RegisterID reg, Address address)
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
     {
-        m_assembler.cmpl_mr(address.offset, address.base, reg);
-        return Jump(m_assembler.jg());
+        rshift64(trustedImm32ForShift(imm), srcDest);
     }
 
-    Jump jgePtr(RegisterID left, RegisterID right)
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rr(right, left);
-        return Jump(m_assembler.jge());
-#else
-        return jge32(left, right);
-#endif
+        urshift64(trustedImm32ForShift(imm), srcDest);
     }
 
-    Jump jgePtr(RegisterID reg, ImmPtr ptr)
+    void negPtr(RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        intptr_t imm = ptr.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            compareImm64ForBranch(reg, imm);
-            return Jump(m_assembler.jge());
-        } else {
-            move(ptr, scratchRegister);
-            return jgePtr(reg, scratchRegister);
-        }
-#else
-        return jge32(reg, Imm32(ptr));
-#endif
+        neg64(dest);
     }
 
-    Jump jge32(RegisterID left, RegisterID right)
+    void orPtr(RegisterID src, RegisterID dest)
     {
-        m_assembler.cmpl_rr(right, left);
-        return Jump(m_assembler.jge());
+        or64(src, dest);
     }
 
-    Jump jge32(RegisterID left, Imm32 right)
+    void orPtr(TrustedImm32 imm, RegisterID dest)
     {
-        compareImm32ForBranch(left, right.m_value);
-        return Jump(m_assembler.jge());
+        or64(imm, dest);
     }
 
-    Jump jlPtr(RegisterID left, RegisterID right)
+    void orPtr(TrustedImmPtr imm, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rr(right, left);
-        return Jump(m_assembler.jl());
-#else
-        return jl32(left, right);
-#endif
+        or64(TrustedImm64(imm), dest);
     }
 
-    Jump jlPtr(RegisterID reg, ImmPtr ptr)
+    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        intptr_t imm = ptr.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            compareImm64ForBranch(reg, imm);
-            return Jump(m_assembler.jl());
-        } else {
-            move(ptr, scratchRegister);
-            return jlPtr(reg, scratchRegister);
-        }
-#else
-        return jl32(reg, Imm32(ptr));
-#endif
+        or64(op1, op2, dest);
     }
 
-    Jump jl32(RegisterID left, RegisterID right)
+    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
-        m_assembler.cmpl_rr(right, left);
-        return Jump(m_assembler.jl());
+        or64(imm, src, dest);
     }
     
-    Jump jl32(RegisterID left, Imm32 right)
+    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
     {
-        compareImm32ForBranch(left, right.m_value);
-        return Jump(m_assembler.jl());
+        rotateRight64(imm, srcDst);
     }
 
-    Jump jlePtr(RegisterID left, RegisterID right)
+    void subPtr(RegisterID src, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rr(right, left);
-        return Jump(m_assembler.jle());
-#else
-        return jle32(left, right);
-#endif
+        sub64(src, dest);
     }
-
-    Jump jlePtr(RegisterID reg, ImmPtr ptr)
+    
+    void subPtr(TrustedImm32 imm, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        intptr_t imm = ptr.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            compareImm64ForBranch(reg, imm);
-            return Jump(m_assembler.jle());
-        } else {
-            move(ptr, scratchRegister);
-            return jlePtr(reg, scratchRegister);
-        }
-#else
-        return jle32(reg, Imm32(ptr));
-#endif
+        sub64(imm, dest);
+    }
+    
+    void subPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        sub64(TrustedImm64(imm), dest);
     }
 
-    Jump jle32(RegisterID left, RegisterID right)
+    void xorPtr(RegisterID src, RegisterID dest)
     {
-        m_assembler.cmpl_rr(right, left);
-        return Jump(m_assembler.jle());
+        xor64(src, dest);
     }
     
-    Jump jle32(RegisterID left, Imm32 right)
+    void xorPtr(RegisterID src, Address dest)
     {
-        compareImm32ForBranch(left, right.m_value);
-        return Jump(m_assembler.jle());
+        xor64(src, dest);
     }
 
-    Jump jnePtr(RegisterID op1, RegisterID op2)
+    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rr(op1, op2);
-        return Jump(m_assembler.jne());
-#else
-        return jne32(op1, op2);
-#endif
+        xor64(imm, srcDest);
     }
 
-    Jump jnePtr(RegisterID reg, Address address)
+    void loadPtr(ImplicitAddress address, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.cmpq_rm(reg, address.offset, address.base);
-#else
-        m_assembler.cmpl_rm(reg, address.offset, address.base);
-#endif
-        return Jump(m_assembler.jne());
+        load64(address, dest);
     }
 
-    Jump jnePtr(RegisterID reg, AbsoluteAddress address)
+    void loadPtr(BaseIndex address, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        move(ImmPtr(address.m_ptr), scratchRegister);
-        return jnePtr(reg, Address(scratchRegister));
-#else
-        m_assembler.cmpl_rm(reg, address.m_ptr);
-        return Jump(m_assembler.jne());
-#endif
+        load64(address, dest);
     }
 
-    Jump jnePtr(RegisterID reg, ImmPtr ptr)
+    void loadPtr(const void* address, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        intptr_t imm = ptr.asIntptr();
-        if (CAN_SIGN_EXTEND_32_64(imm)) {
-            compareImm64ForBranchEquality(reg, imm);
-            return Jump(m_assembler.jne());
-        } else {
-            move(ptr, scratchRegister);
-            return jnePtr(scratchRegister, reg);
-        }
-#else
-        return jne32(reg, Imm32(ptr));
-#endif
+        load64(address, dest);
     }
 
-    Jump jnePtr(Address address, ImmPtr imm)
+    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        move(imm, scratchRegister);
-        return jnePtr(scratchRegister, address);
-#else
-        return jne32(address, Imm32(imm));
-#endif
+        return load64WithAddressOffsetPatch(address, dest);
+    }
+    
+    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load64WithCompactAddressOffsetPatch(address, dest);
     }
 
-#if !PLATFORM(X86_64)
-    Jump jnePtr(AbsoluteAddress address, ImmPtr imm)
+    void storePtr(RegisterID src, ImplicitAddress address)
     {
-        m_assembler.cmpl_im(imm.asIntptr(), address.m_ptr);
-        return Jump(m_assembler.jne());
+        store64(src, address);
     }
-#endif
 
-    Jump jnePtrWithPatch(RegisterID reg, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0))
+    void storePtr(RegisterID src, BaseIndex address)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister);
-        dataLabel = DataLabelPtr(this);
-        return jnePtr(scratchRegister, reg);
-#else
-        m_assembler.cmpl_ir_force32(initialValue.asIntptr(), reg);
-        dataLabel = DataLabelPtr(this);
-        return Jump(m_assembler.jne());
-#endif
+        store64(src, address);
+    }
+    
+    void storePtr(RegisterID src, void* address)
+    {
+        store64(src, address);
     }
 
-    Jump jnePtrWithPatch(Address address, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0))
+    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
     {
-#if PLATFORM(X86_64)
-        m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister);
-        dataLabel = DataLabelPtr(this);
-        return jnePtr(scratchRegister, address);
-#else
-        m_assembler.cmpl_im_force32(initialValue.asIntptr(), address.offset, address.base);
-        dataLabel = DataLabelPtr(this);
-        return Jump(m_assembler.jne());
-#endif
+        store64(TrustedImm64(imm), address);
     }
 
-    Jump jne32(RegisterID op1, RegisterID op2)
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
     {
-        m_assembler.cmpl_rr(op1, op2);
-        return Jump(m_assembler.jne());
+        store64(TrustedImm64(imm), address);
     }
 
-    Jump jne32(RegisterID reg, Imm32 imm)
+    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
     {
-        compareImm32ForBranchEquality(reg, imm.m_value);
-        return Jump(m_assembler.jne());
+        return store64WithAddressOffsetPatch(src, address);
     }
 
-    Jump jne32(Address address, Imm32 imm)
+    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
-        compareImm32ForBranchEquality(address, imm.m_value);
-        return Jump(m_assembler.jne());
+        compare64(cond, left, right, dest);
     }
     
-    Jump jne32(Address address, RegisterID reg)
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
     {
-        m_assembler.cmpl_rm(reg, address.offset, address.base);
-        return Jump(m_assembler.jne());
+        compare64(cond, left, right, dest);
     }
     
-    Jump jnzPtr(RegisterID reg, RegisterID mask)
+    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        m_assembler.testq_rr(reg, mask);
-        return Jump(m_assembler.jne());
-#else
-        return jnz32(reg, mask);
-#endif
+        test64(cond, reg, mask, dest);
     }
 
-    Jump jnzPtr(RegisterID reg, Imm32 mask = Imm32(-1))
+    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
     {
-#if PLATFORM(X86_64)
-        testImm64(reg, mask);
-        return Jump(m_assembler.jne());
-#else
-        return jnz32(reg, mask);
-#endif
+        test64(cond, reg, mask, dest);
     }
 
-    Jump jnzPtr(RegisterID reg, ImmPtr mask)
+    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
     {
-#if PLATFORM(X86_64)
-        move(mask, scratchRegister);
-        m_assembler.testq_rr(scratchRegister, reg);
-        return Jump(m_assembler.jne());
-#else
-        return jnz32(reg, Imm32(mask));
-#endif
+        return branch64(cond, left, right);
     }
 
-    Jump jnzPtr(Address address, Imm32 mask = Imm32(-1))
+    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
     {
-#if PLATFORM(X86_64)
-        testImm64(address, mask);
-        return Jump(m_assembler.jne());
-#else
-        return jnz32(address, mask);
-#endif
+        return branch64(cond, left, TrustedImm64(right));
+    }
+    
+    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+    {
+        return branch64(cond, left, right);
     }
 
-    Jump jnz32(RegisterID reg, RegisterID mask)
+    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
     {
-        m_assembler.testl_rr(reg, mask);
-        return Jump(m_assembler.jne());
+        return branch64(cond, left, right);
     }
 
-    Jump jnz32(RegisterID reg, Imm32 mask = Imm32(-1))
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
     {
-        testImm32(reg, mask);
-        return Jump(m_assembler.jne());
+        return branch64(cond, left, right);
     }
 
-    Jump jnz32(Address address, Imm32 mask = Imm32(-1))
+    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
     {
-        testImm32(address, mask);
-        return Jump(m_assembler.jne());
+        return branch64(cond, left, TrustedImm64(right));
     }
 
-    Jump jzPtr(RegisterID reg, RegisterID mask)
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
-#if PLATFORM(X86_64)
-        m_assembler.testq_rr(reg, mask);
-        return Jump(m_assembler.je());
-#else
-        return jz32(reg, mask);
-#endif
+        return branchTest64(cond, reg, mask);
+    }
+    
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, reg, mask);
     }
 
-    Jump jzPtr(RegisterID reg, Imm32 mask = Imm32(-1))
+    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-#if PLATFORM(X86_64)
-        testImm64(reg, mask);
-        return Jump(m_assembler.je());
-#else
-        return jz32(reg, mask);
-#endif
+        return branchTest64(cond, address, mask);
     }
 
-    Jump jzPtr(RegisterID reg, ImmPtr mask)
+    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
     {
-#if PLATFORM(X86_64)
-        move(mask, scratchRegister);
-        m_assembler.testq_rr(scratchRegister, reg);
-        return Jump(m_assembler.je());
-#else
-        return jz32(reg, Imm32(mask));
-#endif
+        return branchTest64(cond, address, reg);
     }
 
-    Jump jzPtr(Address address, Imm32 mask = Imm32(-1))
+    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-#if PLATFORM(X86_64)
-        testImm64(address, mask);
-        return Jump(m_assembler.je());
-#else
-        return jz32(address, mask);
-#endif
+        return branchTest64(cond, address, mask);
     }
 
-    Jump jzPtr(BaseIndex address, Imm32 mask = Imm32(-1))
+    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
-#if PLATFORM(X86_64)
-        testImm64(address, mask);
-        return Jump(m_assembler.je());
-#else
-        return jz32(address, mask);
-#endif
+        return branchTest64(cond, address, mask);
     }
 
-    Jump jz32(RegisterID reg, RegisterID mask)
+    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.testl_rr(reg, mask);
-        return Jump(m_assembler.je());
+        return branchAdd64(cond, imm, dest);
     }
 
-    Jump jz32(RegisterID reg, Imm32 mask = Imm32(-1))
+    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
     {
-        testImm32(reg, mask);
-        return Jump(m_assembler.je());
+        return branchAdd64(cond, src, dest);
     }
 
-    Jump jz32(Address address, Imm32 mask = Imm32(-1))
+    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
     {
-        testImm32(address, mask);
-        return Jump(m_assembler.je());
+        return branchSub64(cond, imm, dest);
     }
 
-    Jump jz32(BaseIndex address, Imm32 mask = Imm32(-1))
+    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
     {
-        testImm32(address, mask);
-        return Jump(m_assembler.je());
+        return branchSub64(cond, src, dest);
     }
 
-    Jump jump()
+    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
     {
-        return Jump(m_assembler.jmp());
+        return branchSub64(cond, src1, src2, dest);
     }
 
+    using MacroAssemblerBase::and64;
+    using MacroAssemblerBase::convertInt32ToDouble;
+    using MacroAssemblerBase::store64;
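+
+    // JIT constant blinding: untrusted immediates are split or rotated so their
+    // raw bits never appear verbatim in the instruction stream (a mitigation
+    // against JIT spraying). The predicates below decide when a constant is
+    // harmless enough to emit directly.
+    //
+    // For doubles, only values of limited magnitude with at most three
+    // fractional bits are treated as safe; e.g. 1.5 (== 12/8, magnitude <= 0xff)
+    // is emitted directly, while 3.14 or 1e10 would be considered for blinding.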
+    bool shouldBlindDouble(double value)
+    {
+        // Don't trust NaN or +/-Infinity
+        if (!std::isfinite(value))
+            return shouldConsiderBlinding();
+
+        // Try to force normalisation, and check that there's no change
+        // in the bit pattern
+        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
+            return shouldConsiderBlinding();
 
-    // Backwards, local control flow operations:
-    //
-    // These operations provide a shorter notation for local
-    // backwards branches, which may be both more convenient
-    // for the user, and for the programmer, and for the
-    // assembler (allowing shorter values to be used in
-    // relative offsets).
-    //
-    // The code sequence:
-    //
-    //     Label topOfLoop(this);
-    //     // ...
-    //     jne32(reg1, reg2, topOfLoop);
-    //
-    // Is equivalent to the longer, potentially less efficient form:
-    //
-    //     Label topOfLoop(this);
-    //     // ...
-    //     jne32(reg1, reg2).linkTo(topOfLoop);
+        value = fabs(value);
+        // Only allow a limited set of fractional components
+        double scaledValue = value * 8;
+        if (scaledValue / 8 != value)
+            return shouldConsiderBlinding();
+        double frac = scaledValue - floor(scaledValue);
+        if (frac != 0.0)
+            return shouldConsiderBlinding();
 
-    void jae32(RegisterID left, Address right, Label target)
+        return value > 0xff;
+    }
+    
+    bool shouldBlindPointerForSpecificArch(uintptr_t value)
     {
-        jae32(left, right).linkTo(target, this);
+        if (sizeof(void*) == 4)
+            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
+        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
     }
+    
+    bool shouldBlind(ImmPtr imm)
+    {
+        if (!canBlind())
+            return false;
+        
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // In debug builds we always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#endif
+
+        // First off we'll special-case common, "safe" values to avoid hurting
+        // performance too much
+        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffffL:
+        case 0xffffffffffL:
+        case 0xffffffffffffL:
+        case 0xffffffffffffffL:
+        case 0xffffffffffffffffL:
+            return false;
+        default: {
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+        }
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
 
-    void je32(RegisterID op1, Imm32 imm, Label target)
+        return shouldBlindPointerForSpecificArch(value);
+    }
+    
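+    // Rotation blinding embeds the constant rotated left by a random bit count
+    // and undoes the rotation at runtime (loadRotationBlindedConstant below).
+    // E.g. with rotation == 8, the pointer 0x00007f0012345678 would be embedded
+    // as 0x007f001234567800.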
+    struct RotatedImmPtr {
+        RotatedImmPtr(uintptr_t v1, uint8_t v2)
+            : value(v1)
+            , rotation(v2)
+        {
+        }
+        TrustedImmPtr value;
+        TrustedImm32 rotation;
+    };
+    
+    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
     {
-        je32(op1, imm).linkTo(target, this);
+        uint8_t rotation = random() % (sizeof(void*) * 8);
+        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
+        return RotatedImmPtr(value, rotation);
     }
+    
+    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
+    {
+        move(constant.value, dest);
+        rotateRightPtr(constant.rotation, dest);
+    }
+
+    bool shouldBlind(Imm64 imm)
+    {
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // In debug builds we always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;        
+#endif
+
+        // First off we'll special-case common, "safe" values to avoid hurting
+        // performance too much
+        uint64_t value = imm.asTrustedImm64().m_value;
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffffL:
+        case 0xffffffffffL:
+        case 0xffffffffffffL:
+        case 0xffffffffffffffL:
+        case 0xffffffffffffffffL:
+            return false;
+        default: {
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+
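+            // A 64-bit immediate is often an encoded JSValue; when it decodes
+            // to an int32 or a double, defer to those heuristics instead.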
+            JSValue jsValue = JSValue::decode(value);
+            if (jsValue.isInt32())
+                return shouldBlind(Imm32(jsValue.asInt32()));
+            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+                return false;
+
+            if (!shouldBlindDouble(bitwise_cast<double>(value)))
+                return false;
+        }
+        }
 
-    void je16(RegisterID op1, BaseIndex op2, Label target)
-    {
-        je16(op1, op2).linkTo(target, this);
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindForSpecificArch(value);
     }
     
-    void jl32(RegisterID left, Imm32 right, Label target)
+    struct RotatedImm64 {
+        RotatedImm64(uint64_t v1, uint8_t v2)
+            : value(v1)
+            , rotation(v2)
+        {
+        }
+        TrustedImm64 value;
+        TrustedImm32 rotation;
+    };
+    
+    RotatedImm64 rotationBlindConstant(Imm64 imm)
     {
-        jl32(left, right).linkTo(target, this);
+        uint8_t rotation = random() % (sizeof(int64_t) * 8);
+        uint64_t value = imm.asTrustedImm64().m_value;
+        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+        return RotatedImm64(value, rotation);
     }
     
-    void jle32(RegisterID left, RegisterID right, Label target)
+    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
     {
-        jle32(left, right).linkTo(target, this);
+        move(constant.value, dest);
+        rotateRight64(constant.rotation, dest);
     }
-    
-    void jnePtr(RegisterID op1, ImmPtr imm, Label target)
+
+    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
     {
-        jnePtr(op1, imm).linkTo(target, this);
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+            convertInt32ToDouble(scratchRegister, dest);
+        } else
+            convertInt32ToDouble(imm.asTrustedImm32(), dest);
     }
 
-    void jne32(RegisterID op1, RegisterID op2, Label target)
+    void move(ImmPtr imm, RegisterID dest)
     {
-        jne32(op1, op2).linkTo(target, this);
+        if (shouldBlind(imm))
+            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImmPtr(), dest);
     }
 
-    void jne32(RegisterID op1, Imm32 imm, Label target)
+    void move(Imm64 imm, RegisterID dest)
     {
-        jne32(op1, imm).linkTo(target, this);
+        if (shouldBlind(imm))
+            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImm64(), dest);
     }
 
-    void jzPtr(RegisterID reg, Label target)
+    void and64(Imm32 imm, RegisterID dest)
     {
-        jzPtr(reg).linkTo(target, this);
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            and64(key.value1, dest);
+            and64(key.value2, dest);
+        } else
+            and64(imm.asTrustedImm32(), dest);
     }
 
-    void jump(Label target)
+    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+    {
+        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
+            return branchPtr(cond, left, scratchRegister);
+        }
+        return branchPtr(cond, left, right.asTrustedImmPtr());
+    }
+    
+    void storePtr(ImmPtr imm, Address dest)
     {
-        m_assembler.link(m_assembler.jmp(), target.m_label);
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+            storePtr(scratchRegister, dest);
+        } else
+            storePtr(imm.asTrustedImmPtr(), dest);
     }
 
-    void jump(RegisterID target)
+    void store64(Imm64 imm, Address dest)
     {
-        m_assembler.jmp_r(target);
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+            store64(scratchRegister, dest);
+        } else
+            store64(imm.asTrustedImm64(), dest);
+    }
+
+#endif // !CPU(X86_64)
+
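+    // 32-bit analogue of the predicates above: small values (<= 0xff), values
+    // whose complement is small, and a few common all-ones patterns are treated
+    // as safe and never blinded.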
+    bool shouldBlind(Imm32 imm)
+    {
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // In debug builds we always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#else // ENABLE(FORCED_JIT_BLINDING)
+
+        // First off we'll special-case common, "safe" values to avoid hurting
+        // performance too much
+        uint32_t value = imm.asTrustedImm32().m_value;
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffff:
+            return false;
+        default:
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindForSpecificArch(value);
+#endif // ENABLE(FORCED_JIT_BLINDING)
     }
 
-    // Address is a memory location containing the address to jump to
-    void jump(Address address)
+    struct BlindedImm32 {
+        BlindedImm32(int32_t v1, int32_t v2)
+            : value1(v1)
+            , value2(v2)
+        {
+        }
+        TrustedImm32 value1;
+        TrustedImm32 value2;
+    };
+
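+    // The random key is masked to the significant-byte width of the value being
+    // blinded, so neither half of a blinded pair is wider than the original
+    // constant.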
+    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
     {
-        m_assembler.jmp_m(address.offset, address.base);
+        uint32_t key = random();
+        if (value <= 0xff)
+            mask = 0xff;
+        else if (value <= 0xffff)
+            mask = 0xffff;
+        else if (value <= 0xffffff)
+            mask = 0xffffff;
+        else
+            mask = 0xffffffff;
+        return key & mask;
     }
 
+    uint32_t keyForConstant(uint32_t value)
+    {
+        uint32_t mask = 0;
+        return keyForConstant(value, mask);
+    }
 
-    // Arithmetic control flow operations:
-    //
-    // This set of conditional branch operations branch based
-    // on the result of an arithmetic operation.  The operation
-    // is performed as normal, storing the result.
-    //
-    // * jz operations branch if the result is zero.
-    // * jo operations branch if the (signed) arithmetic
-    //   operation caused an overflow to occur.
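+    // xor-blinding: the pair (value ^ key, key) recombines with a single xor.
+    // E.g. for value 0x1234 and an illustrative key 0x0f0f the emitted halves
+    // are (0x1d3b, 0x0f0f), and 0x1d3b ^ 0x0f0f == 0x1234.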
+    BlindedImm32 xorBlindConstant(Imm32 imm)
+    {
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t key = keyForConstant(baseValue);
+        return BlindedImm32(baseValue ^ key, key);
+    }
 
-    Jump jnzSubPtr(Imm32 imm, RegisterID dest)
+    BlindedImm32 additionBlindedConstant(Imm32 imm)
     {
-        subPtr(imm, dest);
-        return Jump(m_assembler.jne());
+        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
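+        // maskTable forces the key to share "imm"'s alignment, so both halves
+        // of (imm - key, key) stay aligned; e.g. for imm 0x1000 and an
+        // illustrative key 0x0abc the pair is (0x0544, 0x0abc), both multiples of 4.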
+        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
+
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
+        if (key > baseValue)
+            key = key - baseValue;
+        return BlindedImm32(baseValue - key, key);
     }
     
-    Jump jnzSub32(Imm32 imm, RegisterID dest)
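+    // and-blinding: for each bit, the key picks which half carries the
+    // constant's bit while the other half holds a 1, so value1 & value2
+    // recovers the constant. E.g. for value 0x24 and an illustrative key 0x0f
+    // the halves are (0xf4, 0x2f), and 0xf4 & 0x2f == 0x24.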
+    BlindedImm32 andBlindedConstant(Imm32 imm)
     {
-        sub32(imm, dest);
-        return Jump(m_assembler.jne());
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t mask = 0;
+        uint32_t key = keyForConstant(baseValue, mask);
+        ASSERT((baseValue & mask) == baseValue);
+        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
     }
     
-    Jump joAddPtr(RegisterID src, RegisterID dest)
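+    // or-blinding: the key splits the constant's set bits between the two
+    // halves, so value1 | value2 recovers it. E.g. for value 0x24 and an
+    // illustrative key 0x0f the halves are (0x04, 0x20), and 0x04 | 0x20 == 0x24.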
+    BlindedImm32 orBlindedConstant(Imm32 imm)
     {
-        addPtr(src, dest);
-        return Jump(m_assembler.jo());
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t mask = 0;
+        uint32_t key = keyForConstant(baseValue, mask);
+        ASSERT((baseValue & mask) == baseValue);
+        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
     }
     
-    Jump joAdd32(RegisterID src, RegisterID dest)
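+    // Materializes an xor-blinded constant in two steps (move, then xor) so the
+    // raw value never appears in the instruction stream.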
+    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
     {
-        add32(src, dest);
-        return Jump(m_assembler.jo());
+        move(constant.value1, dest);
+        xor32(constant.value2, dest);
     }
     
-    Jump joAdd32(Imm32 imm, RegisterID dest)
+    void add32(Imm32 imm, RegisterID dest)
     {
-        add32(imm, dest);
-        return Jump(m_assembler.jo());
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            add32(key.value1, dest);
+            add32(key.value2, dest);
+        } else
+            add32(imm.asTrustedImm32(), dest);
     }
     
-    Jump joMul32(RegisterID src, RegisterID dest)
+    void addPtr(Imm32 imm, RegisterID dest)
     {
-        mul32(src, dest);
-        return Jump(m_assembler.jo());
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            addPtr(key.value1, dest);
+            addPtr(key.value2, dest);
+        } else
+            addPtr(imm.asTrustedImm32(), dest);
     }
-    
-    Jump joMul32(Imm32 imm, RegisterID src, RegisterID dest)
+
+    void and32(Imm32 imm, RegisterID dest)
     {
-        mul32(imm, src, dest);
-        return Jump(m_assembler.jo());
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            and32(key.value1, dest);
+            and32(key.value2, dest);
+        } else
+            and32(imm.asTrustedImm32(), dest);
     }
-    
-    Jump joSub32(RegisterID src, RegisterID dest)
+
+    void andPtr(Imm32 imm, RegisterID dest)
     {
-        sub32(src, dest);
-        return Jump(m_assembler.jo());
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            andPtr(key.value1, dest);
+            andPtr(key.value2, dest);
+        } else
+            andPtr(imm.asTrustedImm32(), dest);
     }
     
-    Jump joSub32(Imm32 imm, RegisterID dest)
+    void and32(Imm32 imm, RegisterID src, RegisterID dest)
     {
-        sub32(imm, dest);
-        return Jump(m_assembler.jo());
+        if (shouldBlind(imm)) {
+            if (src == dest)
+                return and32(imm.asTrustedImm32(), dest);
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            and32(src, dest);
+        } else
+            and32(imm.asTrustedImm32(), src, dest);
     }
-    
-    Jump jzSubPtr(Imm32 imm, RegisterID dest)
+
+    void move(Imm32 imm, RegisterID dest)
     {
-        subPtr(imm, dest);
-        return Jump(m_assembler.je());
+        if (shouldBlind(imm))
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImm32(), dest);
     }
     
-    Jump jzSub32(Imm32 imm, RegisterID dest)
+    void or32(Imm32 imm, RegisterID src, RegisterID dest)
     {
-        sub32(imm, dest);
-        return Jump(m_assembler.je());
+        if (shouldBlind(imm)) {
+            if (src == dest)
+                return or32(imm, dest);
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            or32(src, dest);
+        } else
+            or32(imm.asTrustedImm32(), src, dest);
     }
     
-
-    // Miscellaneous operations:
-
-    void breakpoint()
+    void or32(Imm32 imm, RegisterID dest)
     {
-        m_assembler.int3();
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = orBlindedConstant(imm);
+            or32(key.value1, dest);
+            or32(key.value2, dest);
+        } else
+            or32(imm.asTrustedImm32(), dest);
     }
-
-    Jump call()
+    
+    void poke(Imm32 value, int index = 0)
     {
-        return Jump(m_assembler.call());
+        store32(value, addressForPoke(index));
     }
-
-    // FIXME: why does this return a Jump object? - it can't be linked.
-    // This may be to get a reference to the return address of the call.
-    //
-    // This should probably be handled by a separate label type to a regular
-    // jump.  Todo: add a CallLabel type, for the regular call - can be linked
-    // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
-    // Also add a CallReturnLabel type for this to return (just a more JmpDsty
-    // form of label, can get the void* after the code has been linked, but can't
-    // try to link it like a Jump object), and let the CallLabel be cast into a
-    // CallReturnLabel.
-    Jump call(RegisterID target)
+    
+    void poke(ImmPtr value, int index = 0)
     {
-        return Jump(m_assembler.call(target));
+        storePtr(value, addressForPoke(index));
     }
-
-    Label label()
+    
+#if CPU(X86_64) || CPU(ARM64)
+    void poke(Imm64 value, int index = 0)
     {
-        return Label(this);
+        store64(value, addressForPoke(index));
     }
+#endif // CPU(X86_64) || CPU(ARM64)
     
-    Label align()
-    {
-        m_assembler.align(16);
-        return Label(this);
+    void store32(Imm32 imm, Address dest)
+    {
+        if (shouldBlind(imm)) {
+#if CPU(X86) || CPU(X86_64)
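+            // On x86 we can blind without a scratch register: store one half,
+            // then xor the other half directly into the destination memory.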
+            BlindedImm32 blind = xorBlindConstant(imm);
+            store32(blind.value1, dest);
+            xor32(blind.value2, dest);
+#else // CPU(X86) || CPU(X86_64)
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
+                store32(scratchRegisterForBlinding(), dest);
+            } else {
+                // If we don't have a scratch register available for use, we'll just 
+                // place a random number of nops.
+                uint32_t nopCount = random() & 3;
+                while (nopCount--)
+                    nop();
+                store32(imm.asTrustedImm32(), dest);
+            }
+#endif // CPU(X86) || CPU(X86_64)
+        } else
+            store32(imm.asTrustedImm32(), dest);
     }
-
-    ptrdiff_t differenceBetween(Label from, Jump to)
+    
+    void sub32(Imm32 imm, RegisterID dest)
     {
-        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            sub32(key.value1, dest);
+            sub32(key.value2, dest);
+        } else
+            sub32(imm.asTrustedImm32(), dest);
     }
-
-    ptrdiff_t differenceBetween(Label from, Label to)
+    
+    void subPtr(Imm32 imm, RegisterID dest)
     {
-        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            subPtr(key.value1, dest);
+            subPtr(key.value2, dest);
+        } else
+            subPtr(imm.asTrustedImm32(), dest);
     }
-
-    ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+    
+    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
     {
-        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        if (shouldBlind(imm)) {
+            BlindedImm32 blind = xorBlindConstant(imm);
+            xor32(blind.value1, src, dest);
+            xor32(blind.value2, dest);
+        } else
+            xor32(imm.asTrustedImm32(), src, dest);
     }
-
-    ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+    
+    void xor32(Imm32 imm, RegisterID dest)
     {
-        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        if (shouldBlind(imm)) {
+            BlindedImm32 blind = xorBlindConstant(imm);
+            xor32(blind.value1, dest);
+            xor32(blind.value2, dest);
+        } else
+            xor32(imm.asTrustedImm32(), dest);
+    }
+
+    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
+    {
+        if (shouldBlind(right)) {
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
+                return branch32(cond, left, scratchRegisterForBlinding());
+            }
+            // If we don't have a scratch register available for use, we'll just 
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+            return branch32(cond, left, right.asTrustedImm32());
+        }
+        
+        return branch32(cond, left, right.asTrustedImm32());
     }
 
-    ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
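+    // The blinded constant is decoded into dest before the arithmetic, so when
+    // src aliases dest the original value must first be preserved in the
+    // scratch register; hence the assertions in branchAdd32 and branchMul32.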
+    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
     {
-        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        if (src == dest)
+            ASSERT(haveScratchRegisterForBlinding());
+
+        if (shouldBlind(imm)) {
+            if (src == dest) {
+                move(src, scratchRegisterForBlinding());
+                src = scratchRegisterForBlinding();
+            }
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            return branchAdd32(cond, src, dest);  
+        }
+        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);            
+    }
+    
+    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (src == dest)
+            ASSERT(haveScratchRegisterForBlinding());
+
+        if (shouldBlind(imm)) {
+            if (src == dest) {
+                move(src, scratchRegisterForBlinding());
+                src = scratchRegisterForBlinding();
+            }
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            return branchMul32(cond, src, dest);  
+        }
+        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
     }
 
-    void ret()
+    // branchSub32 takes an explicit scratch register because 32-bit platforms
+    // make use of this with src == dest, and on x86-32 we don't have a
+    // platform scratch register.
+    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
     {
-        m_assembler.ret();
+        if (shouldBlind(imm)) {
+            ASSERT(scratch != dest);
+            ASSERT(scratch != src);
+            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
+            return branchSub32(cond, src, scratch, dest);
+        }
+        return branchSub32(cond, src, imm.asTrustedImm32(), dest);            
     }
-
-    void sete32(RegisterID src, RegisterID srcDest)
+    
+    void lshift32(Imm32 imm, RegisterID dest)
     {
-        m_assembler.cmpl_rr(srcDest, src);
-        m_assembler.sete_r(srcDest);
-        m_assembler.movzbl_rr(srcDest, srcDest);
+        lshift32(trustedImm32ForShift(imm), dest);
     }
-
-    void sete32(Imm32 imm, RegisterID srcDest)
+    
+    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
     {
-        compareImm32ForBranchEquality(srcDest, imm.m_value);
-        m_assembler.sete_r(srcDest);
-        m_assembler.movzbl_rr(srcDest, srcDest);
+        lshift32(src, trustedImm32ForShift(amount), dest);
     }
-
-    void setne32(RegisterID src, RegisterID srcDest)
+    
+    void rshift32(Imm32 imm, RegisterID dest)
     {
-        m_assembler.cmpl_rr(srcDest, src);
-        m_assembler.setne_r(srcDest);
-        m_assembler.movzbl_rr(srcDest, srcDest);
+        rshift32(trustedImm32ForShift(imm), dest);
     }
-
-    void setne32(Imm32 imm, RegisterID srcDest)
+    
+    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
     {
-        compareImm32ForBranchEquality(srcDest, imm.m_value);
-        m_assembler.setne_r(srcDest);
-        m_assembler.movzbl_rr(srcDest, srcDest);
+        rshift32(src, trustedImm32ForShift(amount), dest);
     }
-
-    // FIXME:
-    // The mask should be optional... perhaps the argument order should be
-    // dest-src, operations always have a dest? ... possibly not true, considering
-    // asm ops like test, or pseudo ops like pop().
-    void setnz32(Address address, Imm32 mask, RegisterID dest)
+    
+    void urshift32(Imm32 imm, RegisterID dest)
     {
-        testImm32(address, mask);
-        m_assembler.setnz_r(dest);
-        m_assembler.movzbl_rr(dest, dest);
+        urshift32(trustedImm32ForShift(imm), dest);
     }
-
-    void setz32(Address address, Imm32 mask, RegisterID dest)
+    
+    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
     {
-        testImm32(address, mask);
-        m_assembler.setz_r(dest);
-        m_assembler.movzbl_rr(dest, dest);
+        urshift32(src, trustedImm32ForShift(amount), dest);
     }
 };
 
 } // namespace JSC
 
+#else // ENABLE(ASSEMBLER)
+
+// If there is no assembler for this platform, at least allow code to make references to
+// some of the things it would otherwise define, albeit without giving that code any way
+// of doing anything useful.
+class MacroAssembler {
+private:
+    MacroAssembler() { }
+    
+public:
+    
+    enum RegisterID { NoRegister };
+    enum FPRegisterID { NoFPRegister };
+};
+
 #endif // ENABLE(ASSEMBLER)
 
 #endif // MacroAssembler_h