/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "JITInlineCacheGenerator.h"
31 #include "CodeBlock.h"
32 #include "LinkBuffer.h"
33 #include "JSCInlines.h"
37 static StructureStubInfo
* garbageStubInfo()
39 static StructureStubInfo
* stubInfo
= new StructureStubInfo();
43 JITInlineCacheGenerator::JITInlineCacheGenerator(CodeBlock
* codeBlock
, CodeOrigin codeOrigin
)
44 : m_codeBlock(codeBlock
)
46 m_stubInfo
= m_codeBlock
? m_codeBlock
->addStubInfo() : garbageStubInfo();
47 m_stubInfo
->codeOrigin
= codeOrigin
;
50 JITByIdGenerator::JITByIdGenerator(
51 CodeBlock
* codeBlock
, CodeOrigin codeOrigin
, const RegisterSet
& usedRegisters
,
52 JSValueRegs base
, JSValueRegs value
, SpillRegistersMode spillMode
)
53 : JITInlineCacheGenerator(codeBlock
, codeOrigin
)
57 m_stubInfo
->patch
.spillMode
= spillMode
;
58 m_stubInfo
->patch
.usedRegisters
= usedRegisters
;
60 // This is a convenience - in cases where the only registers you're using are base/value,
61 // it allows you to pass RegisterSet() as the usedRegisters argument.
62 m_stubInfo
->patch
.usedRegisters
.set(base
);
63 m_stubInfo
->patch
.usedRegisters
.set(value
);
65 m_stubInfo
->patch
.baseGPR
= static_cast<int8_t>(base
.payloadGPR());
66 m_stubInfo
->patch
.valueGPR
= static_cast<int8_t>(value
.payloadGPR());
68 m_stubInfo
->patch
.valueTagGPR
= static_cast<int8_t>(value
.tagGPR());
72 void JITByIdGenerator::finalize(LinkBuffer
& fastPath
, LinkBuffer
& slowPath
)
74 CodeLocationCall callReturnLocation
= slowPath
.locationOf(m_call
);
75 m_stubInfo
->callReturnLocation
= callReturnLocation
;
76 m_stubInfo
->patch
.deltaCheckImmToCall
= MacroAssembler::differenceBetweenCodePtr(
77 fastPath
.locationOf(m_structureImm
), callReturnLocation
);
78 m_stubInfo
->patch
.deltaCallToJump
= MacroAssembler::differenceBetweenCodePtr(
79 callReturnLocation
, fastPath
.locationOf(m_structureCheck
));
81 m_stubInfo
->patch
.deltaCallToLoadOrStore
= MacroAssembler::differenceBetweenCodePtr(
82 callReturnLocation
, fastPath
.locationOf(m_loadOrStore
));
84 m_stubInfo
->patch
.deltaCallToTagLoadOrStore
= MacroAssembler::differenceBetweenCodePtr(
85 callReturnLocation
, fastPath
.locationOf(m_tagLoadOrStore
));
86 m_stubInfo
->patch
.deltaCallToPayloadLoadOrStore
= MacroAssembler::differenceBetweenCodePtr(
87 callReturnLocation
, fastPath
.locationOf(m_loadOrStore
));
89 m_stubInfo
->patch
.deltaCallToSlowCase
= MacroAssembler::differenceBetweenCodePtr(
90 callReturnLocation
, slowPath
.locationOf(m_slowPathBegin
));
91 m_stubInfo
->patch
.deltaCallToDone
= MacroAssembler::differenceBetweenCodePtr(
92 callReturnLocation
, fastPath
.locationOf(m_done
));
93 m_stubInfo
->patch
.deltaCallToStorageLoad
= MacroAssembler::differenceBetweenCodePtr(
94 callReturnLocation
, fastPath
.locationOf(m_propertyStorageLoad
));
97 void JITByIdGenerator::finalize(LinkBuffer
& linkBuffer
)
99 finalize(linkBuffer
, linkBuffer
);
102 void JITByIdGenerator::generateFastPathChecks(MacroAssembler
& jit
, GPRReg butterfly
)
104 m_structureCheck
= jit
.patchableBranch32WithPatch(
105 MacroAssembler::NotEqual
,
106 MacroAssembler::Address(m_base
.payloadGPR(), JSCell::structureIDOffset()),
107 m_structureImm
, MacroAssembler::TrustedImm32(0));
109 m_propertyStorageLoad
= jit
.convertibleLoadPtr(
110 MacroAssembler::Address(m_base
.payloadGPR(), JSObject::butterflyOffset()), butterfly
);
113 JITGetByIdGenerator::JITGetByIdGenerator(
114 CodeBlock
* codeBlock
, CodeOrigin codeOrigin
, const RegisterSet
& usedRegisters
,
115 JSValueRegs base
, JSValueRegs value
, SpillRegistersMode spillMode
)
116 : JITByIdGenerator(codeBlock
, codeOrigin
, usedRegisters
, base
, value
, spillMode
)
118 RELEASE_ASSERT(base
.payloadGPR() != value
.tagGPR());
121 void JITGetByIdGenerator::generateFastPath(MacroAssembler
& jit
)
123 generateFastPathChecks(jit
, m_value
.payloadGPR());
126 m_loadOrStore
= jit
.load64WithCompactAddressOffsetPatch(
127 MacroAssembler::Address(m_value
.payloadGPR(), 0), m_value
.payloadGPR()).label();
129 m_tagLoadOrStore
= jit
.load32WithCompactAddressOffsetPatch(
130 MacroAssembler::Address(m_value
.payloadGPR(), 0), m_value
.tagGPR()).label();
131 m_loadOrStore
= jit
.load32WithCompactAddressOffsetPatch(
132 MacroAssembler::Address(m_value
.payloadGPR(), 0), m_value
.payloadGPR()).label();
135 m_done
= jit
.label();
138 JITPutByIdGenerator::JITPutByIdGenerator(
139 CodeBlock
* codeBlock
, CodeOrigin codeOrigin
, const RegisterSet
& usedRegisters
,
140 JSValueRegs base
, JSValueRegs value
, GPRReg scratch
, SpillRegistersMode spillMode
,
141 ECMAMode ecmaMode
, PutKind putKind
)
142 : JITByIdGenerator(codeBlock
, codeOrigin
, usedRegisters
, base
, value
, spillMode
)
144 , m_ecmaMode(ecmaMode
)
147 m_stubInfo
->patch
.usedRegisters
.clear(scratch
);
150 void JITPutByIdGenerator::generateFastPath(MacroAssembler
& jit
)
152 generateFastPathChecks(jit
, m_scratch
);
155 m_loadOrStore
= jit
.store64WithAddressOffsetPatch(
156 m_value
.payloadGPR(), MacroAssembler::Address(m_scratch
, 0)).label();
158 m_tagLoadOrStore
= jit
.store32WithAddressOffsetPatch(
159 m_value
.tagGPR(), MacroAssembler::Address(m_scratch
, 0)).label();
160 m_loadOrStore
= jit
.store32WithAddressOffsetPatch(
161 m_value
.payloadGPR(), MacroAssembler::Address(m_scratch
, 0)).label();
164 m_done
= jit
.label();
167 V_JITOperation_ESsiJJI
JITPutByIdGenerator::slowPathFunction()
169 if (m_ecmaMode
== StrictMode
) {
170 if (m_putKind
== Direct
)
171 return operationPutByIdDirectStrictOptimize
;
172 return operationPutByIdStrictOptimize
;
174 if (m_putKind
== Direct
)
175 return operationPutByIdDirectNonStrictOptimize
;
176 return operationPutByIdNonStrictOptimize
;
181 #endif // ENABLE(JIT)