/*
 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "PutByIdStatus.h"

#include "AccessorCallJITStubRoutine.h"
#include "CodeBlock.h"
#include "ComplexGetStatus.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
#include "JSCInlines.h"
#include "PolymorphicPutByIdList.h"
#include "Structure.h"
#include "StructureChain.h"
#include <wtf/ListDump.h>

namespace JSC {
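
// Adds a variant to this status. The variant list is kept canonical: the new
// variant is first merged into an existing compatible variant if possible;
// otherwise it may only be appended if its old structure set is disjoint from
// every variant already recorded. Returns false if the variant cannot be added.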
bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
{
    for (unsigned i = 0; i < m_variants.size(); ++i) {
        if (m_variants[i].attemptToMerge(variant))
            return true;
    }
    for (unsigned i = 0; i < m_variants.size(); ++i) {
        if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
            return false;
    }
    m_variants.append(variant);
    return true;
}
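
// Reports whether this bytecode has frequently exited optimized code because a
// structure check failed (BadCache) or a constant-inferred cache failed
// (BadConstantCache); either is evidence that caching this put is unprofitable.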
#if ENABLE(JIT)
bool PutByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
}
#endif
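
// Computes a status from the LLInt's put_by_id metadata: the instruction
// stream caches the base object's structure in operand 4 and, for cached
// transitions, the new structure and the prototype chain in operands 6 and 7.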
PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return PutByIdStatus(NoInformation);

    if (instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_out_of_line)) {
        PropertyOffset offset = structure->getConcurrently(uid);
        if (!isValidOffset(offset))
            return PutByIdStatus(NoInformation);

        return PutByIdVariant::replace(structure, offset);
    }

    ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());

    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct_out_of_line)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal_out_of_line));

    Structure* newStructure = instruction[6].u.structure.get();
    StructureChain* chain = instruction[7].u.structureChain.get();

    PropertyOffset offset = newStructure->getConcurrently(uid);
    if (!isValidOffset(offset))
        return PutByIdStatus(NoInformation);

    RefPtr<IntendedStructureChain> intendedChain;
    if (chain)
        intendedChain = adoptRef(new IntendedStructureChain(profiledBlock, structure, chain));

    return PutByIdVariant::transition(structure, newStructure, intendedChain.get(), offset);
}
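
// Computes a status for a single (baseline) code block, preferring the JIT's
// stub information and falling back to LLInt metadata when no stub has been
// generated for this bytecode yet.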
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
#if ENABLE(JIT)
    if (hasExitSite(locker, profiledBlock, bytecodeIndex))
        return PutByIdStatus(TakesSlowPath);

    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    PutByIdStatus result = computeForStubInfo(
        locker, profiledBlock, stubInfo, uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
#else
    UNUSED_PARAM(map);
    return PutByIdStatus(NoInformation);
#endif // ENABLE(JIT)
}
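
// Translates a structure stub into a status. Monomorphic replace and
// transition stubs map to a single variant; a polymorphic list maps to a
// variant list, and any access the optimizing compilers cannot model
// precisely collapses the status to TakesSlowPath or MakesCalls.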
#if ENABLE(JIT)
PutByIdStatus PutByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo)
        return PutByIdStatus();

    if (stubInfo->tookSlowPath)
        return PutByIdStatus(TakesSlowPath);

    if (!stubInfo->seen)
        return PutByIdStatus();

    switch (stubInfo->accessType) {
    case access_unset:
        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
        return PutByIdStatus(TakesSlowPath);

    case access_put_by_id_replace: {
        PropertyOffset offset =
            stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(uid);
        if (isValidOffset(offset)) {
            return PutByIdVariant::replace(
                stubInfo->u.putByIdReplace.baseObjectStructure.get(), offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

    case access_put_by_id_transition_normal:
    case access_put_by_id_transition_direct: {
        ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
        PropertyOffset offset =
            stubInfo->u.putByIdTransition.structure->getConcurrently(uid);
        if (isValidOffset(offset)) {
            RefPtr<IntendedStructureChain> chain;
            if (stubInfo->u.putByIdTransition.chain) {
                chain = adoptRef(new IntendedStructureChain(
                    profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
                    stubInfo->u.putByIdTransition.chain.get()));
            }
            return PutByIdVariant::transition(
                stubInfo->u.putByIdTransition.previousStructure.get(),
                stubInfo->u.putByIdTransition.structure.get(),
                chain.get(), offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

    case access_put_by_id_list: {
        PolymorphicPutByIdList* list = stubInfo->u.putByIdList.list;

        PutByIdStatus result;
        result.m_state = Simple;

        // First pass: decide what to report if we have to bail. If the stub
        // contains any setter, bailing means that calls were already possible.
        State slowPathState = TakesSlowPath;
        for (unsigned i = 0; i < list->size(); ++i) {
            const PutByIdAccess& access = list->at(i);

            switch (access.type()) {
            case PutByIdAccess::Setter:
            case PutByIdAccess::CustomSetter:
                slowPathState = MakesCalls;
                break;
            default:
                break;
            }
        }

        // Second pass: translate each access into a variant.
        for (unsigned i = 0; i < list->size(); ++i) {
            const PutByIdAccess& access = list->at(i);

            PutByIdVariant variant;

            switch (access.type()) {
            case PutByIdAccess::Replace: {
                Structure* structure = access.structure();
                PropertyOffset offset = structure->getConcurrently(uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(slowPathState);
                variant = PutByIdVariant::replace(structure, offset);
                break;
            }

            case PutByIdAccess::Transition: {
                PropertyOffset offset =
                    access.newStructure()->getConcurrently(uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(slowPathState);
                RefPtr<IntendedStructureChain> chain;
                if (access.chain()) {
                    chain = adoptRef(new IntendedStructureChain(
                        profiledBlock, access.oldStructure(), access.chain()));
                    if (!chain->isStillValid())
                        continue;
                }
                variant = PutByIdVariant::transition(
                    access.oldStructure(), access.newStructure(), chain.get(), offset);
                break;
            }

            case PutByIdAccess::Setter: {
                Structure* structure = access.structure();

                ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                    profiledBlock, structure, access.chain(), access.chainCount(), uid);

                switch (complexGetStatus.kind()) {
                case ComplexGetStatus::ShouldSkip:
                    continue;

                case ComplexGetStatus::TakesSlowPath:
                    return PutByIdStatus(slowPathState);

                case ComplexGetStatus::Inlineable: {
                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                        access.stubRoutine());
                    std::unique_ptr<CallLinkStatus> callLinkStatus =
                        std::make_unique<CallLinkStatus>(
                            CallLinkStatus::computeFor(
                                locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));

                    variant = PutByIdVariant::setter(
                        structure, complexGetStatus.offset(), complexGetStatus.chain(),
                        WTF::move(callLinkStatus));
                    break;
                }
                }
                break;
            }

            case PutByIdAccess::CustomSetter:
                return PutByIdStatus(MakesCalls);

            default:
                return PutByIdStatus(slowPathState);
            }

            if (!result.appendVariant(variant))
                return PutByIdStatus(slowPathState);
        }

        return result;
    }

    default:
        return PutByIdStatus(TakesSlowPath);
    }
}
#endif
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
#if ENABLE(DFG_JIT)
    if (dfgBlock) {
        CallLinkStatus::ExitSiteData exitSiteData;
        {
            ConcurrentJITLocker locker(baselineBlock->m_lock);
            if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
                return PutByIdStatus(TakesSlowPath);
            exitSiteData = CallLinkStatus::computeExitSiteData(
                locker, baselineBlock, codeOrigin.bytecodeIndex);
        }

        PutByIdStatus result;
        {
            ConcurrentJITLocker locker(dfgBlock->m_lock);
            result = computeForStubInfo(
                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
        }

        // We use TakesSlowPath in some cases where the stub was unset. That's weird and
        // it would be better not to do that. But it means that we have to defend
        // against it by falling back to the baseline block's information whenever the
        // DFG doesn't give us a Simple status.
        if (result.isSimple())
            return result;
    }
#else
    UNUSED_PARAM(dfgBlock);
    UNUSED_PARAM(dfgMap);
#endif

    return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
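
// Computes a status directly from a set of structures, for use when the DFG
// already knows the possible structures of the base object. Illustrative
// JavaScript (not from this file): for a put like
//
//     o.f = 42;
//
// each structure either already has "f" at a valid offset (a Replace variant,
// provided no accessor, read-only property, or custom setter intervenes) or
// lacks it, in which case we look for an already-cached transition to a
// structure that has "f" (a Transition variant).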
PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
{
    if (parseIndex(*uid))
        return PutByIdStatus(TakesSlowPath);

    if (set.isEmpty())
        return PutByIdStatus();

    PutByIdStatus result;
    result.m_state = Simple;
    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];

        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
            return PutByIdStatus(TakesSlowPath);

        if (!structure->propertyAccessesAreCacheable())
            return PutByIdStatus(TakesSlowPath);

        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (isValidOffset(offset)) {
            // The property already exists, so this put is a replace.
            if (attributes & CustomAccessor)
                return PutByIdStatus(MakesCalls);

            if (attributes & (Accessor | ReadOnly))
                return PutByIdStatus(TakesSlowPath);

            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
            if (!replaceSet || replaceSet->isStillValid()) {
                // When this executes, it'll create, and fire, this replacement watchpoint set.
                // That means that this has probably never executed or that something fishy is
                // going on. Also, we cannot create or fire the watchpoint set from the concurrent
                // JIT thread, so even if we wanted to do this, we'd need to have a lazy thingy.
                // So, better leave this alone and take slow path.
                return PutByIdStatus(TakesSlowPath);
            }

            if (!result.appendVariant(PutByIdVariant::replace(structure, offset)))
                return PutByIdStatus(TakesSlowPath);
            continue;
        }

        // Our hypothesis is that we're doing a transition. Before we prove that this is really
        // true, we want to do some sanity checks.

        // Don't cache put transitions on dictionaries.
        if (structure->isDictionary())
            return PutByIdStatus(TakesSlowPath);

        // If the structure corresponds to something that isn't an object, then give up, since
        // we don't want to be adding properties to strings.
        if (!structure->typeInfo().isObject())
            return PutByIdStatus(TakesSlowPath);

        RefPtr<IntendedStructureChain> chain;
        if (!isDirect) {
            chain = adoptRef(new IntendedStructureChain(globalObject, structure));

            // If the prototype chain has setters or read-only properties, then give up.
            if (chain->mayInterceptStoreTo(uid))
                return PutByIdStatus(TakesSlowPath);

            // If the prototype chain hasn't been normalized (i.e. there are proxies or dictionaries)
            // then give up. The dictionary case would only happen if this structure has not been
            // used in an optimized put_by_id transition. And really the only reason why we would
            // bail here is that I don't really feel like having the optimizing JIT go and flatten
            // dictionaries if we have evidence to suggest that those objects were never used as
            // prototypes in a cacheable prototype access - i.e. there's a good chance that some of
            // the other checks below will fail.
            if (structure->isProxy() || !chain->isNormalized())
                return PutByIdStatus(TakesSlowPath);
        }

        // We only optimize if there is already a structure that the transition is cached to.
        Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
        if (!transition)
            return PutByIdStatus(TakesSlowPath);
        ASSERT(isValidOffset(offset));

        bool didAppend = result.appendVariant(
            PutByIdVariant::transition(structure, transition, chain.get(), offset));
        if (!didAppend)
            return PutByIdStatus(TakesSlowPath);
    }

    return result;
}
bool PutByIdStatus::makesCalls() const
{
    if (m_state == MakesCalls)
        return true;

    if (m_state != Simple)
        return false;

    for (unsigned i = m_variants.size(); i--;) {
        if (m_variants[i].makesCalls())
            return true;
    }

    return false;
}

void PutByIdStatus::dump(PrintStream& out) const
{
    switch (m_state) {
    case NoInformation:
        out.print("(NoInformation)");
        return;

    case Simple:
        out.print("(", listDump(m_variants), ")");
        return;

    case TakesSlowPath:
        out.print("(TakesSlowPath)");
        return;

    case MakesCalls:
        out.print("(MakesCalls)");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace JSC