bytecode/PutByIdStatus.cpp (JavaScriptCore-7600.1.4.16.1)
/*
 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PutByIdStatus.h"

#include "CodeBlock.h"
#include "JSCInlines.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
#include "PolymorphicPutByIdList.h"
#include "Structure.h"
#include "StructureChain.h"
#include <wtf/ListDump.h>

namespace JSC {

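// Append a variant, unless we have already seen a variant for the same base
// structure. Two variants keyed on the same old structure would make the
// status ambiguous, so on failure the caller falls back to the slow path.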
bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
{
    for (unsigned i = 0; i < m_variants.size(); ++i) {
        if (m_variants[i].oldStructure() == variant.oldStructure())
            return false;
    }
    m_variants.append(variant);
    return true;
}

#if ENABLE(DFG_JIT)
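// Returns true if the DFG has recorded frequent OSR exits at this bytecode
// index due to failed structure-cache speculation. If so, any cached status
// is suspect and the put should be treated as taking the slow path.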
bool PutByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex, ExitingJITType exitType)
{
    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache, exitType))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCacheWatchpoint, exitType))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadWeakConstantCache, exitType))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadWeakConstantCacheWatchpoint, exitType));
}
#endif

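// Harvest what the LLInt's inline cache learned about this put_by_id. The
// instruction stream caches the base structure in operand 4; the transition
// opcodes additionally cache the new structure and the prototype chain in
// operands 6 and 7.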
PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, StringImpl* uid)
{
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return PutByIdStatus(NoInformation);

    if (instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_out_of_line)) {
        PropertyOffset offset = structure->getConcurrently(*profiledBlock->vm(), uid);
        if (!isValidOffset(offset))
            return PutByIdStatus(NoInformation);

        return PutByIdVariant::replace(structure, offset);
    }

    ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());

    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct_out_of_line)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal_out_of_line));

    Structure* newStructure = instruction[6].u.structure.get();
    StructureChain* chain = instruction[7].u.structureChain.get();
    ASSERT(newStructure);
    ASSERT(chain);

    PropertyOffset offset = newStructure->getConcurrently(*profiledBlock->vm(), uid);
    if (!isValidOffset(offset))
        return PutByIdStatus(NoInformation);

    return PutByIdVariant::transition(
        structure, newStructure,
        chain ? adoptRef(new IntendedStructureChain(profiledBlock, structure, chain)) : 0,
        offset);
}

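// Compute the status for a baseline code block. Prefer the baseline JIT's
// structure stub; if the JIT never optimized this access, fall back to the
// LLInt's inline cache.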
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

#if ENABLE(DFG_JIT)
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex)
        || hasExitSite(locker, profiledBlock, bytecodeIndex))
        return PutByIdStatus(TakesSlowPath);

    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    PutByIdStatus result = computeForStubInfo(locker, profiledBlock, stubInfo, uid);
    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(map);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
    return PutByIdStatus(NoInformation);
#endif // ENABLE(DFG_JIT)
}

#if ENABLE(JIT)
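// Interpret what a baseline JIT structure stub tells us. An unset stub means
// the access went to the slow path without being cached; replace and
// transition stubs yield a single variant; a polymorphic list yields one
// variant per cached access, except that setters and custom setters force
// MakesCalls and anything else forces the slow path.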
PutByIdStatus PutByIdStatus::computeForStubInfo(const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo* stubInfo, StringImpl* uid)
{
    if (!stubInfo || !stubInfo->seen)
        return PutByIdStatus();

    if (stubInfo->resetByGC)
        return PutByIdStatus(TakesSlowPath);

    switch (stubInfo->accessType) {
    case access_unset:
        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
        return PutByIdStatus(TakesSlowPath);

    case access_put_by_id_replace: {
        PropertyOffset offset =
            stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(
                *profiledBlock->vm(), uid);
        if (isValidOffset(offset)) {
            return PutByIdVariant::replace(
                stubInfo->u.putByIdReplace.baseObjectStructure.get(), offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

    case access_put_by_id_transition_normal:
    case access_put_by_id_transition_direct: {
        ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
        PropertyOffset offset =
            stubInfo->u.putByIdTransition.structure->getConcurrently(
                *profiledBlock->vm(), uid);
        if (isValidOffset(offset)) {
            return PutByIdVariant::transition(
                stubInfo->u.putByIdTransition.previousStructure.get(),
                stubInfo->u.putByIdTransition.structure.get(),
                stubInfo->u.putByIdTransition.chain ? adoptRef(new IntendedStructureChain(
                    profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
                    stubInfo->u.putByIdTransition.chain.get())) : 0,
                offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

    case access_put_by_id_list: {
        PolymorphicPutByIdList* list = stubInfo->u.putByIdList.list;

        PutByIdStatus result;
        result.m_state = Simple;

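        // Optimistically assume the list reduces to simple variants; bail to
        // the slow path the moment any entry cannot be modeled.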
        for (unsigned i = 0; i < list->size(); ++i) {
            const PutByIdAccess& access = list->at(i);

            switch (access.type()) {
            case PutByIdAccess::Replace: {
                Structure* structure = access.structure();
                PropertyOffset offset = structure->getConcurrently(*profiledBlock->vm(), uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(TakesSlowPath);
                if (!result.appendVariant(PutByIdVariant::replace(structure, offset)))
                    return PutByIdStatus(TakesSlowPath);
                break;
            }

            case PutByIdAccess::Transition: {
                PropertyOffset offset =
                    access.newStructure()->getConcurrently(*profiledBlock->vm(), uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(TakesSlowPath);
                bool ok = result.appendVariant(PutByIdVariant::transition(
                    access.oldStructure(), access.newStructure(),
                    access.chain() ? adoptRef(new IntendedStructureChain(
                        profiledBlock, access.oldStructure(), access.chain())) : 0,
                    offset));
                if (!ok)
                    return PutByIdStatus(TakesSlowPath);
                break;
            }

            case PutByIdAccess::Setter:
            case PutByIdAccess::CustomSetter:
                return PutByIdStatus(MakesCalls);

            default:
                return PutByIdStatus(TakesSlowPath);
            }
        }

        return result;
    }

    default:
        return PutByIdStatus(TakesSlowPath);
    }
}
#endif

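// Compute the status for an access that may have been compiled by the DFG.
// Consult the DFG block's stubs first, unless the FTL has already exited at
// this code origin; if the DFG result is not a simple set of variants, fall
// back to the baseline block's profiling.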
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, StringImpl* uid)
{
#if ENABLE(DFG_JIT)
    if (dfgBlock) {
        {
            ConcurrentJITLocker locker(baselineBlock->m_lock);
            if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex, ExitFromFTL))
                return PutByIdStatus(TakesSlowPath);
        }

        PutByIdStatus result;
        {
            ConcurrentJITLocker locker(dfgBlock->m_lock);
            result = computeForStubInfo(locker, dfgBlock, dfgMap.get(codeOrigin), uid);
        }

        // We use TakesSlowPath in some cases where the stub was unset. That's weird
        // and it would be better not to do that, but it means that we have to defend
        // ourselves here by only trusting a simple result.
        if (result.isSimple())
            return result;
    }
#else
    UNUSED_PARAM(dfgBlock);
    UNUSED_PARAM(dfgMap);
#endif

    return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}

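// Predict the outcome of a put on an object with the given structure, using
// no profiling at all: either a replace of an existing plain property, a
// transition that is already cached on the structure, or the slow path.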
PutByIdStatus PutByIdStatus::computeFor(VM& vm, JSGlobalObject* globalObject, Structure* structure, StringImpl* uid, bool isDirect)
{
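    // Index-like names go through the array-put machinery, not the put_by_id cache.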
    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return PutByIdStatus(TakesSlowPath);

    if (!structure)
        return PutByIdStatus(TakesSlowPath);

    if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
        return PutByIdStatus(TakesSlowPath);

    if (!structure->propertyAccessesAreCacheable())
        return PutByIdStatus(TakesSlowPath);

    unsigned attributes;
    JSCell* specificValue;
    PropertyOffset offset = structure->getConcurrently(vm, uid, attributes, specificValue);
    if (isValidOffset(offset)) {
        if (attributes & CustomAccessor)
            return PutByIdStatus(MakesCalls);

        if (attributes & (Accessor | ReadOnly))
            return PutByIdStatus(TakesSlowPath);
        if (specificValue) {
            // We need the PutById slow path to verify that we're storing the right value into
            // the specialized slot.
            return PutByIdStatus(TakesSlowPath);
        }
        return PutByIdVariant::replace(structure, offset);
    }

    // Our hypothesis is that we're doing a transition. Before we prove that this is really
    // true, we want to do some sanity checks.

    // Don't cache put transitions on dictionaries.
    if (structure->isDictionary())
        return PutByIdStatus(TakesSlowPath);

    // If the structure corresponds to something that isn't an object, then give up, since
    // we don't want to be adding properties to strings.
    if (structure->typeInfo().type() == StringType)
        return PutByIdStatus(TakesSlowPath);

    RefPtr<IntendedStructureChain> chain;
    if (!isDirect) {
        chain = adoptRef(new IntendedStructureChain(globalObject, structure));

        // If the prototype chain has setters or read-only properties, then give up.
        if (chain->mayInterceptStoreTo(vm, uid))
            return PutByIdStatus(TakesSlowPath);

        // If the prototype chain hasn't been normalized (i.e. there are proxies or
        // dictionaries), then give up. The dictionary case can only arise if this
        // structure has not been used in an optimized put_by_id transition, and we
        // don't want the optimizing JIT to flatten dictionaries when the evidence
        // suggests those objects were never used as prototypes in a cacheable
        // prototype access - i.e. there's a good chance that some of the other
        // checks below would fail anyway.
        if (!chain->isNormalized())
            return PutByIdStatus(TakesSlowPath);
    }

    // We only optimize if there is already a structure that the transition is cached to.
    // Among other things, this allows us to guard against a transition with a specific
    // value.
    //
    // - If we're storing a value that could be specific: this would only be a problem if
    //   the existing transition did have a specific value already, since if it didn't,
    //   then we would behave "as if" we were not storing a specific value. If it did
    //   have a specific value, then we'll know - the fact that we pass 0 for
    //   specificValue will tell us.
    //
    // - If we're not storing a value that could be specific: again, this would only be a
    //   problem if the existing transition did have a specific value, which we check for
    //   by passing 0 for the specificValue.
    Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, 0, offset);
    if (!transition)
        return PutByIdStatus(TakesSlowPath); // This occurs in bizarre cases only. See above.
    ASSERT(!transition->transitionDidInvolveSpecificValue());
    ASSERT(isValidOffset(offset));

    return PutByIdVariant::transition(structure, transition, chain.release(), offset);
}

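// Print a human-readable summary of the status for debugging output.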
void PutByIdStatus::dump(PrintStream& out) const
{
    switch (m_state) {
    case NoInformation:
        out.print("(NoInformation)");
        return;

    case Simple:
        out.print("(", listDump(m_variants), ")");
        return;

    case TakesSlowPath:
        out.print("(TakesSlowPath)");
        return;

    case MakesCalls:
        out.print("(MakesCalls)");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace JSC
