/*
 * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PutByIdStatus.h"

#include "AccessorCallJITStubRoutine.h"
#include "CodeBlock.h"
#include "ComplexGetStatus.h"
#include "JSCInlines.h"
#include "LLIntData.h"
#include "LowLevelInterpreter.h"
#include "PolymorphicPutByIdList.h"
#include "Structure.h"
#include "StructureChain.h"
#include <wtf/ListDump.h>

namespace JSC {

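// Fold a new variant into this status. First try to merge it into an existing
// variant; failing that, append it, unless its old structure set overlaps an
// existing variant's, in which case the variants are ambiguous and we give up.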
bool PutByIdStatus::appendVariant(const PutByIdVariant& variant)
{
    for (unsigned i = 0; i < m_variants.size(); ++i) {
        if (m_variants[i].attemptToMerge(variant))
            return true;
    }
    for (unsigned i = 0; i < m_variants.size(); ++i) {
        if (m_variants[i].oldStructure().overlaps(variant.oldStructure()))
            return false;
    }
    m_variants.append(variant);
    return true;
}

#if ENABLE(DFG_JIT)
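// Check whether the profiled block has recorded OSR exits at this bytecode
// because an inline cache went bad (BadCache) or a constant cache went bad
// (BadConstantCache).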
bool PutByIdStatus::hasExitSite(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    return profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadConstantCache));
}
#endif

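// Build a status from the structures cached in the low-level interpreter's
// put_by_id instruction stream: a replace for the plain put_by_id opcodes, or
// a transition for the put_by_id_transition_* opcodes.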
PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;

    Structure* structure = instruction[4].u.structure.get();
    if (!structure)
        return PutByIdStatus(NoInformation);

    if (instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_out_of_line)) {
        PropertyOffset offset = structure->getConcurrently(uid);
        if (!isValidOffset(offset))
            return PutByIdStatus(NoInformation);

        return PutByIdVariant::replace(structure, offset);
    }

    ASSERT(structure->transitionWatchpointSetHasBeenInvalidated());

    ASSERT(instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_direct_out_of_line)
        || instruction[0].u.opcode == LLInt::getOpcode(op_put_by_id_transition_normal_out_of_line));

    Structure* newStructure = instruction[6].u.structure.get();
    StructureChain* chain = instruction[7].u.structureChain.get();
    ASSERT(newStructure);
    ASSERT(chain);

    PropertyOffset offset = newStructure->getConcurrently(uid);
    if (!isValidOffset(offset))
        return PutByIdStatus(NoInformation);

    RefPtr<IntendedStructureChain> intendedChain;
    if (chain)
        intendedChain = adoptRef(new IntendedStructureChain(profiledBlock, structure, chain));

    return PutByIdVariant::transition(structure, newStructure, intendedChain.get(), offset);
}

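// Compute the status for a baseline code block: consult the JIT's stub info
// first, and fall back to the LLInt cache when the stubs yield nothing.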
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);
#if ENABLE(DFG_JIT)
    if (hasExitSite(locker, profiledBlock, bytecodeIndex))
        return PutByIdStatus(TakesSlowPath);

    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    PutByIdStatus result = computeForStubInfo(
        locker, profiledBlock, stubInfo, uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(map);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
    return PutByIdStatus(NoInformation);
#endif // ENABLE(DFG_JIT)
}

#if ENABLE(JIT)
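// Translate a StructureStubInfo into a status, degrading to TakesSlowPath (or
// MakesCalls, when setters are involved) whenever the recorded accesses cannot
// be expressed as simple variants.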
PutByIdStatus PutByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo)
        return PutByIdStatus();

    if (stubInfo->tookSlowPath)
        return PutByIdStatus(TakesSlowPath);

    if (!stubInfo->seen)
        return PutByIdStatus();

    switch (stubInfo->accessType) {
    case access_unset:
        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
        return PutByIdStatus(TakesSlowPath);

    case access_put_by_id_replace: {
        PropertyOffset offset =
            stubInfo->u.putByIdReplace.baseObjectStructure->getConcurrently(uid);
        if (isValidOffset(offset)) {
            return PutByIdVariant::replace(
                stubInfo->u.putByIdReplace.baseObjectStructure.get(), offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

    case access_put_by_id_transition_normal:
    case access_put_by_id_transition_direct: {
        ASSERT(stubInfo->u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
        PropertyOffset offset =
            stubInfo->u.putByIdTransition.structure->getConcurrently(uid);
        if (isValidOffset(offset)) {
            RefPtr<IntendedStructureChain> chain;
            if (stubInfo->u.putByIdTransition.chain) {
                chain = adoptRef(new IntendedStructureChain(
                    profiledBlock, stubInfo->u.putByIdTransition.previousStructure.get(),
                    stubInfo->u.putByIdTransition.chain.get()));
            }
            return PutByIdVariant::transition(
                stubInfo->u.putByIdTransition.previousStructure.get(),
                stubInfo->u.putByIdTransition.structure.get(),
                chain.get(), offset);
        }
        return PutByIdStatus(TakesSlowPath);
    }

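    // A polymorphic list is handled in two passes: first scan the accesses to
    // decide how to degrade if one of them turns out to be uncacheable (setters
    // force MakesCalls rather than TakesSlowPath), then convert each recorded
    // access into a variant.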
    case access_put_by_id_list: {
        PolymorphicPutByIdList* list = stubInfo->u.putByIdList.list;

        PutByIdStatus result;
        result.m_state = Simple;

        State slowPathState = TakesSlowPath;
        for (unsigned i = 0; i < list->size(); ++i) {
            const PutByIdAccess& access = list->at(i);

            switch (access.type()) {
            case PutByIdAccess::Setter:
            case PutByIdAccess::CustomSetter:
                slowPathState = MakesCalls;
                break;
            default:
                break;
            }
        }

        for (unsigned i = 0; i < list->size(); ++i) {
            const PutByIdAccess& access = list->at(i);

            PutByIdVariant variant;

            switch (access.type()) {
            case PutByIdAccess::Replace: {
                Structure* structure = access.structure();
                PropertyOffset offset = structure->getConcurrently(uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(slowPathState);
                variant = PutByIdVariant::replace(structure, offset);
                break;
            }

            case PutByIdAccess::Transition: {
                PropertyOffset offset =
                    access.newStructure()->getConcurrently(uid);
                if (!isValidOffset(offset))
                    return PutByIdStatus(slowPathState);
                RefPtr<IntendedStructureChain> chain;
                if (access.chain()) {
                    chain = adoptRef(new IntendedStructureChain(
                        profiledBlock, access.oldStructure(), access.chain()));
                    if (!chain->isStillValid())
                        continue;
                }
                variant = PutByIdVariant::transition(
                    access.oldStructure(), access.newStructure(), chain.get(), offset);
                break;
            }

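            // A setter is only inlineable if we can reason both about the
            // lookup of the setter (ComplexGetStatus) and about the call to it
            // (CallLinkStatus).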
            case PutByIdAccess::Setter: {
                Structure* structure = access.structure();

                ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                    profiledBlock, structure, access.chain(), access.chainCount(), uid);

                switch (complexGetStatus.kind()) {
                case ComplexGetStatus::ShouldSkip:
                    continue;

                case ComplexGetStatus::TakesSlowPath:
                    return PutByIdStatus(slowPathState);

                case ComplexGetStatus::Inlineable: {
                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                        access.stubRoutine());
                    std::unique_ptr<CallLinkStatus> callLinkStatus =
                        std::make_unique<CallLinkStatus>(
                            CallLinkStatus::computeFor(
                                locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));

                    variant = PutByIdVariant::setter(
                        structure, complexGetStatus.offset(), complexGetStatus.chain(),
                        WTF::move(callLinkStatus));
                    break;
                }
                }
                break;
            }

            case PutByIdAccess::CustomSetter:
                return PutByIdStatus(MakesCalls);

            default:
                return PutByIdStatus(slowPathState);
            }

            if (!result.appendVariant(variant))
                return PutByIdStatus(slowPathState);
        }

        return result;
    }

    default:
        return PutByIdStatus(TakesSlowPath);
    }
}
#endif

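// When a DFG code block is available, consult its stubs first and fall back to
// the baseline block whenever they fail to yield a simple status.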
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* baselineBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap, StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
#if ENABLE(DFG_JIT)
    if (dfgBlock) {
        CallLinkStatus::ExitSiteData exitSiteData;
        {
            ConcurrentJITLocker locker(baselineBlock->m_lock);
            if (hasExitSite(locker, baselineBlock, codeOrigin.bytecodeIndex))
                return PutByIdStatus(TakesSlowPath);
            exitSiteData = CallLinkStatus::computeExitSiteData(
                locker, baselineBlock, codeOrigin.bytecodeIndex);
        }

        PutByIdStatus result;
        {
            ConcurrentJITLocker locker(dfgBlock->m_lock);
            result = computeForStubInfo(
                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
        }

        // We use TakesSlowPath in some cases where the stub was unset. That's weird and
        // it would be better not to do that. But it means that we have to defend
        // ourselves here.
        if (result.isSimple())
            return result;
    }
#else
    UNUSED_PARAM(dfgBlock);
    UNUSED_PARAM(dfgMap);
#endif

    return computeFor(baselineBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}

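// Compute a status purely from a set of structures, without any profiling
// data: each structure must admit either a cacheable replace or a cached
// transition, or we fall back to the slow path.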
PutByIdStatus PutByIdStatus::computeFor(JSGlobalObject* globalObject, const StructureSet& set, UniquedStringImpl* uid, bool isDirect)
{
    if (parseIndex(*uid))
        return PutByIdStatus(TakesSlowPath);

    if (set.isEmpty())
        return PutByIdStatus();

    PutByIdStatus result;
    result.m_state = Simple;
    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];

        if (structure->typeInfo().overridesGetOwnPropertySlot() && structure->typeInfo().type() != GlobalObjectType)
            return PutByIdStatus(TakesSlowPath);

        if (!structure->propertyAccessesAreCacheable())
            return PutByIdStatus(TakesSlowPath);

        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (isValidOffset(offset)) {
            if (attributes & CustomAccessor)
                return PutByIdStatus(MakesCalls);

            if (attributes & (Accessor | ReadOnly))
                return PutByIdStatus(TakesSlowPath);

            WatchpointSet* replaceSet = structure->propertyReplacementWatchpointSet(offset);
            if (!replaceSet || replaceSet->isStillValid()) {
                // When this executes, it'll create, and fire, this replacement watchpoint set.
                // That means that this has probably never executed or that something fishy is
                // going on. Also, we cannot create or fire the watchpoint set from the
                // concurrent JIT thread, so even if we wanted to do this, we'd need to do it
                // lazily on the main thread. Better to leave this alone and take the slow path.
                return PutByIdStatus(TakesSlowPath);
            }

            if (!result.appendVariant(PutByIdVariant::replace(structure, offset)))
                return PutByIdStatus(TakesSlowPath);
            continue;
        }

        // Our hypothesis is that we're doing a transition. Before we prove that this is
        // really true, we want to do some sanity checks.

        // Don't cache put transitions on dictionaries.
        if (structure->isDictionary())
            return PutByIdStatus(TakesSlowPath);

        // If the structure corresponds to something that isn't an object, then give up,
        // since we don't want to be adding properties to strings.
        if (!structure->typeInfo().isObject())
            return PutByIdStatus(TakesSlowPath);

        RefPtr<IntendedStructureChain> chain;
        if (!isDirect) {
            chain = adoptRef(new IntendedStructureChain(globalObject, structure));

            // If the prototype chain has setters or read-only properties, then give up.
            if (chain->mayInterceptStoreTo(uid))
                return PutByIdStatus(TakesSlowPath);

            // If the prototype chain hasn't been normalized (i.e. there are proxies or
            // dictionaries), then give up. The dictionary case would only happen if this
            // structure has not been used in an optimized put_by_id transition. And the
            // only reason to bail here is that we don't want the optimizing JIT to go and
            // flatten dictionaries when the evidence suggests that those objects were never
            // used as prototypes in a cacheable prototype access - i.e. there's a good
            // chance that some of the other checks below would fail anyway.
            if (structure->isProxy() || !chain->isNormalized())
                return PutByIdStatus(TakesSlowPath);
        }

        // We only optimize if there is already a structure that the transition is cached to.
        Structure* transition = Structure::addPropertyTransitionToExistingStructureConcurrently(structure, uid, 0, offset);
        if (!transition)
            return PutByIdStatus(TakesSlowPath);
        ASSERT(isValidOffset(offset));

        bool didAppend = result.appendVariant(
            PutByIdVariant::transition(structure, transition, chain.get(), offset));
        if (!didAppend)
            return PutByIdStatus(TakesSlowPath);
    }

    return result;
}

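// A put makes calls if it is known to invoke a setter, or if any of its simple
// variants does.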
bool PutByIdStatus::makesCalls() const
{
    if (m_state == MakesCalls)
        return true;

    if (m_state != Simple)
        return false;

    for (unsigned i = m_variants.size(); i--;) {
        if (m_variants[i].makesCalls())
            return true;
    }

    return false;
}

void PutByIdStatus::dump(PrintStream& out) const
{
    switch (m_state) {
    case NoInformation:
        out.print("(NoInformation)");
        return;

    case Simple:
        out.print("(", listDump(m_variants), ")");
        return;

    case TakesSlowPath:
        out.print("(TakesSlowPath)");
        return;

    case MakesCalls:
        out.print("(MakesCalls)");
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace JSC