/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ExecutableAllocator.h"

#include "JSCInlines.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#include "CodeProfiling.h"
#include <wtf/HashSet.h>
#include <wtf/MetaAllocator.h>
#include <wtf/NeverDestroyed.h>
#include <wtf/PageReservation.h>
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#include <wtf/PassOwnPtr.h>
#endif
#include <wtf/ThreadingPrimitives.h>
#include <wtf/VMTags.h>
#endif

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#include <sys/mman.h> // For mprotect() in reprotectRegion() below.
#endif

// Uncomment to create an artificial executable memory usage limit. This limit
// is imperfect and is primarily useful for testing the VM's ability to handle
// out-of-executable-memory situations.
// #define EXECUTABLE_MEMORY_LIMIT 1000000

#if ENABLE(ASSEMBLER)

using namespace WTF;

namespace JSC {

#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)

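// An executable-memory allocator that reserves address space in large chunks
// and commits pages on demand. Each live instance registers itself in a global
// set so the static accounting helpers below can sum usage across allocators.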
class DemandExecutableAllocator : public MetaAllocator {
public:
    DemandExecutableAllocator()
        : MetaAllocator(jitAllocationGranule)
    {
        std::lock_guard<std::mutex> lock(allocatorsMutex());
        allocators().add(this);
        // Don't preallocate any memory here.
    }

    virtual ~DemandExecutableAllocator()
    {
        {
            std::lock_guard<std::mutex> lock(allocatorsMutex());
            allocators().remove(this);
        }
        for (unsigned i = 0; i < reservations.size(); ++i)
            reservations.at(i).deallocate();
    }

    static size_t bytesAllocatedByAllAllocators()
    {
        size_t total = 0;
        std::lock_guard<std::mutex> lock(allocatorsMutex());
        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
            total += (*allocator)->bytesAllocated();
        return total;
    }

    static size_t bytesCommittedByAllAllocators()
    {
        size_t total = 0;
        std::lock_guard<std::mutex> lock(allocatorsMutex());
        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
            total += (*allocator)->bytesCommitted();
        return total;
    }

#if ENABLE(META_ALLOCATOR_PROFILE)
    static void dumpProfileFromAllAllocators()
    {
        std::lock_guard<std::mutex> lock(allocatorsMutex());
        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
            (*allocator)->dumpProfile();
    }
#endif

protected:
    virtual void* allocateNewSpace(size_t& numPages)
    {
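        // Round the request up so the reservation covers whole multiples of
        // JIT_ALLOCATOR_LARGE_ALLOC_SIZE, then convert that size back into
        // pages; reserving in large chunks keeps the reservation count low.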
        size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();

        ASSERT(newNumPages >= numPages);

        numPages = newNumPages;

#ifdef EXECUTABLE_MEMORY_LIMIT
        if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
            return 0;
#endif

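        // Only reserve address space here; physical pages are committed lazily
        // through notifyNeedPage() below.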
        PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        RELEASE_ASSERT(reservation);

        reservations.append(reservation);

        return reservation.base();
    }

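    // Callbacks from MetaAllocator: physical pages are committed only when
    // first handed out and decommitted again once reported free.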
    virtual void notifyNeedPage(void* page)
    {
        OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
    }

    virtual void notifyPageIsFree(void* page)
    {
        OSAllocator::decommit(page, pageSize());
    }

private:
    Vector<PageReservation, 16> reservations;
    static HashSet<DemandExecutableAllocator*>& allocators()
    {
        DEPRECATED_DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
        return sAllocators;
    }

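    // NeverDestroyed gives the mutex static storage without an exit-time
    // destructor, so it stays usable while other static objects are torn down.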
    static std::mutex& allocatorsMutex()
    {
        static NeverDestroyed<std::mutex> mutex;

        return mutex;
    }
};

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
void ExecutableAllocator::initializeAllocator()
{
}
#else
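// Without WX exclusivity, a single process-wide allocator is shared by all
// VMs. It is created once in initializeAllocator() and never destroyed.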
static DemandExecutableAllocator* gAllocator;

namespace {
static inline DemandExecutableAllocator* allocator()
{
    return gAllocator;
}
}

void ExecutableAllocator::initializeAllocator()
{
    ASSERT(!gAllocator);
    gAllocator = new DemandExecutableAllocator();
    CodeProfiling::notifyAllocator(gAllocator);
}
#endif

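// Under ASSEMBLER_WX_EXCLUSIVE each VM owns a private allocator; otherwise
// the constructor merely asserts that the shared global allocator exists.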
ExecutableAllocator::ExecutableAllocator(VM&)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    : m_allocator(adoptPtr(new DemandExecutableAllocator()))
#endif
{
    ASSERT(allocator());
}

ExecutableAllocator::~ExecutableAllocator()
{
}

bool ExecutableAllocator::isValid() const
{
    return true;
}

bool ExecutableAllocator::underMemoryPressure()
{
#ifdef EXECUTABLE_MEMORY_LIMIT
    return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
#else
    return false;
#endif
}

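// When an artificial limit is defined, the multiplier is
// EXECUTABLE_MEMORY_LIMIT / (EXECUTABLE_MEMORY_LIMIT - bytesAllocated): it
// grows without bound as allocation approaches the limit and is clamped below
// at 1.0. Without a limit it is always 1.0.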
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    double result;
#ifdef EXECUTABLE_MEMORY_LIMIT
    size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
    if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
        bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
    result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
        (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
#else
    UNUSED_PARAM(addedMemoryUsage);
    result = 1.0;
#endif
    if (result < 1.0)
        result = 1.0;
    return result;
}

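// Allocation failure is tolerated only when the caller passed an effort other
// than JITCompilationMustSucceed; otherwise the RELEASE_ASSERT below crashes.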
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
    RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
    return result.release();
}

size_t ExecutableAllocator::committedByteCount()
{
    return DemandExecutableAllocator::bytesCommittedByAllAllocators();
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void ExecutableAllocator::dumpProfile()
{
    DemandExecutableAllocator::dumpProfileFromAllAllocators();
}
#endif

#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)

#if OS(WINDOWS)
#error "ASSEMBLER_WX_EXCLUSIVE not yet supported on this platform."
#endif

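// W^X: a region is writable or executable, never both at once.
// reprotectRegion() flips whole pages between RW (for code emission) and RX
// (for execution).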
void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
{
    size_t pageSize = WTF::pageSize();

    // Calculate the start of the page containing this region,
    // and account for this extra memory within size.
    intptr_t startPtr = reinterpret_cast<intptr_t>(start);
    intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
    void* pageStart = reinterpret_cast<void*>(pageStartPtr);
    size += (startPtr - pageStartPtr);

    // Round size up to an integral number of pages.
    size += (pageSize - 1);
    size &= ~(pageSize - 1);

    mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
}

#endif

} // namespace JSC

#endif // ENABLE(ASSEMBLER)