]>
Commit | Line | Data |
---|---|---|
93a37866 A |
1 | /* |
2 | * Copyright (C) 2013 Apple Inc. All rights reserved. | |
3 | * | |
4 | * Redistribution and use in source and binary forms, with or without | |
5 | * modification, are permitted provided that the following conditions | |
6 | * are met: | |
7 | * 1. Redistributions of source code must retain the above copyright | |
8 | * notice, this list of conditions and the following disclaimer. | |
9 | * 2. Redistributions in binary form must reproduce the above copyright | |
10 | * notice, this list of conditions and the following disclaimer in the | |
11 | * documentation and/or other materials provided with the distribution. | |
12 | * | |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
24 | */ | |
25 | ||
26 | #ifndef DFGAllocator_h | |
27 | #define DFGAllocator_h | |
28 | ||
93a37866 A |
29 | #if ENABLE(DFG_JIT) |
30 | ||
31 | #include "DFGCommon.h" | |
32 | #include <wtf/PageAllocationAligned.h> | |
33 | #include <wtf/StdLibExtras.h> | |
34 | ||
35 | namespace JSC { namespace DFG { | |
36 | ||
// Custom pool allocator for exactly one type (type T). It has a fast (O(1), only a few
// instructions) allocate(), and a similarly fast free(). Recycling works if either of
// the following is true:
// - T has a trivial destructor. In that case you don't ever have to call free() on
//   anything. You can just call freeAll() instead.
// - You call free() on all T's that you allocated, and never use freeAll().
43 | ||
template<typename T>
class Allocator {
public:
    Allocator();
    ~Allocator(); // Returns all memory to the OS; does not run ~T (see class contract above).

    void* allocate(); // Use placement new to allocate, and avoid using this method.
    void free(T*); // Call this method to delete; never use 'delete' directly.

    void freeAll(); // Only call this if T has a trivial destructor.
    void reset(); // Like freeAll(), but also returns all memory to the OS.

    // Stable index of an object: objects in older regions get lower indices.
    unsigned indexOf(const T*);

    // Recovers the owning allocator from any live object, via the Region header.
    static Allocator* allocatorOf(const T*);

private:
    void* bumpAllocate();
    void* freeListAllocate();
    void* allocateSlow();

    // A Region is one size()-byte, size()-aligned page run. The header (this
    // struct) lives at the start; the T payload follows at headerSize().
    struct Region {
        static size_t size() { return 64 * KB; }
        // Payload offset; at least sizeof(Region) so the header is never
        // overlapped, and at least sizeof(T) (presumably to keep the first
        // cell T-sized-aligned — NOTE(review): confirm for over-aligned T).
        static size_t headerSize() { return std::max(sizeof(Region), sizeof(T)); }
        static unsigned numberOfThingsPerRegion() { return (size() - headerSize()) / sizeof(T); }
        T* data() { return bitwise_cast<T*>(bitwise_cast<char*>(this) + headerSize()); }
        // Unsigned wrap-around makes one comparison cover both "below data()"
        // and "past the end".
        bool isInThisRegion(const T* pointer) { return static_cast<unsigned>(pointer - data()) < numberOfThingsPerRegion(); }
        // Valid because allocateSlow() maps each region aligned to size():
        // masking off the low bits of any interior pointer yields the header.
        static Region* regionFor(const T* pointer) { return bitwise_cast<Region*>(bitwise_cast<uintptr_t>(pointer) & ~(size() - 1)); }

        PageAllocationAligned m_allocation; // Owns the underlying pages.
        Allocator* m_allocator; // Back-pointer for allocatorOf().
        Region* m_next; // Singly linked, newest region first.
    };

    void freeRegionsStartingAt(Region*);
    void startBumpingIn(Region*);

    Region* m_regionHead; // Head of the region list (most recently mapped).
    void** m_freeListHead; // Dead cells, each storing the next free cell in its first word.
    T* m_bumpEnd; // One past the current bump region's payload.
    unsigned m_bumpRemaining; // Cells still available below m_bumpEnd; 0 means bump space exhausted.
};
86 | ||
87 | template<typename T> | |
88 | inline Allocator<T>::Allocator() | |
89 | : m_regionHead(0) | |
90 | , m_freeListHead(0) | |
91 | , m_bumpRemaining(0) | |
92 | { | |
93 | } | |
94 | ||
// Returns every region to the OS. Does not run ~T on outstanding objects; per
// the class contract, either all objects were free()'d already or T's
// destructor is trivial.
template<typename T>
inline Allocator<T>::~Allocator()
{
    reset();
}
100 | ||
101 | template<typename T> | |
102 | ALWAYS_INLINE void* Allocator<T>::allocate() | |
103 | { | |
104 | void* result = bumpAllocate(); | |
105 | if (LIKELY(!!result)) | |
106 | return result; | |
107 | return freeListAllocate(); | |
108 | } | |
109 | ||
110 | template<typename T> | |
111 | void Allocator<T>::free(T* object) | |
112 | { | |
113 | object->~T(); | |
114 | ||
115 | void** cell = bitwise_cast<void**>(object); | |
116 | *cell = m_freeListHead; | |
117 | m_freeListHead = cell; | |
118 | } | |
119 | ||
// Bulk-discards every live object without running ~T. Only valid when T's
// destructor is trivial (see the class-level contract).
template<typename T>
void Allocator<T>::freeAll()
{
    if (!m_regionHead) {
        // No regions exist (never allocated, or reset() ran), so there can be
        // no bump space or free-list entries either.
        ASSERT(!m_bumpRemaining);
        ASSERT(!m_freeListHead);
        return;
    }

    // Since the caller is opting out of calling the destructor for any allocated thing,
    // we have two choices, plus a continuum between: we can either just delete all regions
    // (i.e. call reset()), or we can make all regions available for reuse. We do something
    // that optimizes for (a) speed of freeAll(), (b) the assumption that if the user calls
    // freeAll() then they will probably be calling allocate() in the near future. Namely,
    // we free all but one region, and make the remaining region a bump allocation region.

    freeRegionsStartingAt(m_regionHead->m_next);

    m_regionHead->m_next = 0;
    m_freeListHead = 0;
    startBumpingIn(m_regionHead);
}
142 | ||
143 | template<typename T> | |
144 | void Allocator<T>::reset() | |
145 | { | |
146 | freeRegionsStartingAt(m_regionHead); | |
147 | ||
148 | m_regionHead = 0; | |
149 | m_freeListHead = 0; | |
150 | m_bumpRemaining = 0; | |
151 | } | |
152 | ||
153 | template<typename T> | |
154 | unsigned Allocator<T>::indexOf(const T* object) | |
155 | { | |
81345200 A |
156 | unsigned numRegions = 0; |
157 | for (Region* region = m_regionHead; region; region = region->m_next) | |
158 | numRegions++; | |
159 | unsigned regionIndex = 0; | |
93a37866 A |
160 | for (Region* region = m_regionHead; region; region = region->m_next) { |
161 | if (region->isInThisRegion(object)) | |
81345200 A |
162 | return (numRegions - 1 - regionIndex) * Region::numberOfThingsPerRegion() + (object - region->data()); |
163 | regionIndex++; | |
93a37866 A |
164 | } |
165 | CRASH(); | |
166 | return 0; | |
167 | } | |
168 | ||
169 | template<typename T> | |
170 | Allocator<T>* Allocator<T>::allocatorOf(const T* object) | |
171 | { | |
172 | return Region::regionFor(object)->m_allocator; | |
173 | } | |
174 | ||
175 | template<typename T> | |
176 | ALWAYS_INLINE void* Allocator<T>::bumpAllocate() | |
177 | { | |
178 | if (unsigned remaining = m_bumpRemaining) { | |
179 | remaining--; | |
180 | m_bumpRemaining = remaining; | |
181 | return m_bumpEnd - (remaining + 1); | |
182 | } | |
183 | return 0; | |
184 | } | |
185 | ||
186 | template<typename T> | |
187 | void* Allocator<T>::freeListAllocate() | |
188 | { | |
189 | void** result = m_freeListHead; | |
190 | if (UNLIKELY(!result)) | |
191 | return allocateSlow(); | |
192 | m_freeListHead = bitwise_cast<void**>(*result); | |
193 | return result; | |
194 | } | |
195 | ||
// Slow path: maps a fresh region, links it at the head of the region list,
// arms bump allocation in it, and returns the first cell. Only reached when
// both the bump space and the free list are empty.
template<typename T>
void* Allocator<T>::allocateSlow()
{
    ASSERT(!m_freeListHead);
    ASSERT(!m_bumpRemaining);

    if (logCompilationChanges())
        dataLog("Allocating another allocator region.\n");

    // Aligned-to-size allocation is what makes Region::regionFor()'s
    // mask-the-low-bits trick valid.
    PageAllocationAligned allocation = PageAllocationAligned::allocate(Region::size(), Region::size(), OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    // The Region header lives at the start of the freshly mapped pages;
    // initialize it in place before publishing the region on the list.
    Region* region = static_cast<Region*>(allocation.base());
    region->m_allocation = allocation;
    region->m_allocator = this;
    startBumpingIn(region);
    region->m_next = m_regionHead;
    m_regionHead = region;

    void* result = bumpAllocate();
    ASSERT(result); // A brand-new region always has at least one cell.
    return result;
}
219 | ||
220 | template<typename T> | |
221 | void Allocator<T>::freeRegionsStartingAt(typename Allocator<T>::Region* region) | |
222 | { | |
223 | while (region) { | |
224 | Region* nextRegion = region->m_next; | |
225 | region->m_allocation.deallocate(); | |
226 | region = nextRegion; | |
227 | } | |
228 | } | |
229 | ||
230 | template<typename T> | |
231 | void Allocator<T>::startBumpingIn(typename Allocator<T>::Region* region) | |
232 | { | |
233 | m_bumpEnd = region->data() + Region::numberOfThingsPerRegion(); | |
234 | m_bumpRemaining = Region::numberOfThingsPerRegion(); | |
235 | } | |
236 | ||
237 | } } // namespace JSC::DFG | |
238 | ||
239 | #endif // ENABLE(DFG_JIT) | |
240 | ||
241 | #endif // DFGAllocator_h | |
242 |