/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGAllocator_h
#define DFGAllocator_h

#if ENABLE(DFG_JIT)

#include "DFGCommon.h"
#include <wtf/StdLibExtras.h>

namespace JSC { namespace DFG {

// Custom pool allocator for exactly one type (type T). Both allocate() and
// free() are fast: O(1), only a few instructions. Recycling works if either
// of the following is true:
// - T has a trivial destructor. In that case you never have to call free()
//   on anything; you can just call freeAll() instead.
// - You call free() on every T that you allocated, and never use freeAll().

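// A minimal usage sketch. Node is a hypothetical stand-in for T, not part of
// this file; clients typically wrap the placement-new step in an operator new
// overload that forwards to allocate():
//
//     Allocator<Node> allocator;
//     Node* node = new (allocator.allocate()) Node(/* ... */);
//     allocator.free(node); // Runs ~Node() and recycles the cell.
//     // Or, if Node is trivially destructible, reclaim everything at once:
//     allocator.freeAll();
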
template<typename T>
class Allocator {
public:
    Allocator();
    ~Allocator();

    void* allocate(); // Returns raw memory for one T; construct into it with placement new. Avoid calling this directly (see the usage sketch above).
    void free(T*); // Call this method to delete; never use 'delete' directly.

    void freeAll(); // Only call this if you've either freed everything or if T has a trivial destructor.
    void reset(); // Like freeAll(), but also returns all memory to the OS.

    unsigned indexOf(const T*);

    static Allocator* allocatorOf(const T*);

private:
    void* bumpAllocate();
    void* freeListAllocate();
    void* allocateSlow();

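    // Each region is one size()-sized chunk, allocated with its size as its
    // alignment (see allocateSlow()). That alignment is what lets regionFor()
    // recover the Region header by masking an interior pointer down to the
    // nearest region boundary. The header sits at the front of the chunk and
    // the array of Ts follows it.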
    struct Region {
        static size_t size() { return 64 * KB; }
        static size_t headerSize() { return std::max(sizeof(Region), sizeof(T)); }
        static unsigned numberOfThingsPerRegion() { return (size() - headerSize()) / sizeof(T); }
        T* data() { return bitwise_cast<T*>(bitwise_cast<char*>(this) + headerSize()); }
        // The unsigned cast turns a negative difference into a huge value, so one
        // comparison checks both bounds.
        bool isInThisRegion(const T* pointer) { return static_cast<unsigned>(pointer - data()) < numberOfThingsPerRegion(); }
        static Region* regionFor(const T* pointer) { return bitwise_cast<Region*>(bitwise_cast<uintptr_t>(pointer) & ~(size() - 1)); }

        void* m_allocation; // Chunk base, as returned by fastAlignedMalloc().
        Allocator* m_allocator;
        Region* m_next;
    };

    void freeRegionsStartingAt(Region*);
    void startBumpingIn(Region*);

    Region* m_regionHead; // Most recently allocated region first.
    void** m_freeListHead; // Singly linked list threaded through freed cells.
    T* m_bumpEnd; // One past the last slot of the current bump region.
    unsigned m_bumpRemaining; // Slots still unallocated in the bump region.
};

template<typename T>
inline Allocator<T>::Allocator()
    : m_regionHead(0)
    , m_freeListHead(0)
    , m_bumpRemaining(0)
{
}

template<typename T>
inline Allocator<T>::~Allocator()
{
    reset();
}

template<typename T>
ALWAYS_INLINE void* Allocator<T>::allocate()
{
    // Bump allocation is the expected fast path; fall back to the free list,
    // and from there to allocating a fresh region.
    void* result = bumpAllocate();
    if (LIKELY(!!result))
        return result;
    return freeListAllocate();
}

template<typename T>
void Allocator<T>::free(T* object)
{
    object->~T();

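    // Thread the dead cell onto the free list, reusing its first word as the link.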
    void** cell = bitwise_cast<void**>(object);
    *cell = m_freeListHead;
    m_freeListHead = cell;
}

template<typename T>
void Allocator<T>::freeAll()
{
    if (!m_regionHead) {
        ASSERT(!m_bumpRemaining);
        ASSERT(!m_freeListHead);
        return;
    }

    // Since the caller is opting out of calling the destructor for any allocated thing,
    // we have two choices, plus a continuum in between: we can either just delete all
    // regions (i.e. call reset()), or we can make all regions available for reuse. We do
    // something that optimizes for (a) the speed of freeAll() and (b) the assumption
    // that if the user calls freeAll() then they will probably be calling allocate() in
    // the near future. Namely, we free all but one region, and make the remaining
    // region a bump allocation region.

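    // Drop all regions but the head. Any free-list links threaded through those
    // regions die with them, so the free list is cleared below as well.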
    freeRegionsStartingAt(m_regionHead->m_next);

    m_regionHead->m_next = 0;
    m_freeListHead = 0;
    startBumpingIn(m_regionHead);
}

template<typename T>
void Allocator<T>::reset()
{
    freeRegionsStartingAt(m_regionHead);

    m_regionHead = 0;
    m_freeListHead = 0;
    m_bumpRemaining = 0;
}

template<typename T>
unsigned Allocator<T>::indexOf(const T* object)
{
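    // Regions are prepended to the list, so the oldest region is last. Indices
    // count from the oldest region: prepending a new region increments both
    // numRegions and every older region's position by one, so an object's
    // index stays stable as regions are added.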
    unsigned numRegions = 0;
    for (Region* region = m_regionHead; region; region = region->m_next)
        numRegions++;
    unsigned regionIndex = 0;
    for (Region* region = m_regionHead; region; region = region->m_next) {
        if (region->isInThisRegion(object))
            return (numRegions - 1 - regionIndex) * Region::numberOfThingsPerRegion() + (object - region->data());
        regionIndex++;
    }
    CRASH();
    return 0;
}

template<typename T>
Allocator<T>* Allocator<T>::allocatorOf(const T* object)
{
    return Region::regionFor(object)->m_allocator;
}

template<typename T>
ALWAYS_INLINE void* Allocator<T>::bumpAllocate()
{
    if (unsigned remaining = m_bumpRemaining) {
        remaining--;
        m_bumpRemaining = remaining;
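        // Slots are handed out from the front of the region toward m_bumpEnd:
        // with 'remaining' slots still free, the next slot is m_bumpEnd - remaining - 1.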
        return m_bumpEnd - (remaining + 1);
    }
    return 0;
}

template<typename T>
void* Allocator<T>::freeListAllocate()
{
    void** result = m_freeListHead;
    if (UNLIKELY(!result))
        return allocateSlow();
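    // Pop the head cell; its first word holds the next free cell.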
    m_freeListHead = bitwise_cast<void**>(*result);
    return result;
}

template<typename T>
void* Allocator<T>::allocateSlow()
{
    ASSERT(!m_freeListHead);
    ASSERT(!m_bumpRemaining);

    if (logCompilationChanges())
        dataLog("Allocating another allocator region.\n");

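    // Align the region to its own size so that Region::regionFor() can recover
    // the header by masking the low bits of any interior pointer.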
    void* allocation = fastAlignedMalloc(Region::size(), Region::size());
    Region* region = static_cast<Region*>(allocation);
    region->m_allocation = allocation;
    region->m_allocator = this;
    startBumpingIn(region);
    region->m_next = m_regionHead;
    m_regionHead = region;

    void* result = bumpAllocate();
    ASSERT(result);
    return result;
}

template<typename T>
void Allocator<T>::freeRegionsStartingAt(typename Allocator<T>::Region* region)
{
    while (region) {
        Region* nextRegion = region->m_next;
        fastAlignedFree(region->m_allocation);
        region = nextRegion;
    }
}

template<typename T>
void Allocator<T>::startBumpingIn(typename Allocator<T>::Region* region)
{
    m_bumpEnd = region->data() + Region::numberOfThingsPerRegion();
    m_bumpRemaining = Region::numberOfThingsPerRegion();
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)

#endif // DFGAllocator_h