/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGAllocator_h
#define DFGAllocator_h

#include <wtf/Platform.h>

#if ENABLE(DFG_JIT)

#include "DFGCommon.h"
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>

namespace JSC { namespace DFG {

// Custom pool allocator for exactly one type (type T). It has a fast (O(1), only a few
// instructions) allocate() and a similarly fast free(). Recycling works if either of
// the following is true:
// - T has a trivial destructor. In that case you never have to call free() on
//   anything; you can just call freeAll() instead.
// - You call free() on all T's that you allocated, and never use freeAll().
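//
// A minimal usage sketch (Thing is a hypothetical type with a trivial destructor):
//
//     Allocator<Thing> allocator;
//     Thing* thing = new (allocator.allocate()) Thing(); // construct with placement new
//     // ... use thing ...
//     allocator.free(thing); // runs ~Thing() and recycles the cell; or, in bulk:
//     allocator.freeAll(); // recycles every Thing at once (trivial destructor only)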

template<typename T>
class Allocator {
public:
    Allocator();
    ~Allocator();

    void* allocate(); // Returns a raw, uninitialized cell; construct into it with placement new, and never use plain 'new'.
    void free(T*); // Call this method to delete; never use 'delete' directly.

    void freeAll(); // Only call this if T has a trivial destructor.
    void reset(); // Like freeAll(), but also returns all memory to the OS.

    unsigned indexOf(const T*); // O(#regions): maps an object to a dense index across all regions.

    static Allocator* allocatorOf(const T*); // Recovers the owning allocator from any live object.

private:
    void* bumpAllocate();
    void* freeListAllocate();
    void* allocateSlow();

    struct Region {
        static size_t size() { return 64 * KB; }
        static size_t headerSize() { return std::max(sizeof(Region), sizeof(T)); }
        static unsigned numberOfThingsPerRegion() { return (size() - headerSize()) / sizeof(T); }
        T* data() { return bitwise_cast<T*>(bitwise_cast<char*>(this) + headerSize()); }
        bool isInThisRegion(const T* pointer) { return static_cast<unsigned>(pointer - data()) < numberOfThingsPerRegion(); } // A negative difference becomes huge once cast to unsigned, so this rejects pointers on either side of the region.
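        // Regions are allocated with alignment equal to their size (see allocateSlow()),
        // so masking off the low bits of any object pointer recovers its Region header.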
        static Region* regionFor(const T* pointer) { return bitwise_cast<Region*>(bitwise_cast<uintptr_t>(pointer) & ~(size() - 1)); }

        PageAllocationAligned m_allocation;
        Allocator* m_allocator;
        Region* m_next;
    };

    void freeRegionsStartingAt(Allocator::Region*);
    void startBumpingIn(Allocator::Region*);

    Region* m_regionHead;
    void** m_freeListHead;
    T* m_bumpEnd;
    unsigned m_bumpRemaining;
};

template<typename T>
inline Allocator<T>::Allocator()
    : m_regionHead(0)
    , m_freeListHead(0)
    , m_bumpRemaining(0) // m_bumpEnd stays uninitialized; it is never read while m_bumpRemaining is zero.
{
}

template<typename T>
inline Allocator<T>::~Allocator()
{
    reset();
}

template<typename T>
ALWAYS_INLINE void* Allocator<T>::allocate()
{
    // Fast path: bump-allocate from the current region; otherwise fall back to the
    // free list (and, beyond that, to allocating a fresh region).
    void* result = bumpAllocate();
    if (LIKELY(!!result))
        return result;
    return freeListAllocate();
}

template<typename T>
void Allocator<T>::free(T* object)
{
    object->~T();

    // Push the dead cell onto the intrusive free list, reusing the object's own
    // storage as the link.
    void** cell = bitwise_cast<void**>(object);
    *cell = m_freeListHead;
    m_freeListHead = cell;
}

template<typename T>
void Allocator<T>::freeAll()
{
    if (!m_regionHead) {
        ASSERT(!m_bumpRemaining);
        ASSERT(!m_freeListHead);
        return;
    }

    // Since the caller is opting out of calling the destructor for any allocated thing,
    // we have two choices, plus a continuum between them: we can either just delete all
    // regions (i.e. call reset()), or we can make all regions available for reuse. We do
    // something that optimizes for (a) the speed of freeAll() and (b) the assumption that
    // if the user calls freeAll() then they will probably be calling allocate() in the
    // near future. Namely, we free all but one region, and make the remaining region a
    // bump allocation region.

    freeRegionsStartingAt(m_regionHead->m_next);

    m_regionHead->m_next = 0;
    m_freeListHead = 0;
    startBumpingIn(m_regionHead);
}

template<typename T>
void Allocator<T>::reset()
{
    freeRegionsStartingAt(m_regionHead);

    m_regionHead = 0;
    m_freeListHead = 0;
    m_bumpRemaining = 0;
}

template<typename T>
unsigned Allocator<T>::indexOf(const T* object)
{
    unsigned baseIndex = 0;
    for (Region* region = m_regionHead; region; region = region->m_next) {
        if (region->isInThisRegion(object))
            return baseIndex + (object - region->data());
        baseIndex += Region::numberOfThingsPerRegion();
    }
    CRASH();
    return 0;
}

template<typename T>
Allocator<T>* Allocator<T>::allocatorOf(const T* object)
{
    return Region::regionFor(object)->m_allocator;
}

template<typename T>
ALWAYS_INLINE void* Allocator<T>::bumpAllocate()
{
    if (unsigned remaining = m_bumpRemaining) {
        remaining--;
        m_bumpRemaining = remaining;
        // Slots are handed out from the low end of the region upward: with N slots
        // still unallocated (including this one), the next object is m_bumpEnd - N.
        return m_bumpEnd - (remaining + 1);
    }
    return 0;
}

template<typename T>
void* Allocator<T>::freeListAllocate()
{
    // Pop the head of the free list; if the list is empty, go grab a new region.
    void** result = m_freeListHead;
    if (UNLIKELY(!result))
        return allocateSlow();
    m_freeListHead = bitwise_cast<void**>(*result);
    return result;
}

template<typename T>
void* Allocator<T>::allocateSlow()
{
    ASSERT(!m_freeListHead);
    ASSERT(!m_bumpRemaining);

    if (logCompilationChanges())
        dataLog("Allocating another allocator region.\n");

    // Allocate the region aligned to its own size, so that Region::regionFor() can
    // recover the region header by masking the low bits of any object pointer.
    PageAllocationAligned allocation = PageAllocationAligned::allocate(Region::size(), Region::size(), OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    Region* region = static_cast<Region*>(allocation.base());
    region->m_allocation = allocation;
    region->m_allocator = this;
    startBumpingIn(region);
    region->m_next = m_regionHead;
    m_regionHead = region;

    void* result = bumpAllocate();
    ASSERT(result);
    return result;
}

template<typename T>
void Allocator<T>::freeRegionsStartingAt(typename Allocator<T>::Region* region)
{
    while (region) {
        // Grab the next link before deallocate() releases the memory it lives in.
        Region* nextRegion = region->m_next;
        region->m_allocation.deallocate();
        region = nextRegion;
    }
}

template<typename T>
void Allocator<T>::startBumpingIn(typename Allocator<T>::Region* region)
{
    m_bumpEnd = region->data() + Region::numberOfThingsPerRegion();
    m_bumpRemaining = Region::numberOfThingsPerRegion();
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)

#endif // DFGAllocator_h