/*
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef BumpPointerAllocator_h
#define BumpPointerAllocator_h

#include <wtf/PageAllocation.h>

#define MINIMUM_BUMP_POOL_SIZE 0x1000
35 class BumpPointerPool
{
37 // ensureCapacity will check whether the current pool has capacity to
38 // allocate 'size' bytes of memory If it does not, it will attempt to
39 // allocate a new pool (which will be added to this one in a chain).
41 // If allocation fails (out of memory) this method will return null.
42 // If the return value is non-null, then callers should update any
43 // references they have to this current (possibly full) BumpPointerPool
44 // to instead point to the newly returned BumpPointerPool.
45 BumpPointerPool
* ensureCapacity(size_t size
)
47 void* allocationEnd
= static_cast<char*>(m_current
) + size
;
48 ASSERT(allocationEnd
> m_current
); // check for overflow
49 if (allocationEnd
<= static_cast<void*>(this))
51 return ensureCapacityCrossPool(this, size
);
54 // alloc should only be called after calling ensureCapacity; as such
55 // alloc will never fail.
56 void* alloc(size_t size
)
58 void* current
= m_current
;
59 void* allocationEnd
= static_cast<char*>(current
) + size
;
60 ASSERT(allocationEnd
> current
); // check for overflow
61 ASSERT(allocationEnd
<= static_cast<void*>(this));
62 m_current
= allocationEnd
;
66 // The dealloc method releases memory allocated using alloc. Memory
67 // must be released in a LIFO fashion, e.g. if the client calls alloc
68 // four times, returning pointer A, B, C, D, then the only valid order
69 // in which these may be deallocaed is D, C, B, A.
71 // The client may optionally skip some deallocations. In the example
72 // above, it would be valid to only explicitly dealloc C, A (D being
73 // dealloced along with C, B along with A).
75 // If pointer was not allocated from this pool (or pools) then dealloc
76 // will CRASH(). Callers should update any references they have to
77 // this current BumpPointerPool to instead point to the returned
79 BumpPointerPool
* dealloc(void* position
)
81 if ((position
>= m_start
) && (position
<= static_cast<void*>(this))) {
82 ASSERT(position
<= m_current
);
86 return deallocCrossPool(this, position
);
90 // Placement operator new, returns the last 'size' bytes of allocation for use as this.
91 void* operator new(size_t size
, const PageAllocation
& allocation
)
93 ASSERT(size
< allocation
.size());
94 return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation
.base()) + allocation
.size()) - size
;
97 BumpPointerPool(const PageAllocation
& allocation
)
98 : m_current(allocation
.base())
99 , m_start(allocation
.base())
102 , m_allocation(allocation
)
106 static BumpPointerPool
* create(size_t minimumCapacity
= 0)
108 // Add size of BumpPointerPool object, check for overflow.
109 minimumCapacity
+= sizeof(BumpPointerPool
);
110 if (minimumCapacity
< sizeof(BumpPointerPool
))
113 size_t poolSize
= MINIMUM_BUMP_POOL_SIZE
;
114 while (poolSize
< minimumCapacity
) {
116 // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
117 ASSERT(!(MINIMUM_BUMP_POOL_SIZE
& (MINIMUM_BUMP_POOL_SIZE
- 1)));
122 PageAllocation allocation
= PageAllocation::allocate(poolSize
);
124 return new(allocation
) BumpPointerPool(allocation
);
133 BumpPointerPool
* nextNext
= m_next
->m_next
;
141 m_allocation
.deallocate();
144 static BumpPointerPool
* ensureCapacityCrossPool(BumpPointerPool
* previousPool
, size_t size
)
146 // The pool passed should not have capacity, so we'll start with the next one.
147 ASSERT(previousPool
);
148 ASSERT((static_cast<char*>(previousPool
->m_current
) + size
) > previousPool
->m_current
); // check for overflow
149 ASSERT((static_cast<char*>(previousPool
->m_current
) + size
) > static_cast<void*>(previousPool
));
150 BumpPointerPool
* pool
= previousPool
->m_next
;
154 // We've run to the end; allocate a new pool.
155 pool
= BumpPointerPool::create(size
);
156 previousPool
->m_next
= pool
;
157 pool
->m_previous
= previousPool
;
162 void* current
= pool
->m_current
;
163 void* allocationEnd
= static_cast<char*>(current
) + size
;
164 ASSERT(allocationEnd
> current
); // check for overflow
165 if (allocationEnd
<= static_cast<void*>(pool
))
170 static BumpPointerPool
* deallocCrossPool(BumpPointerPool
* pool
, void* position
)
172 // Should only be called if position is not in the current pool.
173 ASSERT((position
< pool
->m_start
) || (position
> static_cast<void*>(pool
)));
176 // Unwind the current pool to the start, move back in the chain to the previous pool.
177 pool
->m_current
= pool
->m_start
;
178 pool
= pool
->m_previous
;
180 // position was nowhere in the chain!
184 if ((position
>= pool
->m_start
) && (position
<= static_cast<void*>(pool
))) {
185 ASSERT(position
<= pool
->m_current
);
186 pool
->m_current
= position
;
194 BumpPointerPool
* m_next
;
195 BumpPointerPool
* m_previous
;
196 PageAllocation m_allocation
;
198 friend class BumpPointerAllocator
;
// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
// can be used for LIFO (stack like) allocation.
//
// To begin allocating using this class call startAllocator(). The result
// of this method will be null if the initial pool allocation fails, or a
// pointer to a BumpPointerPool object that can be used to perform
// allocations. Whilst running no memory will be released until
// stopAllocator() is called. At this point all allocations made through
// this allocator will be reaped, and underlying memory may be freed.
//
// (In practice we will still hold on to the initial pool to allow allocation
// to be quickly restarted, but additional pools will be freed).
//
// This allocator is non-reentrant; it is incumbent on the clients to ensure
// startAllocator() is not called again until stopAllocator() has been called.
216 class BumpPointerAllocator
{
218 BumpPointerAllocator()
223 ~BumpPointerAllocator()
229 BumpPointerPool
* startAllocator()
232 m_head
= BumpPointerPool::create();
243 BumpPointerPool
* m_head
;
using WTF::BumpPointerAllocator;

#endif // BumpPointerAllocator_h