/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef BlockAllocator_h
#define BlockAllocator_h

#include "GCActivityCallback.h"
#include "HeapBlock.h"
#include "Region.h"
#include <condition_variable>
#include <wtf/DoublyLinkedList.h>
#include <wtf/Forward.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/TCSpinLock.h>
#include <wtf/Threading.h>
namespace JSC {

class CodeBlock;
class CopiedBlock;
class CopyWorkListSegment;
template <typename T> class GCArraySegment;
class HandleBlock;
class JSCell;
class MarkedBlock;
class WeakBlock;
// Simple allocator to reduce VM cost by holding onto blocks of memory for
// short periods of time and then freeing them on a secondary thread.
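//
// A rough usage sketch (hypothetical caller; the real clients are the Heap's
// block-level allocators, which are not visible in this header):
//
//     DeadBlock* storage = heap.blockAllocator().allocate<MarkedBlock>();
//     // ... construct the live block in the returned storage ...
//     heap.blockAllocator().deallocate(block); // storage is recycled, not unmapped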
class BlockAllocator {
public:
    BlockAllocator();
    ~BlockAllocator();

    template <typename T> DeadBlock* allocate();
    DeadBlock* allocateCustomSize(size_t blockSize, size_t blockAlignment);
    template <typename T> void deallocate(T*);
    template <typename T> void deallocateCustomSize(T*);

    JS_EXPORT_PRIVATE void releaseFreeRegions();
private:
    void waitForDuration(std::chrono::milliseconds);

    friend ThreadIdentifier createBlockFreeingThread(BlockAllocator*);
    void blockFreeingThreadMain();
    static void blockFreeingThreadStartFunc(void* heap);
    struct RegionSet {
        RegionSet(size_t blockSize)
            : m_numberOfPartialRegions(0)
            , m_blockSize(blockSize)
        {
        }

        bool isEmpty() const
        {
            return m_fullRegions.isEmpty() && m_partialRegions.isEmpty();
        }

        DoublyLinkedList<Region> m_fullRegions;
        DoublyLinkedList<Region> m_partialRegions;
        size_t m_numberOfPartialRegions;
        size_t m_blockSize;
    };
    DeadBlock* tryAllocateFromRegion(RegionSet&, DoublyLinkedList<Region>&, size_t&);

    bool allRegionSetsAreEmpty() const;

    template <typename T> RegionSet& regionSetFor();
    SuperRegion m_superRegion;
    RegionSet m_copiedRegionSet;
    RegionSet m_markedRegionSet;
    // WeakBlocks and GCArraySegments use the same RegionSet since they're the same size.
    RegionSet m_fourKBBlockRegionSet;
    RegionSet m_workListRegionSet;

    DoublyLinkedList<Region> m_emptyRegions;
    size_t m_numberOfEmptyRegions;

    bool m_isCurrentlyAllocating;
    bool m_blockFreeingThreadShouldQuit;
    SpinLock m_regionLock;
    std::mutex m_emptyRegionConditionMutex;
    std::condition_variable m_emptyRegionCondition;
    ThreadIdentifier m_blockFreeingThread;
};
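
// Pops a free block from the head region of the given list. If that region was
// empty, it is first moved from m_emptyRegions onto the set's partial list; if
// the allocation fills it up, it is moved onto the set's full list.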
inline DeadBlock* BlockAllocator::tryAllocateFromRegion(RegionSet& set, DoublyLinkedList<Region>& regions, size_t& numberOfRegions)
{
    if (numberOfRegions) {
        ASSERT(!regions.isEmpty());
        Region* region = regions.head();
        ASSERT(!region->isFull());

        if (region->isEmpty()) {
            ASSERT(region == m_emptyRegions.head());
            m_numberOfEmptyRegions--;
            set.m_numberOfPartialRegions++;
            region = m_emptyRegions.removeHead()->reset(set.m_blockSize);
            set.m_partialRegions.push(region);
        }

        DeadBlock* block = region->allocate();

        if (region->isFull()) {
            set.m_numberOfPartialRegions--;
            set.m_fullRegions.push(set.m_partialRegions.removeHead());
        }

        return block;
    }
    return 0;
}
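
// Allocation strategy: first try a partially used region for this block type,
// then any cached empty region, and only then ask the SuperRegion for a brand
// new region.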
template <typename T>
inline DeadBlock* BlockAllocator::allocate()
{
    RegionSet& set = regionSetFor<T>();
    DeadBlock* block;
    m_isCurrentlyAllocating = true;
    {
        SpinLockHolder locker(&m_regionLock);
        if ((block = tryAllocateFromRegion(set, set.m_partialRegions, set.m_numberOfPartialRegions)))
            return block;
        if ((block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions)))
            return block;
    }

    Region* newRegion = Region::create(&m_superRegion, T::blockSize);

    SpinLockHolder locker(&m_regionLock);
    m_emptyRegions.push(newRegion);
    m_numberOfEmptyRegions++;
    block = tryAllocateFromRegion(set, m_emptyRegions, m_numberOfEmptyRegions);
    ASSERT(block);
    return block;
}
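
// Custom-size blocks bypass the RegionSets: each one gets a dedicated region
// whose size is rounded up to a multiple of the requested alignment.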
inline DeadBlock* BlockAllocator::allocateCustomSize(size_t blockSize, size_t blockAlignment)
{
    size_t realSize = WTF::roundUpToMultipleOf(blockAlignment, blockSize);
    Region* newRegion = Region::createCustomSize(&m_superRegion, realSize, blockAlignment);
    DeadBlock* block = newRegion->allocate();
    ASSERT(block);
    return block;
}
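
// Returns a block to its region and re-files the region on the appropriate list.
// If the region becomes completely free it is parked on m_emptyRegions, and the
// block-freeing thread is woken when that list goes from empty to non-empty.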
template <typename T>
inline void BlockAllocator::deallocate(T* block)
{
    RegionSet& set = regionSetFor<T>();
    bool shouldWakeBlockFreeingThread = false;
    {
        SpinLockHolder locker(&m_regionLock);
        Region* region = block->region();
        ASSERT(!region->isEmpty());
        if (region->isFull())
            set.m_fullRegions.remove(region);
        else {
            set.m_partialRegions.remove(region);
            set.m_numberOfPartialRegions--;
        }

        region->deallocate(block);

        if (region->isEmpty()) {
            m_emptyRegions.push(region);
            shouldWakeBlockFreeingThread = !m_numberOfEmptyRegions;
            m_numberOfEmptyRegions++;
        } else {
            set.m_partialRegions.push(region);
            set.m_numberOfPartialRegions++;
        }
    }

    if (shouldWakeBlockFreeingThread) {
        std::lock_guard<std::mutex> lock(m_emptyRegionConditionMutex);
        m_emptyRegionCondition.notify_one();
    }

    if (!m_blockFreeingThread)
        releaseFreeRegions();
}
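
// Custom-size regions are never cached; releasing the block tears the whole
// region down immediately.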
template <typename T>
inline void BlockAllocator::deallocateCustomSize(T* block)
{
    Region* region = block->region();
    ASSERT(region->isCustomSize());
    region->deallocate(block);
    region->destroy();
}
#define REGION_SET_FOR(blockType, set) \
    template <> \
    inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<blockType>() \
    { \
        return set; \
    } \
    template <> \
    inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor<HeapBlock<blockType>>() \
    { \
        return set; \
    }

REGION_SET_FOR(MarkedBlock, m_markedRegionSet);
REGION_SET_FOR(CopiedBlock, m_copiedRegionSet);
REGION_SET_FOR(WeakBlock, m_fourKBBlockRegionSet);
REGION_SET_FOR(GCArraySegment<const JSCell*>, m_fourKBBlockRegionSet);
REGION_SET_FOR(GCArraySegment<CodeBlock*>, m_fourKBBlockRegionSet);
REGION_SET_FOR(CopyWorkListSegment, m_workListRegionSet);
REGION_SET_FOR(HandleBlock, m_fourKBBlockRegionSet);

#undef REGION_SET_FOR
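
// Fallback for block types without a REGION_SET_FOR mapping: reaching this is a
// programming error, so it crashes rather than returning a usable set.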
template <typename T>
inline BlockAllocator::RegionSet& BlockAllocator::regionSetFor()
{
    RELEASE_ASSERT_NOT_REACHED();
    return *(RegionSet*)0;
}

} // namespace JSC

#endif // BlockAllocator_h