/*
 * Copyright (C) 2010 Apple Inc. All rights reserved.
 * Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "OSAllocator.h"

#include "PageAllocatorSymbian.h"

namespace WTF {

// Array to store code chunks used by JIT engine(s)
static RPointerArray<SymbianChunk> codeChunksContainer;

// The singleton data allocator (non code)
static PageAllocatorSymbian dataAllocator;

_LIT(KErrorStringInternalConsistency, "OSAllocator:ConsistencyError");
_LIT(KErrorStringChunkCreation, "OSAllocator:ChunkInitError");
_LIT(KErrorStringPageSize, "OSAllocator:WrongPageSize");

// Makes a new code chunk for a JIT engine with everything in committed state
static void* allocateCodeChunk(size_t bytes)
{
    RChunk c;
    // Committed size == max size, so the whole chunk is usable immediately
    TInt error = c.CreateLocalCode(bytes, bytes);
    __ASSERT_ALWAYS(error == KErrNone, User::Panic(KErrorStringChunkCreation, error));

    codeChunksContainer.Append(new SymbianChunk(c.Handle()));
    return static_cast<void*>(c.Base());
}

// Frees the _entire_ code chunk in which this address resides.
static bool deallocateCodeChunk(void* address)
{
    bool found = false;
    for (int i = 0; i < codeChunksContainer.Count(); i++) {
        SymbianChunk* p = codeChunksContainer[i];
        if (p && p->contains(address)) {
            codeChunksContainer.Remove(i);
            delete p;
            found = true;
            break; // An address belongs to at most one chunk, and Remove() has shifted the array.
        }
    }
    return found;
}

// Return the (singleton) object that manages all non-code VM operations
static PageAllocatorSymbian* dataAllocatorInstance()
{
    return &dataAllocator;
}

// Reserve memory and return the base address of the region
void* OSAllocator::reserveUncommitted(size_t reservationSize, Usage usage, bool, bool executable, bool)
{
    void* base = 0;
    if (executable)
        base = allocateCodeChunk(reservationSize);
    else
        base = dataAllocatorInstance()->reserve(reservationSize);
    return base;
}
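
// Illustrative note (not part of the original sources): callers such as WTF's
// PageReservation are assumed to drive these hooks in the order
//     reserveUncommitted() -> commit() -> ... -> decommit() -> releaseDecommitted()
// For data memory each step is forwarded to the PageAllocatorSymbian singleton below;
// for executable (JIT) memory the commit happens eagerly here and the final release
// happens in decommit(), so commit() and releaseDecommitted() are no-ops for it.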

// Inverse operation of reserveUncommitted()
void OSAllocator::releaseDecommitted(void* parkedBase, size_t bytes)
{
    if (dataAllocatorInstance()->contains(parkedBase))
        dataAllocatorInstance()->release(parkedBase, bytes);

    // NOOP for code chunks (JIT) because we released them in decommit()
}

// Commit what was previously reserved via reserveUncommitted()
void OSAllocator::commit(void* address, size_t bytes, bool, bool executable)
{
    // For code chunks, we commit (early) in reserveUncommitted(), so NOOP
    // For data regions, do real work
    if (!executable)
        dataAllocatorInstance()->commit(address, bytes);
}

void OSAllocator::decommit(void* address, size_t bytes)
{
    if (dataAllocatorInstance()->contains(address))
        dataAllocatorInstance()->decommit(address, bytes);
    else
        deallocateCodeChunk(address); // for code chunk, decommit AND release
}

void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bool executable, bool)
{
    void* base = reserveUncommitted(bytes, usage, writable, executable);
    commit(base, bytes, writable, executable);
    return base;
}


// The PageAllocatorSymbian class helps map OSAllocator calls for reserve/commit/decommit
// onto a single large Symbian chunk. It only works with multiples of the page size, and as a
// corollary all addresses accepted or returned by it are also page-size aligned.
// Design notes:
// - We initialize a chunk up-front with a large reservation size
// - The entire reservation is logically divided into page-sized blocks (4K on Symbian)
// - The map maintains one bit for each 4K-sized region in our address space
// - OSAllocator::reserveUncommitted() requests lead to one or more bits being set in the map
//   to indicate the internally reserved state. The VM address corresponding to the first bit is returned.
// - OSAllocator::commit() actually calls RChunk::Commit() and commits *all or part* of the region
//   previously reserved via reserveUncommitted().
// - OSAllocator::decommit() calls RChunk::Decommit()
// - OSAllocator::releaseDecommitted() clears the corresponding bits in the map, but trusts that a
//   previous call to decommit() has already returned the memory to the OS
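//
// For scale (illustrative numbers only; the real largeReservationSize is defined in
// PageAllocatorSymbian.h): a hypothetical 64MB reservation with 4K pages needs
// 64MB / 4K = 16384 map bits, i.e. about 2KB of bookkeeping for the whole data space.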
PageAllocatorSymbian::PageAllocatorSymbian()
{
    __ASSERT_ALWAYS(m_pageSize == WTF::pageSize(), User::Panic(KErrorStringPageSize, m_pageSize));

    // Reserve largeReservationSize of address space; nothing is committed yet (bottom == top == 0)
    RChunk chunk;
    TInt error = chunk.CreateDisconnectedLocal(0, 0, TInt(largeReservationSize));
    __ASSERT_ALWAYS(error == KErrNone, User::Panic(KErrorStringChunkCreation, error));

    m_chunk = new SymbianChunk(chunk.Handle()); // takes ownership of chunk
}

PageAllocatorSymbian::~PageAllocatorSymbian()
{
    delete m_chunk;
}

// Reserves a region internally in the bitmap
void* PageAllocatorSymbian::reserve(size_t bytes)
{
    // Find first available region
    const size_t nPages = bytes / m_pageSize;
    const int64_t startIdx = m_map.findRunOfZeros(nPages);

    // Pseudo OOM
    if (startIdx < 0)
        return 0;

    for (size_t i = startIdx; i < startIdx + nPages; i++)
        m_map.set(i);

    return static_cast<void*>(m_chunk->m_base + (TUint)(m_pageSize * startIdx));
}
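
// Worked example (illustrative, not from the original sources): with 4K pages,
// reserve(20480) needs 20480 / 4096 = 5 clear bits. If findRunOfZeros() returns
// index 3, bits 3..7 are set and m_chunk->m_base + 3 * 4096 is handed back.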

// Reverses the effects of a reserve() call
void PageAllocatorSymbian::release(void* address, size_t bytes)
{
    const size_t startIdx = (static_cast<char*>(address) - m_chunk->m_base) / m_pageSize;
    const size_t nPages = bytes / m_pageSize;
    for (size_t i = startIdx; i < startIdx + nPages; i++)
        m_map.clear(i);
}

// Actually commit memory from the OS, after a previous call to reserve()
bool PageAllocatorSymbian::commit(void* address, size_t bytes)
{
    // Sanity check that bits were previously set
    const size_t idx = (static_cast<char*>(address) - m_chunk->m_base) / m_pageSize;
    const size_t nPages = bytes / m_pageSize;
    __ASSERT_ALWAYS(m_map.get(idx), User::Panic(KErrorStringInternalConsistency, idx));
    __ASSERT_ALWAYS(m_map.get(idx + nPages - 1), User::Panic(KErrorStringInternalConsistency, idx + nPages - 1));

    TInt error = m_chunk->Commit(static_cast<char*>(address) - m_chunk->m_base, bytes);
    return (error == KErrNone);
}
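
// Example of the offset arithmetic above (illustrative): committing 8192 bytes at the
// region that starts one page into the chunk becomes m_chunk->Commit(4096, 8192);
// RChunk offsets are relative to the chunk base, not absolute addresses.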

// Inverse operation of commit(); a release() should follow later
bool PageAllocatorSymbian::decommit(void* address, size_t bytes)
{
    TInt error = m_chunk->Decommit(static_cast<char*>(address) - m_chunk->m_base, bytes);
    return (error == KErrNone);
}

bool PageAllocatorSymbian::contains(const void* address) const
{
    return m_chunk->contains(address);
}

} // namespace WTF