2 * Copyright (C) 2010 Apple Inc. All rights reserved.
3 * Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
28 #include "OSAllocator.h"
30 #include "PageAllocatorSymbian.h"
34 // Array to store code chunks used by JIT engine(s)
35 static RPointerArray
<SymbianChunk
> codeChunksContainer
;
37 // The singleton data allocator (non code)
38 static PageAllocatorSymbian dataAllocator
;
40 _LIT(KErrorStringInternalConsistency
, "OSAllocator:ConsistencyError");
41 _LIT(KErrorStringChunkCreation
, "OSAllocator:ChunkInitError");
42 _LIT(KErrorStringPageSize
, "OSAllocator:WrongPageSize");
44 // Makes a new code chunk for a JIT engine with everything in committed state
45 static void* allocateCodeChunk(size_t bytes
)
48 TInt error
= c
.CreateLocalCode(bytes
, bytes
);
49 __ASSERT_ALWAYS(error
== KErrNone
, User::Panic(KErrorStringChunkCreation
, error
));
51 codeChunksContainer
.Append(new SymbianChunk(c
.Handle()));
52 return static_cast<void*>(c
.Base());
55 // Frees the _entire_ code chunk in which this address resides.
56 static bool deallocateCodeChunk(void* address
)
59 for (int i
= 0; i
< codeChunksContainer
.Count(); i
++) {
60 SymbianChunk
* p
= codeChunksContainer
[i
];
61 if (p
&& p
->contains(address
)) {
62 codeChunksContainer
.Remove(i
);
70 // Return the (singleton) object that manages all non-code VM operations
71 static PageAllocatorSymbian
* dataAllocatorInstance()
73 return &dataAllocator
;
76 // Reserve memory and return the base address of the region
77 void* OSAllocator::reserveUncommitted(size_t reservationSize
, Usage usage
, bool , bool executable
, bool)
81 base
= allocateCodeChunk(reservationSize
);
83 base
= dataAllocatorInstance()->reserve(reservationSize
);
87 // Inverse operation of reserveUncommitted()
88 void OSAllocator::releaseDecommitted(void* parkedBase
, size_t bytes
)
90 if (dataAllocatorInstance()->contains(parkedBase
))
91 dataAllocatorInstance()->release(parkedBase
, bytes
);
93 // NOOP for code chunks (JIT) because we released them in decommit()
96 // Commit what was previously reserved via reserveUncommitted()
97 void OSAllocator::commit(void* address
, size_t bytes
, bool, bool executable
)
99 // For code chunks, we commit (early) in reserveUncommitted(), so NOOP
100 // For data regions, do real work
102 dataAllocatorInstance()->commit(address
, bytes
);
105 void OSAllocator::decommit(void* address
, size_t bytes
)
107 if (dataAllocatorInstance()->contains(address
))
108 dataAllocatorInstance()->decommit(address
, bytes
);
110 deallocateCodeChunk(address
); // for code chunk, decommit AND release
113 void* OSAllocator::reserveAndCommit(size_t bytes
, Usage usage
, bool writable
, bool executable
, bool)
115 void* base
= reserveUncommitted(bytes
, usage
, writable
, executable
);
116 commit(base
, bytes
, writable
, executable
);
121 // The PageAllocatorSymbian class helps map OSAllocator calls for reserve/commit/decommit
122 // to a single large Symbian chunk. Only works with multiples of page size, and as a corollary
123 // all addresses accepted or returned by it are also page-sized aligned.
// - We initialize a chunk up-front with a large reservation size
// - The entire reservation is logically divided into page-sized blocks (4K on Symbian)
127 // - The map maintains 1 bit for each of the 4K-sized region in our address space
128 // - OSAllocator::reserveUncommitted() requests lead to 1 or more bits being set in map
129 // to indicate internally reserved state. The VM address corresponding to the first bit is returned.
130 // - OSAllocator::commit() actually calls RChunk.commit() and commits *all or part* of the region
131 // reserved via reserveUncommitted() previously.
132 // - OSAllocator::decommit() calls RChunk.decommit()
// - OSAllocator::releaseDecommitted() unparks all the bits in the map, but trusts that a previous
//   call to decommit() would have returned the memory to the OS
135 PageAllocatorSymbian::PageAllocatorSymbian()
137 __ASSERT_ALWAYS(m_pageSize
== WTF::pageSize(), User::Panic(KErrorStringPageSize
, m_pageSize
));
140 TInt error
= chunk
.CreateDisconnectedLocal(0, 0, TInt(largeReservationSize
));
141 __ASSERT_ALWAYS(error
== KErrNone
, User::Panic(KErrorStringChunkCreation
, error
));
143 m_chunk
= new SymbianChunk(chunk
.Handle()); // takes ownership of chunk
146 PageAllocatorSymbian::~PageAllocatorSymbian()
151 // Reserves a region internally in the bitmap
152 void* PageAllocatorSymbian::reserve(size_t bytes
)
154 // Find first available region
155 const size_t nPages
= bytes
/ m_pageSize
;
156 const int64_t startIdx
= m_map
.findRunOfZeros(nPages
);
162 for (size_t i
= startIdx
; i
< startIdx
+ nPages
; i
++)
165 return static_cast<void*>( m_chunk
->m_base
+ (TUint
)(m_pageSize
* startIdx
) );
168 // Reverses the effects of a reserve() call
169 void PageAllocatorSymbian::release(void* address
, size_t bytes
)
171 const size_t startIdx
= (static_cast<char*>(address
) - m_chunk
->m_base
) / m_pageSize
;
172 const size_t nPages
= bytes
/ m_pageSize
;
173 for (size_t i
= startIdx
; i
< startIdx
+ nPages
; i
++)
177 // Actually commit memory from the OS, after a previous call to reserve()
178 bool PageAllocatorSymbian::commit(void* address
, size_t bytes
)
180 // sanity check that bits were previously set
181 const size_t idx
= (static_cast<char*>(address
) - m_chunk
->m_base
) / m_pageSize
;
182 const size_t nPages
= bytes
/ m_pageSize
;
183 __ASSERT_ALWAYS(m_map
.get(idx
), User::Panic(KErrorStringInternalConsistency
, idx
));
184 __ASSERT_ALWAYS(m_map
.get(idx
+nPages
-1), User::Panic(KErrorStringInternalConsistency
, idx
+nPages
-1));
186 TInt error
= m_chunk
->Commit(static_cast<char*>(address
) - m_chunk
->m_base
, bytes
);
187 return (error
== KErrNone
);
190 // Inverse operation of commit(), a release() should follow later
191 bool PageAllocatorSymbian::decommit(void* address
, size_t bytes
)
193 TInt error
= m_chunk
->Decommit(static_cast<char*>(address
) - m_chunk
->m_base
, bytes
);
194 return (error
== KErrNone
);
197 bool PageAllocatorSymbian::contains(const void* address
) const
199 return m_chunk
->contains(address
);