/*
 * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "BlockAllocatorSymbian.h"

namespace WTF {
/** Efficiently allocates blocks of size blockSize with blockSize alignment.
 * Primarily designed for the JSC Collector's needs.
 * Not thread-safe. (See the illustrative usage sketch at the end of this file.)
 */
AlignedBlockAllocator::AlignedBlockAllocator(TUint32 reservationSize, TUint32 blockSize)
    : m_reservation(reservationSize)
    , m_blockSize(blockSize)
{
    // Get system's page size value.
    SYMBIAN_PAGESIZE(m_pageSize);

    // We only accept multiples of the system page size for both the initial
    // reservation and the alignment/block size.
    m_reservation = SYMBIAN_ROUNDUPTOMULTIPLE(m_reservation, m_pageSize);
    __ASSERT_ALWAYS(SYMBIAN_ROUNDUPTOMULTIPLE(m_blockSize, m_pageSize) == m_blockSize, User::Panic(_L("AlignedBlockAllocator1"), KErrArgument));

    // Calculate the max. number of bit flags we need to carve a reservationSize
    // range into blockSize-sized blocks.
    m_map.numBits = m_reservation / m_blockSize;
    const TUint32 bitsPerWord = 8 * sizeof(TUint32);
    const TUint32 numWords = (m_map.numBits + bitsPerWord - 1) / bitsPerWord;

    m_map.bits = new TUint32[numWords];
    __ASSERT_ALWAYS(m_map.bits, User::Panic(_L("AlignedBlockAllocator2"), KErrNoMemory));
    m_map.clearAll();

    // Open a Symbian RChunk and reserve the requested virtual address range.
    // Any thread in this process can operate on this RChunk due to EOwnerProcess access rights.
    TInt ret = m_chunk.CreateDisconnectedLocal(0, 0, (TInt)m_reservation, EOwnerProcess);
    if (ret != KErrNone)
        User::Panic(_L("AlignedBlockAllocator3"), ret);

    // This is the offset to m_chunk.Base() required to make it m_blockSize-aligned.
    m_offset = SYMBIAN_ROUNDUPTOMULTIPLE(TUint32(m_chunk.Base()), m_blockSize) - TUint(m_chunk.Base());
}
void* AlignedBlockAllocator::alloc()
{
    void* address = 0;

    // Look up the first free slot in the bit map.
    const TInt freeIdx = m_map.findFree();

    // Pseudo OOM: we ate up the address space we reserved,
    // even though the device may have free RAM left.
    if (freeIdx < 0)
        return 0;

    TInt ret = m_chunk.Commit(m_offset + (m_blockSize * freeIdx), m_blockSize);
    if (ret != KErrNone)
        return 0; // True OOM: the device didn't have physical RAM to spare.

    // Update the bit to mark this region as in use.
    m_map.set(freeIdx);

    // Calculate the address of the committed region (block).
    address = (void*)((m_chunk.Base() + m_offset) + (TUint)(m_blockSize * freeIdx));

    return address;
}
void AlignedBlockAllocator::free(void* block)
{
    // Calculate the index of the block to be freed.
    TInt idx = TUint(static_cast<TUint8*>(block) - m_chunk.Base() - m_offset) / m_blockSize;

    __ASSERT_DEBUG(idx >= 0 && idx < m_map.numBits, User::Panic(_L("AlignedBlockAllocator4"), KErrCorrupt)); // valid index check
    __ASSERT_DEBUG(m_map.get(idx), User::Panic(_L("AlignedBlockAllocator5"), KErrCorrupt)); // in-use flag check

    // Return the committed region to the system RAM pool
    // (the physical RAM becomes usable by others).
    TInt ret = m_chunk.Decommit(m_offset + m_blockSize * idx, m_blockSize);

    // Mark this block as available again.
    m_map.clear(idx);
}
void AlignedBlockAllocator::destroy()
{
    // Release everything!
    m_chunk.Decommit(0, m_chunk.MaxSize());
    m_map.clearAll();
}
AlignedBlockAllocator::~AlignedBlockAllocator()
{
    destroy();
    m_chunk.Close();
    delete [] m_map.bits;
}
130 } // end of namespace
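
// Illustrative usage sketch: how a collector-like client might drive
// AlignedBlockAllocator. The reservation and block sizes below are hypothetical
// values chosen for illustration only, not values taken from the JSC Collector.
// The sketch is excluded from compilation with "#if 0".
#if 0
static void exampleAlignedBlockAllocatorUsage()
{
    // Reserve 8 MB of virtual address space, carved into 64 KB aligned blocks.
    // Both values are multiples of the Symbian page size, as the constructor requires.
    WTF::AlignedBlockAllocator allocator(8 * 1024 * 1024, 64 * 1024);

    // Commit one block; the returned pointer is blockSize-aligned.
    void* block = allocator.alloc();
    if (!block)
        return; // Reserved address space exhausted, or the device is out of RAM.

    // ... use the 64 KB block ...

    // Decommit the block so its physical RAM is returned to the system.
    allocator.free(block);

    // destroy() releases all committed blocks at once; the destructor also
    // does this before closing the chunk and freeing the bit map.
    allocator.destroy();
}
#endif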