#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
+#include <mach/mach_vm.h>
#include "IOKitKernelInternal.h"
+#ifdef IOALLOCDEBUG
+#include <libkern/c++/OSCPPDebug.h>
+#endif
+#include <IOKit/IOStatisticsPrivate.h>
+
+#if IOKITSTATS
+#define IOStatisticsAlloc(type, size) \
+do { \
+ IOStatistics::countAlloc(type, size); \
+} while (0)
+#else
+#define IOStatisticsAlloc(type, size)
+#endif /* IOKITSTATS */
+
+
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>
enum
{
- kInternalFlagPhysical = 0x00000001,
- kInternalFlagPageSized = 0x00000002
+ kInternalFlagPhysical = 0x00000001,
+ kInternalFlagPageSized = 0x00000002,
+ kInternalFlagPageAllocated = 0x00000004
+};
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#if 0
+#undef assert
+#define assert(ex) \
+ ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
+#endif
+
+enum
+{
+ kIOPageAllocChunkBytes = (PAGE_SIZE / 64),
+ kIOPageAllocSignature = 'iopa'
+};
+
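+// Sub-page allocator: each backing page is carved into 64 chunks of
+// kIOPageAllocChunkBytes, with the final chunk holding this header.
+// 'avail' is a bitmap of free chunks; the MSB stands for the first
+// chunk of the page.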
+struct io_pagealloc_t
+{
+ queue_chain_t link;
+ uint64_t avail;
+ uint32_t signature;
};
+typedef struct io_pagealloc_t io_pagealloc_t;
+
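+// compile-time check that the header fits inside a single chunk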
+typedef char io_pagealloc_t_assert[(sizeof(io_pagealloc_t) <= kIOPageAllocChunkBytes) ? 1 : -1];
+
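+// list of backing pages that still have free chunks (fully used pages are
+// unlinked); the globals below are guarded by gIOPageAllocLock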
+IOSimpleLock * gIOPageAllocLock;
+queue_head_t gIOPageAllocList;
+vm_size_t gIOPageAllocCount;
+vm_size_t gIOPageAllocBytes;
+
+static io_pagealloc_t *
+iopa_allocpage(void)
+{
+ kern_return_t kr;
+ io_pagealloc_t * pa;
+ vm_address_t vmaddr = 0;
+
+ int options = 0; // KMA_LOMEM;
+ kr = kernel_memory_allocate(kernel_map, &vmaddr,
+ page_size, 0, options);
+ if (KERN_SUCCESS != kr) return (0);
+
+ bzero((void *) vmaddr, page_size);
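+ // the allocator header occupies the last chunk of the new page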
+ pa = (typeof(pa)) (vmaddr + page_size - kIOPageAllocChunkBytes);
+
+ pa->signature = kIOPageAllocSignature;
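+ // every data chunk starts out free; only the header chunk (low bit) is claimed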
+ pa->avail = -2ULL;
+
+ return (pa);
+}
+
+static void
+iopa_freepage(io_pagealloc_t * pa)
+{
+ kmem_free( kernel_map, trunc_page((uintptr_t) pa), page_size);
+}
+
+static uintptr_t
+iopa_allocinpage(io_pagealloc_t * pa, uint32_t count, uint64_t align)
+{
+ uint32_t n, s;
+ uint64_t avail = pa->avail;
+
+ assert(avail);
+
+ // find runs of 'count' consecutive 1 bits in avail
+ for (n = count; n > 1; n -= s)
+ {
+ s = n >> 1;
+ avail = avail & (avail << s);
+ }
+ // keep only runs that start on the requested chunk alignment
+ avail &= align;
+
+ if (avail)
+ {
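+ // each surviving bit marks the first chunk of a free, aligned run;
+ // take the run closest to the start of the page and claim its chunks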
+ n = __builtin_clzll(avail);
+ pa->avail &= ~((-1ULL << (64 - count)) >> n);
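+ // page is now fully allocated; unlink it from the free list
+ // (link.next doubles as the on-list flag)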
+ if (!pa->avail && pa->link.next)
+ {
+ remque(&pa->link);
+ pa->link.next = 0;
+ }
+ return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
+ }
+
+ return (0);
+}
+
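+// smallest n such that (1 << n) >= size; 0 for size <= 1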
+static uint32_t
+log2up(uint32_t size)
+{
+ if (size <= 1) size = 0;
+ else size = 32 - __builtin_clz(size - 1);
+ return (size);
+}
+
+static uintptr_t
+iopa_alloc(vm_size_t bytes, uint32_t balign)
+{
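+ // acceptable starting-chunk positions for alignments of 1, 2, 4, ... 64 chunks
+ // (indexed by log2 of the alignment in chunks; MSB = first chunk of a page)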
+ static const uint64_t align_masks[] = {
+ 0xFFFFFFFFFFFFFFFF,
+ 0xAAAAAAAAAAAAAAAA,
+ 0x8888888888888888,
+ 0x8080808080808080,
+ 0x8000800080008000,
+ 0x8000000080000000,
+ 0x8000000000000000,
+ };
+ io_pagealloc_t * pa;
+ uintptr_t addr = 0;
+ uint32_t count;
+ uint64_t align;
+
+ if (!bytes) bytes = 1;
+ count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
+ align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];
+
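+ // first try to satisfy the request from a page that already has free chunks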
+ IOSimpleLockLock(gIOPageAllocLock);
+ pa = (typeof(pa)) queue_first(&gIOPageAllocList);
+ while (!queue_end(&gIOPageAllocList, &pa->link))
+ {
+ addr = iopa_allocinpage(pa, count, align);
+ if (addr)
+ {
+ gIOPageAllocBytes += bytes;
+ break;
+ }
+ pa = (typeof(pa)) queue_next(&pa->link);
+ }
+ IOSimpleLockUnlock(gIOPageAllocLock);
+ if (!addr)
+ {
+ pa = iopa_allocpage();
+ if (pa)
+ {
+ addr = iopa_allocinpage(pa, count, align);
+ IOSimpleLockLock(gIOPageAllocLock);
+ if (pa->avail) enqueue_head(&gIOPageAllocList, &pa->link);
+ gIOPageAllocCount++;
+ if (addr) gIOPageAllocBytes += bytes;
+ IOSimpleLockUnlock(gIOPageAllocLock);
+ }
+ }
+
+ if (addr)
+ {
+ assert((addr & ((1 << log2up(balign)) - 1)) == 0);
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, bytes);
+#if IOALLOCDEBUG
+ debug_iomalloc_size += bytes;
+#endif
+ }
+
+ return (addr);
+}
+
+static void
+iopa_free(uintptr_t addr, vm_size_t bytes)
+{
+ io_pagealloc_t * pa;
+ uint32_t count;
+ uintptr_t chunk;
+
+ if (!bytes) bytes = 1;
+
+ chunk = (addr & page_mask);
+ assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));
+
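+ // the allocator header sits in the last chunk of the page containing addr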
+ pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
+ assert(kIOPageAllocSignature == pa->signature);
+
+ count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
+ chunk /= kIOPageAllocChunkBytes;
+
+ IOSimpleLockLock(gIOPageAllocLock);
+ if (!pa->avail)
+ {
+ assert(!pa->link.next);
+ enqueue_tail(&gIOPageAllocList, &pa->link);
+ }
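+ // mark the freed chunks available again; if only the header chunk is
+ // still claimed, unlink the page and return it to the kernel map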
+ pa->avail |= ((-1ULL << (64 - count)) >> chunk);
+ if (pa->avail != -2ULL) pa = 0;
+ else
+ {
+ remque(&pa->link);
+ pa->link.next = 0;
+ pa->signature = 0;
+ gIOPageAllocCount--;
+ }
+ gIOPageAllocBytes -= bytes;
+ IOSimpleLockUnlock(gIOPageAllocLock);
+ if (pa) iopa_freepage(pa);
+
+#if IOALLOCDEBUG
+ debug_iomalloc_size -= bytes;
+#endif
+ IOStatisticsAlloc(kIOStatisticsFreeAligned, bytes);
+}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
mach_vm_address_t alignment,
mach_vm_address_t physicalMask)
{
- kern_return_t kr;
- task_t mapTask = NULL;
- vm_map_t vmmap = NULL;
- mach_vm_address_t highestMask = 0;
- IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+ kern_return_t kr;
+ task_t mapTask = NULL;
+ vm_map_t vmmap = NULL;
+ mach_vm_address_t highestMask = 0;
+ IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+ IODMAMapSpecification mapSpec;
+ bool mapped = false;
+ bool needZero;
if (!capacity)
return false;
return (false);
_ranges.v64->address = 0;
_ranges.v64->length = 0;
- // make sure super::free doesn't dealloc _ranges before super::init
- _flags = kIOMemoryAsReference;
+ // make sure super::free doesn't dealloc _ranges before super::init
+ _flags = kIOMemoryAsReference;
// Grab IOMD bits from the Buffer MD options
iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
+ if (!(kIOMemoryMapperNone & options))
+ {
+ IOMapper::checkForSystemMapper();
+ mapped = (0 != IOMapper::gSystem);
+ }
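+ // buffers that will be mapped by a system mapper get zero-filled below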
+ needZero = mapped;
+
if (physicalMask && (alignment <= 1))
{
alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
return false;
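+ // describe the allocation (alignment, address width) for any system mapper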
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.alignment = _alignment;
+ mapSpec.numAddressBits = 64;
+ if (highestMask && mapped)
+ {
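+ // express the physical mask as an address width for the mapper and clear
+ // highestMask so the host allocation itself is not physically restricted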
+ if (highestMask <= 0xFFFFFFFF)
+ mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
+ else
+ mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
+ highestMask = 0;
+ }
+
// set flags for entry + object create
vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
/* Allocate a wired-down buffer inside kernel space. */
- if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
+ bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
+
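+ // kIOMemoryPhysicallyContiguous only forces host-contiguous pages when no
+ // system mapper will provide a contiguous DMA mapping, or the mapper is bypassed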
+ if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
+ {
+ contig |= (!mapped);
+ contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+ // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+ contig |= true;
+#endif
+ }
+
+ if (contig || highestMask || (alignment > page_size))
{
_internalFlags |= kInternalFlagPhysical;
if (highestMask)
_internalFlags |= kInternalFlagPageSized;
capacity = round_page(capacity);
}
- _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
- (0 != (options & kIOMemoryPhysicallyContiguous)));
+ _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+ capacity, highestMask, alignment, contig);
+ }
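+ // small mapped allocations that fit in a page (leaving room for the chunk
+ // header) come from the chunked page allocator; its pages are zeroed when
+ // created, so the explicit bzero below is skipped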
+ else if (needZero
+ && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
+ {
+ _internalFlags |= kInternalFlagPageAllocated;
+ needZero = false;
+ _buffer = (void *) iopa_alloc(capacity, alignment);
}
else if (alignment > 1)
{
{
_buffer = IOMalloc(capacity);
}
-
if (!_buffer)
{
return false;
}
+ if (needZero) bzero(_buffer, capacity);
}
if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
{
*startAddr;
startAddr += page_size;
- }
+ }
}
}
inTask, iomdOptions, /* System mapper */ 0))
return false;
+ // give any system mapper the allocation params
+ if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
+ &mapSpec, sizeof(mapSpec)))
+ return false;
+
if (mapTask)
{
if (!reserved) {
}
else if (buffer)
{
- if (internalFlags & kInternalFlagPhysical)
+ if (kInternalFlagPageSized & internalFlags) size = round_page(size);
+
+ if (kInternalFlagPhysical & internalFlags)
{
- if (kInternalFlagPageSized & internalFlags)
- size = round_page(size);
IOKernelFreePhysical((mach_vm_address_t) buffer, size);
- }
+ }
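+ // sub-page buffers came from the chunked page allocator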
+ else if (kInternalFlagPageAllocated & internalFlags)
+ {
+ iopa_free((uintptr_t) buffer, size);
+ }
else if (alignment > 1)
+ {
IOFreeAligned(buffer, size);
+ }
else
+ {
IOFree(buffer, size);
+ }
}
if (range && (kIOMemoryAsReference & flags))
IODelete(range, IOAddressRange, 1);