#include <sys/sysctl.h>
#endif
+#include "libkern/OSAtomic.h"
+#include <libkern/c++/OSKext.h>
+#include <IOKit/IOStatisticsPrivate.h>
+#include <sys/msgbuf.h>
+
+#if IOKITSTATS
+
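+/*
+ * With IOKITSTATS enabled, each allocation and free below is reported to
+ * IOStatistics::countAlloc(); otherwise the macro expands to nothing.
+ */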
+#define IOStatisticsAlloc(type, size) \
+do { \
+ IOStatistics::countAlloc(type, size); \
+} while (0)
+
+#else
+
+#define IOStatisticsAlloc(type, size)
+
+#endif /* IOKITSTATS */
+
extern "C"
{
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
-int
+extern int
__doprnt(
const char *fmt,
va_list argp,
void *arg,
int radix);
-extern void conslog_putc(char);
+extern void cons_putc_locked(char);
+extern void bsd_log_lock(void);
+extern void bsd_log_unlock(void);
+extern void logwakeup();
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void *_giDebugReserved1 = NULL;
void *_giDebugReserved2 = NULL;
+iopa_t gIOBMDPageAllocator;
/*
* Static variables for this module.
static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t * gIOMallocContiguousEntriesLock;
-enum { kIOMaxPageableMaps = 16 };
-enum { kIOPageableMapSize = 96 * 1024 * 1024 };
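+/* x86_64 kernels use fewer but larger pageable maps (8 x 512MB vs 16 x 96MB). */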
+#if __x86_64__
+enum { kIOMaxPageableMaps = 8 };
+enum { kIOPageableMapSize = 512 * 1024 * 1024 };
+enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
+#else
+enum { kIOMaxPageableMaps = 16 };
+enum { kIOPageableMapSize = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
+#endif
typedef struct {
vm_map_t map;
lck_mtx_t * lock;
} gIOKitPageableSpace;
+static iopa_t gIOPageablePageAllocator;
+
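+/*
+ * Chunk granularity for the iopa sub-page allocators (gIOBMDPageAllocator,
+ * gIOPageablePageAllocator); IOLibInit() sets it to PAGE_SIZE/64 so a single
+ * 64-bit bitmap describes every chunk in a page.
+ */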
+uint32_t gIOPageAllocChunkBytes;
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOLibInit(void)
gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
queue_init( &gIOMallocContiguousEntries );
+ gIOPageAllocChunkBytes = PAGE_SIZE/64;
+ assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
+ iopa_init(&gIOBMDPageAllocator);
+ iopa_init(&gIOPageablePageAllocator);
+
libInitialized = true;
}
void * address;
address = (void *)kalloc(size);
+ if ( address ) {
#if IOALLOCDEBUG
- if (address) {
debug_iomalloc_size += size;
- }
#endif
+ IOStatisticsAlloc(kIOStatisticsMalloc, size);
+ }
+
return address;
}
#if IOALLOCDEBUG
debug_iomalloc_size -= size;
#endif
+ IOStatisticsAlloc(kIOStatisticsFree, size);
}
}
alignMask = alignment - 1;
adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
- if (adjustedSize >= page_size) {
+ if (size > adjustedSize) {
+ address = 0; /* overflow detected */
+ }
+ else if (adjustedSize >= page_size) {
kr = kernel_memory_allocate(kernel_map, &address,
size, alignMask, 0);
assert(0 == (address & alignMask));
-#if IOALLOCDEBUG
if( address) {
+#if IOALLOCDEBUG
debug_iomalloc_size += size;
- }
#endif
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
+ }
return (void *) address;
}
#if IOALLOCDEBUG
debug_iomalloc_size -= size;
#endif
+
+ IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kfree((void *)allocationAddress, adjustedSize);
}
+ IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
debug_iomalloc_size -= size;
#endif
}
+
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
- mach_vm_size_t alignment, bool contiguous)
+ mach_vm_size_t alignment, bool contiguous)
{
kern_return_t kr;
mach_vm_address_t address;
contiguous = (contiguous && (adjustedSize > page_size))
|| (alignment > page_size);
- if ((!contiguous) && (maxPhys <= 0xFFFFFFFF))
- {
- maxPhys = 0;
- options |= KMA_LOMEM;
- }
-
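+ /*
+  * A non-contiguous allocation restricted to the low 4GB can be satisfied
+  * from the KMA_LOMEM pool; if the caller's limit already covers every
+  * physical page (gIOLastPage), drop the physical restriction entirely.
+  */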
+ if (!contiguous)
+ {
+ if (maxPhys <= 0xFFFFFFFF)
+ {
+ maxPhys = 0;
+ options |= KMA_LOMEM;
+ }
+ else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
+ {
+ maxPhys = 0;
+ }
+ }
if (contiguous || maxPhys)
{
kr = kmem_alloc_contig(kernel_map, &virt, size,
address = 0;
}
-#if IOALLOCDEBUG
if (address) {
+ IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
+#if IOALLOCDEBUG
debug_iomalloc_size += size;
- }
#endif
+ }
return (address);
}
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct _IOMallocContiguousEntry
return( kr );
}
-void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
+static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
{
kern_return_t kr = kIOReturnNotReady;
struct IOMallocPageableRef ref;
if( kIOReturnSuccess != kr)
ref.address = 0;
-#if IOALLOCDEBUG
- if( ref.address)
- debug_iomallocpageable_size += round_page(size);
-#endif
-
return( (void *) ref.address );
}
return( map );
}
-void IOFreePageable(void * address, vm_size_t size)
+static void IOFreePageablePages(void * address, vm_size_t size)
{
vm_map_t map;
map = IOPageableMapForAddress( (vm_address_t) address);
if( map)
kmem_free( map, (vm_offset_t) address, size);
+}
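+
+/* Page-supply callback for iopa_alloc(): obtains one fresh pageable page. */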
+static uintptr_t IOMallocOnePageablePage(iopa_t * a)
+{
+ return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
+}
+
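+/*
+ * Requests of (page_size - 4*gIOPageAllocChunkBytes) or more get whole
+ * pageable pages; smaller requests are packed into shared pageable pages
+ * by the iopa allocator.
+ */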
+void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
+{
+ void * addr;
+
+ if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment);
+ else addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));
+
+ if (addr) {
#if IOALLOCDEBUG
- debug_iomallocpageable_size -= round_page(size);
+ debug_iomallocpageable_size += size;
#endif
+ IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
+ }
+
+ return (addr);
+}
+
+void IOFreePageable(void * address, vm_size_t size)
+{
+#if IOALLOCDEBUG
+ debug_iomallocpageable_size -= size;
+#endif
+ IOStatisticsAlloc(kIOStatisticsFreePageable, size);
+
+ if (size < (page_size - 4*gIOPageAllocChunkBytes))
+ {
+ address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
+ size = page_size;
+ }
+ if (address) IOFreePageablePages(address, size);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+extern "C" void
+iopa_init(iopa_t * a)
+{
+ bzero(a, sizeof(*a));
+ a->lock = IOLockAlloc();
+ queue_init(&a->list);
+}
+
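+/*
+ * Allocate "count" contiguous chunks from a single page. pa->avail has one
+ * bit per chunk (MSB = first chunk of the page). The folding loop clears
+ * every bit that does not begin a free run of at least "count" chunks
+ * (e.g. for count == 2, avail & (avail << 1) keeps a bit only if the next
+ * chunk is also free); "align" then masks off unaligned starting positions.
+ * A page whose chunks are all taken is removed from the allocator's list.
+ */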
+static uintptr_t
+iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
+{
+ uint32_t n, s;
+ uint64_t avail = pa->avail;
+
+ assert(avail);
+
+ // find strings of count 1 bits in avail
+ for (n = count; n > 1; n -= s)
+ {
+ s = n >> 1;
+ avail = avail & (avail << s);
+ }
+ // and aligned
+ avail &= align;
+
+ if (avail)
+ {
+ n = __builtin_clzll(avail);
+ pa->avail &= ~((-1ULL << (64 - count)) >> n);
+ if (!pa->avail && pa->link.next)
+ {
+ remque(&pa->link);
+ pa->link.next = 0;
+ }
+ return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
+ }
+
+ return (0);
+}
+
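+/* Smallest n such that (1 << n) >= size; log2up(0) == log2up(1) == 0. */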
+static uint32_t
+log2up(uint32_t size)
+{
+ if (size <= 1) size = 0;
+ else size = 32 - __builtin_clz(size - 1);
+ return (size);
+}
+
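+/*
+ * Allocate "bytes" from the pool: walk the list of partially-used pages and
+ * try to place the request in each; if none can hold it, call the supplied
+ * "alloc" callback for a fresh page, put the iopa_page_t header in that
+ * page's last chunk (avail = -2ULL leaves the header chunk marked used),
+ * and queue the page while it still has free chunks. align_masks[n] has a
+ * bit set at every chunk position that is a multiple of (1 << n) chunks.
+ */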
+uintptr_t
+iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
+{
+ static const uint64_t align_masks[] = {
+ 0xFFFFFFFFFFFFFFFF,
+ 0xAAAAAAAAAAAAAAAA,
+ 0x8888888888888888,
+ 0x8080808080808080,
+ 0x8000800080008000,
+ 0x8000000080000000,
+ 0x8000000000000000,
+ };
+ iopa_page_t * pa;
+ uintptr_t addr = 0;
+ uint32_t count;
+ uint64_t align;
+
+ if (!bytes) bytes = 1;
+ count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
+ align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];
+
+ IOLockLock(a->lock);
+ pa = (typeof(pa)) queue_first(&a->list);
+ while (!queue_end(&a->list, &pa->link))
+ {
+ addr = iopa_allocinpage(pa, count, align);
+ if (addr)
+ {
+ a->bytecount += bytes;
+ break;
+ }
+ pa = (typeof(pa)) queue_next(&pa->link);
+ }
+ IOLockUnlock(a->lock);
+
+ if (!addr)
+ {
+ addr = alloc(a);
+ if (addr)
+ {
+ pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
+ pa->signature = kIOPageAllocSignature;
+ pa->avail = -2ULL;
+
+ addr = iopa_allocinpage(pa, count, align);
+ IOLockLock(a->lock);
+ if (pa->avail) enqueue_head(&a->list, &pa->link);
+ a->pagecount++;
+ if (addr) a->bytecount += bytes;
+ IOLockUnlock(a->lock);
+ }
+ }
+
+ assert((addr & ((1 << log2up(balign)) - 1)) == 0);
+ return (addr);
+}
+
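+/*
+ * Return "bytes" at "addr" to the pool: locate the page header in the last
+ * chunk of the page, mark the chunks free again, and relink the page if it
+ * had been full. Returns the page's base address once every chunk except
+ * the header is free again (so the caller can release the page), else 0.
+ */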
+uintptr_t
+iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
+{
+ iopa_page_t * pa;
+ uint32_t count;
+ uintptr_t chunk;
+
+ if (!bytes) bytes = 1;
+
+ chunk = (addr & page_mask);
+ assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));
+
+ pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
+ assert(kIOPageAllocSignature == pa->signature);
+
+ count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
+ chunk /= gIOPageAllocChunkBytes;
+
+ IOLockLock(a->lock);
+ if (!pa->avail)
+ {
+ assert(!pa->link.next);
+ enqueue_tail(&a->list, &pa->link);
+ }
+ pa->avail |= ((-1ULL << (64 - count)) >> chunk);
+ if (pa->avail != -2ULL) pa = 0;
+ else
+ {
+ remque(&pa->link);
+ pa->link.next = 0;
+ pa->signature = 0;
+ a->pagecount--;
+ // page to free
+ pa = (typeof(pa)) trunc_page(pa);
+ }
+ a->bytecount -= bytes;
+ IOLockUnlock(a->lock);
+
+ return ((uintptr_t) pa);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-static void _iolog_putc(int ch, void *arg __unused)
+static void _iolog_consputc(int ch, void *arg __unused)
{
- conslog_putc(ch);
+ cons_putc_locked(ch);
+}
+
+static void _iolog_logputc(int ch, void *arg __unused)
+{
+ log_putc_locked(ch);
}
void IOLog(const char *format, ...)
{
- va_list ap;
+ va_list ap;
- va_start(ap, format);
- __doprnt(format, ap, _iolog_putc, NULL, 16);
- va_end(ap);
+ va_start(ap, format);
+ IOLogv(format, ap);
+ va_end(ap);
}
void IOLogv(const char *format, va_list ap)
{
- __doprnt(format, ap, _iolog_putc, NULL, 16);
+ va_list ap2;
+
+ va_copy(ap2, ap);
+
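+ /*
+  * Format the message twice: once into the kernel message buffer under the
+  * BSD log lock (waking any readers via logwakeup()), and once to the
+  * console; va_copy() is needed because each __doprnt() pass consumes a
+  * va_list.
+  */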
+ bsd_log_lock();
+ __doprnt(format, ap, _iolog_logputc, NULL, 16);
+ bsd_log_unlock();
+ logwakeup();
+
+ __doprnt(format, ap2, _iolog_consputc, NULL, 16);
+
+ va_end(ap2);
}
#if !__LP64__