diff --git a/iokit/Kernel/IOLib.cpp b/iokit/Kernel/IOLib.cpp
index 5ed1c4708..3714d1d4f 100644
--- a/iokit/Kernel/IOLib.cpp
+++ b/iokit/Kernel/IOLib.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -49,6 +50,29 @@
 
 #include "IOKitKernelInternal.h"
 
+#ifdef IOALLOCDEBUG
+#include
+#include
+#endif
+
+#include "libkern/OSAtomic.h"
+#include
+#include
+#include
+
+#if IOKITSTATS
+
+#define IOStatisticsAlloc(type, size) \
+do { \
+	IOStatistics::countAlloc(type, size); \
+} while (0)
+
+#else
+
+#define IOStatisticsAlloc(type, size)
+
+#endif /* IOKITSTATS */
+
 extern "C"
 {
@@ -57,13 +81,19 @@ mach_timespec_t IOZeroTvalspec = { 0, 0 };
 
 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
 
-extern kern_return_t kmem_suballoc(
-	vm_map_t	parent,
-	vm_offset_t	*addr,
-	vm_size_t	size,
-	boolean_t	pageable,
-	boolean_t	anywhere,
-	vm_map_t	*new_map);
+extern int
+__doprnt(
+	const char	*fmt,
+	va_list		argp,
+	void		(*putc)(int, void *),
+	void		*arg,
+	int		radix);
+
+extern void cons_putc_locked(char);
+extern void bsd_log_lock(void);
+extern void bsd_log_unlock(void);
+extern void logwakeup();
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
@@ -80,6 +110,7 @@ void *_giDebugLogDataInternal = NULL;
 void *_giDebugReserved1 = NULL;
 void *_giDebugReserved2 = NULL;
 
+iopa_t gIOBMDPageAllocator;
 
 /*
  * Static variables for this module.
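The IOStatisticsAlloc macro added above wraps its body in do { ... } while (0) so that the expansion behaves as a single statement wherever one is expected. A minimal sketch of the failure the idiom prevents (standalone C with illustrative names, not kernel code):

    #include <stdio.h>

    /* Without do/while(0), a multi-statement macro breaks under if/else. */
    #define COUNT_ALLOC_BAD(size)  counted++; total += (size);
    #define COUNT_ALLOC(size)      do { counted++; total += (size); } while (0)

    static int  counted;
    static long total;

    int main(void)
    {
        int tracking = 1;
        if (tracking)
            COUNT_ALLOC(4096);  /* expands to one statement; the ';' closes it */
        else
            counted = 0;        /* with COUNT_ALLOC_BAD, this 'else' would not compile */
        printf("%d %ld\n", counted, total);
        return 0;
    }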
@@ -88,13 +119,18 @@ void *_giDebugReserved2 = NULL;
 static queue_head_t gIOMallocContiguousEntries;
 static lck_mtx_t *  gIOMallocContiguousEntriesLock;
 
-enum { kIOMaxPageableMaps = 16 };
-enum { kIOPageableMapSize = 96 * 1024 * 1024 };
+#if __x86_64__
+enum { kIOMaxPageableMaps    = 8 };
+enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
+enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
+#else
+enum { kIOMaxPageableMaps    = 16 };
+enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
+#endif
 
-/* LP64todo - these need to expand */
 typedef struct {
-    vm_map_t	map;
+    vm_map_t		map;
     vm_offset_t	address;
     vm_offset_t	end;
 } IOMapData;
 
@@ -106,6 +142,10 @@ static struct {
     lck_mtx_t *	lock;
 } gIOKitPageableSpace;
 
+static iopa_t gIOPageablePageAllocator;
+
+uint32_t gIOPageAllocChunkBytes;
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 void IOLibInit(void)
@@ -137,6 +177,11 @@ void IOLibInit(void)
     gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     queue_init( &gIOMallocContiguousEntries );
 
+    gIOPageAllocChunkBytes = PAGE_SIZE/64;
+    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
+    iopa_init(&gIOBMDPageAllocator);
+    iopa_init(&gIOPageablePageAllocator);
+
     libInitialized = true;
 }
 
@@ -170,20 +215,24 @@ void * IOMalloc(vm_size_t size)
     void * address;
 
     address = (void *)kalloc(size);
+    if ( address ) {
 #if IOALLOCDEBUG
-    if (address)
-	debug_iomalloc_size += size;
+	debug_iomalloc_size += size;
 #endif
+	IOStatisticsAlloc(kIOStatisticsMalloc, size);
+    }
+
     return address;
 }
 
 void IOFree(void * address, vm_size_t size)
 {
     if (address) {
-	kfree(address, size);
+	kfree(address, size);
 #if IOALLOCDEBUG
-	debug_iomalloc_size -= size;
+	debug_iomalloc_size -= size;
 #endif
+	IOStatisticsAlloc(kIOStatisticsFree, size);
     }
 }
 
@@ -192,10 +241,10 @@ void IOFree(void * address, vm_size_t size)
 void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 {
     kern_return_t	kr;
-    vm_address_t	address;
-    vm_address_t	allocationAddress;
+    vm_offset_t		address;
+    vm_offset_t		allocationAddress;
     vm_size_t		adjustedSize;
-    vm_offset_t		alignMask;
+    uintptr_t		alignMask;
 
     if (size == 0)
         return 0;
@@ -205,7 +254,10 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
     alignMask = alignment - 1;
     adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
 
-    if (adjustedSize >= page_size) {
+    if (size > adjustedSize) {
+	address = 0;    /* overflow detected */
+    }
+    else if (adjustedSize >= page_size) {
 
         kr = kernel_memory_allocate(kernel_map, &address,
 					size, alignMask, 0);
@@ -231,8 +283,8 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 			+ (sizeof(vm_size_t) + sizeof(vm_address_t)))
 			& (~alignMask);
 
-            *((vm_size_t *)(address - sizeof(vm_size_t)
-                            - sizeof(vm_address_t))) = adjustedSize;
+	    *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t)))
+			    = adjustedSize;
 	    *((vm_address_t *)(address - sizeof(vm_address_t)))
                             = allocationAddress;
 	} else
@@ -241,10 +293,12 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 
     assert(0 == (address & alignMask));
 
+    if( address) {
 #if IOALLOCDEBUG
-    if( address)
-	debug_iomalloc_size += size;
+	debug_iomalloc_size += size;
 #endif
+	IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
+    }
 
     return (void *) address;
 }
 
@@ -252,7 +306,7 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 void IOFreeAligned(void * address, vm_size_t size)
 {
     vm_address_t	allocationAddress;
-    vm_size_t	adjustedSize;
+    vm_size_t		adjustedSize;
 
     if( !address)
 	return;
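IOMallocAligned's sub-page path over-allocates, rounds the result up to the requested alignment, and stashes the adjusted size and the original allocation address immediately below the aligned pointer so IOFreeAligned can recover both; the new size > adjustedSize test catches a size large enough to wrap the header arithmetic. A user-space sketch of the same layout, assuming a power-of-two alignment (malloc/free stand in for kalloc/kfree):

    #include <stdint.h>
    #include <stdlib.h>

    /* Over-allocate, align, and hide {adjusted size, base pointer} below the result. */
    static void *aligned_alloc_with_header(size_t size, size_t alignment)
    {
        size_t header   = sizeof(size_t) + sizeof(uintptr_t);
        size_t adjusted = size + header;
        if (adjusted < size)                    /* overflow, as in the new check */
            return NULL;
        uintptr_t base = (uintptr_t) malloc(adjusted + alignment);
        if (!base)
            return NULL;
        uintptr_t aligned = (base + header + alignment - 1) & ~(uintptr_t)(alignment - 1);
        ((size_t *)   (aligned - sizeof(uintptr_t) - sizeof(size_t)))[0] = adjusted;
        ((uintptr_t *)(aligned - sizeof(uintptr_t)))[0] = base;
        return (void *) aligned;
    }

    static void aligned_free_with_header(void *p)
    {
        if (p)
            free((void *) ((uintptr_t *)((uintptr_t) p - sizeof(uintptr_t)))[0]);
    }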
@@ -262,10 +316,10 @@ void IOFreeAligned(void * address, vm_size_t size)
     adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
     if (adjustedSize >= page_size) {
 
-        kmem_free( kernel_map, (vm_address_t) address, size);
+        kmem_free( kernel_map, (vm_offset_t) address, size);
 
     } else {
-      	adjustedSize = *((vm_size_t *)( (vm_address_t) address
+        adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                 - sizeof(vm_address_t) - sizeof(vm_size_t)));
         allocationAddress = *((vm_address_t *)( (vm_address_t) address
 				- sizeof(vm_address_t) ));
@@ -279,12 +333,14 @@ void IOFreeAligned(void * address, vm_size_t size)
 #if IOALLOCDEBUG
     debug_iomalloc_size -= size;
 #endif
+
+    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 void
-IOKernelFreeContiguous(mach_vm_address_t address, mach_vm_size_t size)
+IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
 {
     mach_vm_address_t allocationAddress;
     mach_vm_size_t    adjustedSize;
@@ -297,7 +353,7 @@ IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
     adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
     if (adjustedSize >= page_size) {
 
-	kmem_free( kernel_map, (vm_address_t) address, size);
+	kmem_free( kernel_map, (vm_offset_t) address, size);
 
     } else {
@@ -308,13 +364,16 @@ IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
 	kfree((void *)allocationAddress, adjustedSize);
     }
 
+    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
 #if IOALLOCDEBUG
     debug_iomalloc_size -= size;
 #endif
 }
 
+
 mach_vm_address_t
-IOKernelAllocateContiguous(mach_vm_size_t size, mach_vm_size_t alignment)
+IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
+			             mach_vm_size_t alignment, bool contiguous)
 {
     kern_return_t	kr;
     mach_vm_address_t	address;
@@ -330,19 +389,39 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t max
     alignMask = alignment - 1;
     adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
 
-    if (adjustedSize >= page_size)
+    contiguous = (contiguous && (adjustedSize > page_size))
+               || (alignment > page_size);
+
+    if (contiguous || maxPhys)
     {
+	int options = 0;
 	vm_offset_t virt;
+
 	adjustedSize = size;
-	if (adjustedSize > page_size)
+	contiguous = (contiguous && (adjustedSize > page_size))
+                   || (alignment > page_size);
+
+	if (!contiguous)
+	{
+	    if (maxPhys <= 0xFFFFFFFF)
+	    {
+		maxPhys = 0;
+		options |= KMA_LOMEM;
+	    }
+	    else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
+	    {
+		maxPhys = 0;
+	    }
+	}
+	if (contiguous || maxPhys)
 	{
 	    kr = kmem_alloc_contig(kernel_map, &virt, size,
-				   alignMask, 0);
+				   alignMask, atop(maxPhys), atop(alignMask), 0);
 	}
 	else
 	{
 	    kr = kernel_memory_allocate(kernel_map, &virt,
-					size, alignMask, 0);
+					size, alignMask, options);
 	}
 	if (KERN_SUCCESS == kr)
 	    address = virt;
@@ -361,7 +440,7 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t max
 			   & (~alignMask);
 
 	if (atop_32(address) != atop_32(address + size - 1))
-	    address = round_page_32(address);
+	    address = round_page(address);
 
 	*((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
 			    - sizeof(mach_vm_address_t))) = adjustedSize;
@@ -371,14 +450,17 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t max
 	    address = 0;
     }
 
+    if (address) {
+	IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
 #if IOALLOCDEBUG
-    if (address)
 	debug_iomalloc_size += size;
 #endif
+    }
 
     return (address);
 }
 
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 struct _IOMallocContiguousEntry
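In the sub-page branch of the renamed IOKernelAllocateWithPhysicalRestrict, kalloc'ing 2 * size leaves enough slack that when the aligned block would straddle a page boundary (the atop_32 comparison), the address can simply be rounded up to the next page: a block that fits within one page is physically contiguous by construction. A small sketch of that fixup, assuming 4 KB pages and size < page_size as on this path:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_BYTES 4096u   /* illustrative; the kernel uses page_size */

    /* If [addr, addr+size) crosses a page boundary, slide it to the next page.
     * Safe here because the caller reserved 2*size bytes of slack. */
    static uintptr_t avoid_page_crossing(uintptr_t addr, size_t size)
    {
        if ((addr / PAGE_BYTES) != ((addr + size - 1) / PAGE_BYTES))
            addr = (addr + PAGE_BYTES - 1) & ~(uintptr_t)(PAGE_BYTES - 1);
        return addr;
    }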
@@ -402,16 +484,17 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
     /* Do we want a physical address? */
     if (!physicalAddress)
     {
-	address = IOKernelAllocateContiguous(size, alignment);
+	address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
     }
     else do
     {
 	IOBufferMemoryDescriptor * bmd;
 	mach_vm_address_t          physicalMask;
-	vm_offset_t		   alignMask;
+	vm_offset_t		alignMask;
 
 	alignMask = alignment - 1;
-	physicalMask = 0xFFFFFFFF ^ (alignMask & PAGE_MASK);
+	physicalMask = (0xFFFFFFFF ^ alignMask);
+
 	bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 		kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
 	if (!bmd)
@@ -471,7 +554,7 @@ void IOFreeContiguous(void * _address, vm_size_t size)
     }
     else
     {
-	IOKernelFreeContiguous((mach_vm_address_t) address, size);
+	IOKernelFreePhysical((mach_vm_address_t) address, size);
     }
 }
 
@@ -547,7 +630,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
 
 struct IOMallocPageableRef
 {
-    vm_address_t address;
+    vm_offset_t address;
     vm_size_t	 size;
 };
 
@@ -561,7 +644,7 @@ static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
     return( kr );
 }
 
-void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
+static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
 {
     kern_return_t	       kr = kIOReturnNotReady;
     struct IOMallocPageableRef ref;
@@ -576,15 +659,10 @@ static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
     if( kIOReturnSuccess != kr)
         ref.address = 0;
 
-#if IOALLOCDEBUG
-    if( ref.address)
-       debug_iomallocpageable_size += round_page_32(size);
-#endif
-
     return( (void *) ref.address );
 }
 
-vm_map_t IOPageableMapForAddress( vm_address_t address )
+vm_map_t IOPageableMapForAddress( uintptr_t address )
 {
     vm_map_t	map = 0;
     UInt32	index;
@@ -597,24 +675,205 @@ vm_map_t IOPageableMapForAddress( uintptr_t address )
         }
     }
     if( !map)
-        IOPanic("IOPageableMapForAddress: null");
+        panic("IOPageableMapForAddress: null");
 
     return( map );
 }
 
-void IOFreePageable(void * address, vm_size_t size)
+static void IOFreePageablePages(void * address, vm_size_t size)
 {
     vm_map_t map;
 
     map = IOPageableMapForAddress( (vm_address_t) address);
     if( map)
         kmem_free( map, (vm_offset_t) address, size);
+}
 
+static uintptr_t IOMallocOnePageablePage(iopa_t * a)
+{
+    return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
+}
+
+void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
+{
+    void * addr;
+
+    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment);
+    else addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));
+
+    if (addr) {
 #if IOALLOCDEBUG
-    debug_iomallocpageable_size -= round_page_32(size);
+	debug_iomallocpageable_size += size;
 #endif
+	IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
+    }
+
+    return (addr);
+}
+
+void IOFreePageable(void * address, vm_size_t size)
+{
+#if IOALLOCDEBUG
+    debug_iomallocpageable_size -= size;
+#endif
+    IOStatisticsAlloc(kIOStatisticsFreePageable, size);
+
+    if (size < (page_size - 4*gIOPageAllocChunkBytes))
+    {
+	address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
+	size = page_size;
+    }
+    if (address) IOFreePageablePages(address, size);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+extern "C" void
+iopa_init(iopa_t * a)
+{
+    bzero(a, sizeof(*a));
+    a->lock = IOLockAlloc();
+    queue_init(&a->list);
+}
+
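The iopa allocator initialized above carves every backing page into 64 chunks of gIOPageAllocChunkBytes (PAGE_SIZE/64), parks the iopa_page_t bookkeeping header in the page's last chunk, and tracks free chunks in the 64-bit avail field with chunk i mapped to bit 63 - i; the initial value -2ULL therefore marks all chunks free except the header's own. A sketch of that bit mapping (the iopa_page_t definition itself comes from a header outside this diff):

    #include <assert.h>
    #include <stdint.h>

    /* Mask with 'count' bits set starting at chunk index 'chunk'
     * (chunk i <-> bit 63 - i, matching the clz-based scan in the diff). */
    static inline uint64_t chunk_mask(uint32_t chunk, uint32_t count)
    {
        return (~0ULL << (64 - count)) >> chunk;
    }

    int main(void)
    {
        uint64_t avail = ~chunk_mask(63, 1);  /* == -2ULL: all free but the header chunk */
        assert(avail == (uint64_t) -2LL);
        avail &= ~chunk_mask(0, 3);           /* allocate chunks 0..2 */
        avail |=  chunk_mask(0, 3);           /* free them again */
        assert(avail == (uint64_t) -2LL);
        return 0;
    }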
+static uintptr_t
+iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
+{
+    uint32_t n, s;
+    uint64_t avail = pa->avail;
+
+    assert(avail);
+
+    // find strings of count 1 bits in avail
+    for (n = count; n > 1; n -= s)
+    {
+	s = n >> 1;
+	avail = avail & (avail << s);
+    }
+    // and aligned
+    avail &= align;
+
+    if (avail)
+    {
+	n = __builtin_clzll(avail);
+	pa->avail &= ~((-1ULL << (64 - count)) >> n);
+	if (!pa->avail && pa->link.next)
+	{
+	    remque(&pa->link);
+	    pa->link.next = 0;
+	}
+	return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
+    }
+
+    return (0);
+}
+
+static uint32_t
+log2up(uint32_t size)
+{
+    if (size <= 1) size = 0;
+    else size = 32 - __builtin_clz(size - 1);
+    return (size);
+}
+
+uintptr_t
+iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
+{
+    static const uint64_t align_masks[] = {
+	0xFFFFFFFFFFFFFFFF,
+	0xAAAAAAAAAAAAAAAA,
+	0x8888888888888888,
+	0x8080808080808080,
+	0x8000800080008000,
+	0x8000000080000000,
+	0x8000000000000000,
+    };
+    iopa_page_t * pa;
+    uintptr_t     addr = 0;
+    uint32_t      count;
+    uint64_t      align;
+
+    if (!bytes) bytes = 1;
+    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
+    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];
+
+    IOLockLock(a->lock);
+    pa = (typeof(pa)) queue_first(&a->list);
+    while (!queue_end(&a->list, &pa->link))
+    {
+	addr = iopa_allocinpage(pa, count, align);
+	if (addr)
+	{
+	    a->bytecount += bytes;
+	    break;
+	}
+	pa = (typeof(pa)) queue_next(&pa->link);
+    }
+    IOLockUnlock(a->lock);
+
+    if (!addr)
+    {
+	addr = alloc(a);
+	if (addr)
+	{
+	    pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
+	    pa->signature = kIOPageAllocSignature;
+	    pa->avail     = -2ULL;
+
+	    addr = iopa_allocinpage(pa, count, align);
+	    IOLockLock(a->lock);
+	    if (pa->avail) enqueue_head(&a->list, &pa->link);
+	    a->pagecount++;
+	    if (addr) a->bytecount += bytes;
+	    IOLockUnlock(a->lock);
+	}
+    }
+
+    assert((addr & ((1 << log2up(balign)) - 1)) == 0);
+    return (addr);
+}
+
+uintptr_t
+iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
+{
+    iopa_page_t * pa;
+    uint32_t      count;
+    uintptr_t     chunk;
+
+    if (!bytes) bytes = 1;
+
+    chunk = (addr & page_mask);
+    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));
+
+    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
+    assert(kIOPageAllocSignature == pa->signature);
+
+    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
+    chunk /= gIOPageAllocChunkBytes;
+
+    IOLockLock(a->lock);
+    if (!pa->avail)
+    {
+	assert(!pa->link.next);
+	enqueue_tail(&a->list, &pa->link);
+    }
+    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
+    if (pa->avail != -2ULL) pa = 0;
+    else
+    {
+	remque(&pa->link);
+	pa->link.next = 0;
+	pa->signature = 0;
+	a->pagecount--;
+	// page to free
+	pa = (typeof(pa)) trunc_page(pa);
+    }
+    a->bytecount -= bytes;
+    IOLockUnlock(a->lock);
+
+    return ((uintptr_t) pa);
+}
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
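The loop at the top of iopa_allocinpage is the classic parallel run-detection trick: each avail &= (avail << s) step merges the evidence of two shorter runs, and once the chosen shifts sum to count - 1, a surviving bit at position p proves bits p down to p - count + 1 were all set; __builtin_clzll then selects the surviving run at the lowest chunk index. A standalone illustration with a worked 8-bit value:

    #include <assert.h>
    #include <stdint.h>

    /* Collapse 'avail' so a set bit at position p means a run of 'count'
     * set bits ends at p (same loop shape as iopa_allocinpage). */
    static uint64_t runs_of(uint64_t avail, uint32_t count)
    {
        uint32_t n, s;
        for (n = count; n > 1; n -= s)
        {
            s = n >> 1;
            avail &= (avail << s);
        }
        return avail;
    }

    int main(void)
    {
        /* 0x76 = 0b01110110 contains exactly one run of three set bits (bits 6..4). */
        assert(runs_of(0x76, 3) == 0x40);   /* bit 6: the run's topmost bit survives */
        assert(runs_of(0x76, 4) == 0);      /* no run of four anywhere */
        return 0;
    }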
@@ -625,9 +884,13 @@ IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
 
     if( task != kernel_task)
 	return( kIOReturnUnsupported );
-
-    length = round_page_32(address + length) - trunc_page_32( address );
-    address = trunc_page_32( address );
+    if ((address | length) & PAGE_MASK)
+    {
+//	OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
+	return( kIOReturnUnsupported );
+    }
+    length = round_page(address + length) - trunc_page( address );
+    address = trunc_page( address );
 
     // make map mode
     cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
@@ -656,26 +919,23 @@ IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
     if( task != kernel_task)
 	return( kIOReturnUnsupported );
 
-#if __ppc__
     flush_dcache64( (addr64_t) address, (unsigned) length, false );
-#endif
 
     return( kIOReturnSuccess );
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-SInt32 OSKernelStackRemaining( void )
+vm_offset_t OSKernelStackRemaining( void )
 {
-   SInt32 stack;
-
-   stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1));
-
-   return( stack );
+    return (ml_stack_remaining());
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+/*
+ * Spin for indicated number of milliseconds.
+ */
 void IOSleep(unsigned milliseconds)
 {
     delay_for_interval(milliseconds, kMillisecondScale);
@@ -689,23 +949,55 @@ void IODelay(unsigned microseconds)
     delay_for_interval(microseconds, kMicrosecondScale);
 }
 
+/*
+ * Spin for indicated number of nanoseconds.
+ */
+void IOPause(unsigned nanoseconds)
+{
+    delay_for_interval(nanoseconds, kNanosecondScale);
+}
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+static void _iolog_consputc(int ch, void *arg __unused)
+{
+    cons_putc_locked(ch);
+}
+
+static void _iolog_logputc(int ch, void *arg __unused)
+{
+    log_putc_locked(ch);
+}
+
 void IOLog(const char *format, ...)
 {
-    va_list ap;
-    extern void conslog_putc(char);
-    extern void logwakeup(void);
+    va_list ap;
 
-    va_start(ap, format);
-    _doprnt(format, &ap, conslog_putc, 16);
-    va_end(ap);
+    va_start(ap, format);
+    IOLogv(format, ap);
+    va_end(ap);
 }
 
+void IOLogv(const char *format, va_list ap)
+{
+    va_list ap2;
+
+    va_copy(ap2, ap);
+
+    bsd_log_lock();
+    __doprnt(format, ap, _iolog_logputc, NULL, 16);
+    bsd_log_unlock();
+    logwakeup();
+
+    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
+}
+
+#if !__LP64__
 void IOPanic(const char *reason)
 {
-    panic(reason);
+	panic("%s", reason);
 }
+#endif
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
@@ -720,7 +1012,7 @@ const char *IOFindNameForValue(int value, const IONamedValue *regValueArray)
 	if(regValueArray->value == value)
 	    return(regValueArray->name);
     }
-    sprintf(noValue, "0x%x (UNDEFINED)", value);
+    snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
     return((const char *)noValue);
 }
 
@@ -737,6 +1029,16 @@ IOReturn IOFindValueForName(const char *string,
 	return kIOReturnBadArgument;
 }
 
+OSString * IOCopyLogNameForPID(int pid)
+{
+    char   buf[128];
+    size_t len;
+    snprintf(buf, sizeof(buf), "pid %d, ", pid);
+    len = strlen(buf);
+    proc_name(pid, buf + len, sizeof(buf) - len);
+    return (OSString::withCString(buf));
+}
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 IOAlignment IOSizeToAlignment(unsigned int size)
@@ -763,3 +1065,6 @@ unsigned int IOAlignmentToSize(IOAlignment align)
     }
 }
 
 } /* extern "C" */
+
+
+
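IOLogv now formats the argument list twice, once into the message buffer under bsd_log_lock() and once to the console; this is only safe because va_copy duplicates the va_list before the first __doprnt pass consumes it. The same pattern in portable C, with vfprintf standing in for the two sinks:

    #include <stdarg.h>
    #include <stdio.h>

    /* Emit one formatted message to two sinks; a va_list may only be
     * traversed once, so copy it before the first pass. */
    static void log_twice(const char *fmt, va_list ap)
    {
        va_list ap2;
        va_copy(ap2, ap);           /* copy before 'ap' is consumed */
        vfprintf(stdout, fmt, ap);  /* first sink: stands in for the message buffer */
        vfprintf(stderr, fmt, ap2); /* second sink: stands in for the console */
        va_end(ap2);
    }

    void log_both(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        log_twice(fmt, ap);
        va_end(ap);
    }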