X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/de355530ae67247cbd0da700edb3a2a1dae884c2..21362eb3e66fd2c787aee132bce100a44d71a99c:/iokit/Kernel/IOLib.c

diff --git a/iokit/Kernel/IOLib.c b/iokit/Kernel/IOLib.c
index 4eb08077e..19f05bd80 100644
--- a/iokit/Kernel/IOLib.c
+++ b/iokit/Kernel/IOLib.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * HISTORY
@@ -36,10 +42,19 @@
 #include <IOKit/IOReturn.h>
 #include <IOKit/IOLib.h>
+#include <IOKit/IOLocks.h>
+#include <IOKit/IOMapper.h>
 #include <IOKit/IOKitDebug.h>
 
+#include "IOKitKernelInternal.h"
+
 mach_timespec_t IOZeroTvalspec = { 0, 0 };
 
+extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+lck_grp_t *IOLockGroup;
 
 /*
  * Global variables for use by iLogger
@@ -57,15 +72,14 @@ void *_giDebugReserved2 = NULL;
  * Static variables for this module.
  */
 
-static IOThreadFunc threadArgFcn;
-static void *       threadArgArg;
-static lock_t *     threadArgLock;
-
+static queue_head_t gIOMallocContiguousEntries;
+static lck_mtx_t *  gIOMallocContiguousEntriesLock;
 
 enum { kIOMaxPageableMaps = 16 };
-enum { kIOPageableMapSize = 16 * 1024 * 1024 };
-enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };
+enum { kIOPageableMapSize = 96 * 1024 * 1024 };
+enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
+/* LP64todo - these need to expand */
 typedef struct {
     vm_map_t    map;
     vm_offset_t address;
@@ -76,9 +90,10 @@ static struct {
     UInt32      count;
     UInt32      hint;
     IOMapData   maps[ kIOMaxPageableMaps ];
-    mutex_t *   lock;
+    lck_mtx_t * lock;
 } gIOKitPageableSpace;
 
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 void IOLibInit(void)
 {
@@ -89,66 +104,49 @@ void IOLibInit(void)
 
     if(libInitialized)
         return;
 
-    threadArgLock = lock_alloc( true, NULL, NULL );
-
     gIOKitPageableSpace.maps[0].address = 0;
     ret = kmem_suballoc(kernel_map,
                     &gIOKitPageableSpace.maps[0].address,
                     kIOPageableMapSize,
                     TRUE,
-                    TRUE,
+                    VM_FLAGS_ANYWHERE,
                     &gIOKitPageableSpace.maps[0].map);
     if (ret != KERN_SUCCESS)
         panic("failed to allocate iokit pageable map\n");
 
-    gIOKitPageableSpace.lock        = mutex_alloc( 0 );
+    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
+
+    gIOKitPageableSpace.lock        = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
     gIOKitPageableSpace.hint        = 0;
     gIOKitPageableSpace.count       = 1;
 
+    gIOMallocContiguousEntriesLock  = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
+    queue_init( &gIOMallocContiguousEntries );
+
     libInitialized = true;
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-/*
- * We pass an argument to a new thread by saving fcn and arg in some
- * locked variables and starting the thread at ioThreadStart(). This
- * function retrives fcn and arg and makes the appropriate call.
- *
- */
-
-static void ioThreadStart( void )
-{
-    IOThreadFunc fcn;
-    void * arg;
-
-    fcn = threadArgFcn;
-    arg = threadArgArg;
-    lock_done( threadArgLock);
-
-    (*fcn)(arg);
-
-    IOExitThread();
-}
-
 IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
 {
-    IOThread thread;
+    kern_return_t result;
+    thread_t      thread;
 
-    lock_write( threadArgLock);
-    threadArgFcn = fcn;
-    threadArgArg = arg;
+    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
+    if (result != KERN_SUCCESS)
+        return (NULL);
 
-    thread = kernel_thread( kernel_task, ioThreadStart);
+    thread_deallocate(thread);
 
-    return(thread);
+    return (thread);
 }
 
-volatile void IOExitThread()
+volatile void IOExitThread(void)
 {
-    (void) thread_terminate(current_act());
+    (void) thread_terminate(current_thread());
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
@@ -169,7 +167,7 @@ void * IOMalloc(vm_size_t size)
 void IOFree(void * address, vm_size_t size)
 {
     if (address) {
-        kfree((vm_offset_t)address, size);
+        kfree(address, size);
 #if IOALLOCDEBUG
         debug_iomalloc_size -= size;
 #endif
@@ -260,9 +258,9 @@ void IOFreeAligned(void * address, vm_size_t size)
                             - sizeof(vm_address_t) ));
 
         if (adjustedSize >= page_size)
-            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize);
+            kmem_free( kernel_map, allocationAddress, adjustedSize);
         else
-            kfree((vm_offset_t) allocationAddress, adjustedSize);
+            kfree((void *)allocationAddress, adjustedSize);
     }
 
 #if IOALLOCDEBUG
@@ -272,6 +270,14 @@ void IOFreeAligned(void * address, vm_size_t size)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+struct _IOMallocContiguousEntry
+{
+    void *        virtual;
+    ppnum_t       ioBase;
+    queue_chain_t link;
+};
+typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
+
 void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
 {
@@ -280,6 +286,7 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
     vm_address_t allocationAddress;
     vm_size_t    adjustedSize;
     vm_offset_t  alignMask;
+    ppnum_t      pagenum;
 
     if (size == 0)
         return 0;
@@ -289,15 +296,24 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
     alignMask = alignment - 1;
     adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
 
-    if (adjustedSize >= page_size) {
-
-        kr = kmem_alloc_contig(kernel_map, &address, size,
-                               alignMask, 0);
+    if (adjustedSize >= page_size)
+    {
+        adjustedSize = size;
+        if (adjustedSize > page_size)
+        {
+            kr = kmem_alloc_contig(kernel_map, &address, size,
+                                   alignMask, 0);
+        }
+        else
+        {
+            kr = kernel_memory_allocate(kernel_map, &address,
+                                        size, alignMask, 0);
+        }
         if (KERN_SUCCESS != kr)
             address = 0;
-
-    } else {
-
+    }
+    else
+    {
         adjustedSize += alignMask;
         allocationAddress = (vm_address_t) kalloc(adjustedSize);
 
@@ -307,8 +323,8 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                     + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                     & (~alignMask);
 
-            if (atop(address) != atop(address + size - 1))
-                address = round_page(address);
+            if (atop_32(address) != atop_32(address + size - 1))
+                address = round_page_32(address);
 
             *((vm_size_t *)(address - sizeof(vm_size_t)
                             - sizeof(vm_address_t))) = adjustedSize;
@@ -318,9 +334,49 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
             address = 0;
     }
 
-    if( address && physicalAddress)
-        *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
-                                                             address );
+    /* Do we want a physical address? */
+    if (address && physicalAddress)
+    {
+        do
+        {
+            /* Get the physical page */
+            pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
+            if(pagenum)
+            {
+                IOByteCount offset;
+                ppnum_t base;
+
+                base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
+                if (base)
+                {
+                    _IOMallocContiguousEntry *
+                        entry = IONew(_IOMallocContiguousEntry, 1);
+                    if (!entry)
+                    {
+                        IOFreeContiguous((void *) address, size);
+                        address = 0;
+                        break;
+                    }
+                    entry->virtual = (void *) address;
+                    entry->ioBase  = base;
+                    lck_mtx_lock(gIOMallocContiguousEntriesLock);
+                    queue_enter( &gIOMallocContiguousEntries, entry,
+                                 _IOMallocContiguousEntry *, link );
+                    lck_mtx_unlock(gIOMallocContiguousEntriesLock);
+
+                    *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
+                    for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
+                        IOMapperInsertPage( base, offset, pagenum );
+                }
+                else
+                    *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
+            }
+            else
+                /* Did not find, return 0 */
+                *physicalAddress = (IOPhysicalAddress) 0;
+        }
+        while (false);
+    }
 
     assert(0 == (address & alignMask));
 
@@ -334,14 +390,35 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
 
 void IOFreeContiguous(void * address, vm_size_t size)
 {
-    vm_address_t allocationAddress;
-    vm_size_t    adjustedSize;
+    vm_address_t               allocationAddress;
+    vm_size_t                  adjustedSize;
+    _IOMallocContiguousEntry * entry;
+    ppnum_t                    base = 0;
 
     if( !address)
         return;
 
     assert(size);
 
+    lck_mtx_lock(gIOMallocContiguousEntriesLock);
+    queue_iterate( &gIOMallocContiguousEntries, entry,
+                   _IOMallocContiguousEntry *, link )
+    {
+        if( entry->virtual == address ) {
+            base = entry->ioBase;
+            queue_remove( &gIOMallocContiguousEntries, entry,
+                          _IOMallocContiguousEntry *, link );
+            break;
+        }
+    }
+    lck_mtx_unlock(gIOMallocContiguousEntriesLock);
+
+    if (base)
+    {
+        IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
+        IODelete(entry, _IOMallocContiguousEntry, 1);
+    }
+
     adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
     if (adjustedSize >= page_size) {
 
@@ -353,7 +430,7 @@ void IOFreeContiguous(void * address, vm_size_t size)
         allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                 - sizeof(vm_address_t) ));
 
-        kfree((vm_offset_t) allocationAddress, adjustedSize);
+        kfree((void *)allocationAddress, adjustedSize);
     }
 
 #if IOALLOCDEBUG
@@ -363,8 +440,6 @@ void IOFreeContiguous(void * address, vm_size_t size)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
-
 kern_return_t IOIteratePageableMaps(vm_size_t size,
                     IOIteratePageableMapsCallback callback, void * ref)
 {
@@ -395,11 +470,11 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
         if( KERN_SUCCESS == kr)
             break;
 
-        mutex_lock( gIOKitPageableSpace.lock );
+        lck_mtx_lock( gIOKitPageableSpace.lock );
 
         index = gIOKitPageableSpace.count;
         if( index >= (kIOMaxPageableMaps - 1)) {
-            mutex_unlock( gIOKitPageableSpace.lock );
+            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }
 
@@ -413,10 +488,10 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
                    &min,
                    segSize,
                    TRUE,
-                    TRUE,
+                    VM_FLAGS_ANYWHERE,
                    &map);
        if( KERN_SUCCESS != kr) {
-            mutex_unlock( gIOKitPageableSpace.lock );
+            lck_mtx_unlock( gIOKitPageableSpace.lock );
            break;
        }
 
@@ -426,7 +501,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
        gIOKitPageableSpace.hint  = index;
        gIOKitPageableSpace.count = index + 1;
 
-        mutex_unlock( gIOKitPageableSpace.lock );
+        lck_mtx_unlock( gIOKitPageableSpace.lock );
 
     } while( true );
 
@@ -466,7 +541,7 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
 
 #if IOALLOCDEBUG
     if( ref.address)
-        debug_iomalloc_size += round_page(size);
+        debug_iomallocpageable_size += round_page_32(size);
 #endif
 
     return( (void *) ref.address );
@@ -499,38 +574,38 @@ void IOFreePageable(void * address, vm_size_t size)
        kmem_free( map, (vm_offset_t) address, size);
 
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= round_page(size);
+    debug_iomallocpageable_size -= round_page_32(size);
 #endif
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
-                        vm_size_t length, unsigned int options);
-
 IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                   IOByteCount length, IOOptionBits cacheMode )
 {
     IOReturn    ret = kIOReturnSuccess;
-    vm_offset_t physAddr;
+    ppnum_t     pagenum;
 
     if( task != kernel_task)
        return( kIOReturnUnsupported );
 
-    length = round_page(address + length) - trunc_page( address );
-    address = trunc_page( address );
+    length = round_page_32(address + length) - trunc_page_32( address );
+    address = trunc_page_32( address );
 
     // make map mode
    cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
 
    while( (kIOReturnSuccess == ret) && (length > 0) ) {
 
-       physAddr = pmap_extract( kernel_pmap, address );
-       if( physAddr)
-           ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
-       else
+       // Get the physical page number
+       pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
+       if( pagenum) {
+           ret = IOUnmapPages( get_task_map(task), address, page_size );
+           ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
+       } else
           ret = kIOReturnVMError;
+
       address += page_size;
       length -= page_size;
    }
 
@@ -545,7 +620,7 @@ IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
       return( kIOReturnUnsupported );
 
 #if __ppc__
-    flush_dcache( (vm_offset_t) address, (unsigned) length, false );
+    flush_dcache64( (addr64_t) address, (unsigned) length, false );
 #endif
 
    return( kIOReturnSuccess );
@@ -566,13 +641,7 @@ SInt32 OSKernelStackRemaining( void )
 
 void IOSleep(unsigned milliseconds)
 {
-    wait_result_t wait_result;
-
-    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
-    assert(wait_result == THREAD_WAITING);
-
-    wait_result = thread_block(THREAD_CONTINUE_NULL);
-    assert(wait_result == THREAD_TIMED_OUT);
+    delay_for_interval(milliseconds, kMillisecondScale);
 }
 
 /*
@@ -580,9 +649,7 @@ void IOSleep(unsigned milliseconds)
  */
 void IODelay(unsigned microseconds)
 {
-    extern void delay(int usec);
-
-    delay(microseconds);
+    delay_for_interval(microseconds, kMicrosecondScale);
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
@@ -591,7 +658,7 @@ void IOLog(const char *format, ...)
 {
    va_list ap;
    extern void conslog_putc(char);
-   extern void logwakeup();
+   extern void logwakeup(void);
 
    va_start(ap, format);
    _doprnt(format, &ap, conslog_putc, 16);
@@ -657,8 +724,3 @@ unsigned int IOAlignmentToSize(IOAlignment align)
     }
     return size;
 }
-
-IOReturn IONDRVLibrariesInitialize( void )
-{
-    return( kIOReturnUnsupported );
-}
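
A minimal usage sketch of the thread API as revised above (myWorker and startWorker are hypothetical names for illustration, not part of IOLib.c). After this change, IOCreateThread() wraps kernel_thread_start(), drops the kernel's thread reference with thread_deallocate() before returning, and returns NULL when thread creation fails, so callers should check the result:

    #include <IOKit/IOLib.h>

    /* Hypothetical worker: runs on the new kernel thread and terminates
     * itself; IOExitThread() now calls thread_terminate(current_thread()). */
    static void myWorker(void *arg)
    {
        IOLog("worker running, arg = %p\n", arg);
        IOSleep(10);      /* IOSleep() is now backed by delay_for_interval() */
        IOExitThread();
    }

    /* Hypothetical caller illustrating the post-change contract. */
    static IOReturn startWorker(void *context)
    {
        IOThread thread = IOCreateThread(myWorker, context);
        if (!thread)
            return kIOReturnNoResources;  /* kernel_thread_start() failed */

        return kIOReturnSuccess;
    }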