X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/55e303ae13a4cf49d70f2294092726f2fffb9ef2..21362eb3e66fd2c787aee132bce100a44d71a99c:/iokit/Kernel/IOLib.c?ds=sidebyside

diff --git a/iokit/Kernel/IOLib.c b/iokit/Kernel/IOLib.c
index 41394e11c..19f05bd80 100644
--- a/iokit/Kernel/IOLib.c
+++ b/iokit/Kernel/IOLib.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * HISTORY
@@ -39,15 +42,20 @@
 #include 
 #include 
+#include 
 
 #include 
 #include 
 
+#include "IOKitKernelInternal.h"
+
 mach_timespec_t IOZeroTvalspec = { 0, 0 };
 
 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+lck_grp_t *IOLockGroup;
+
 /*
  * Global variables for use by iLogger
  *   These symbols are for use only by Apple diagnostic code.
@@ -64,17 +72,14 @@ void *_giDebugReserved2 = NULL;
  * Static variables for this module.
  */
 
-static IOThreadFunc threadArgFcn;
-static void * threadArgArg;
-static lock_t * threadArgLock;
-
 static queue_head_t gIOMallocContiguousEntries;
-static mutex_t * gIOMallocContiguousEntriesLock;
+static lck_mtx_t * gIOMallocContiguousEntriesLock;
 
 enum { kIOMaxPageableMaps = 16 };
-enum { kIOPageableMapSize = 16 * 1024 * 1024 };
+enum { kIOPageableMapSize = 96 * 1024 * 1024 };
 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
 
+/* LP64todo - these need to expand */
 typedef struct {
     vm_map_t map;
     vm_offset_t address;
@@ -85,7 +90,7 @@ static struct {
     UInt32 count;
     UInt32 hint;
     IOMapData maps[ kIOMaxPageableMaps ];
-    mutex_t * lock;
+    lck_mtx_t * lock;
 } gIOKitPageableSpace;
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -99,24 +104,24 @@ void IOLibInit(void)
     if(libInitialized)
         return;
 
-    threadArgLock = lock_alloc( true, NULL, NULL );
-
     gIOKitPageableSpace.maps[0].address = 0;
     ret = kmem_suballoc(kernel_map,
                     &gIOKitPageableSpace.maps[0].address,
                     kIOPageableMapSize,
                     TRUE,
-                    TRUE,
+                    VM_FLAGS_ANYWHERE,
                     &gIOKitPageableSpace.maps[0].map);
     if (ret != KERN_SUCCESS)
         panic("failed to allocate iokit pageable map\n");
 
-    gIOKitPageableSpace.lock = mutex_alloc( 0 );
+    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
+
+    gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
     gIOKitPageableSpace.hint = 0;
     gIOKitPageableSpace.count = 1;
 
-    gIOMallocContiguousEntriesLock = mutex_alloc( 0 );
+    gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     queue_init( &gIOMallocContiguousEntries );
 
     libInitialized = true;
@@ -124,44 +129,24 @@ void IOLibInit(void)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-/*
- * We pass an argument to a new thread by saving fcn and arg in some
- * locked variables and starting the thread at ioThreadStart(). This
- * function retrives fcn and arg and makes the appropriate call.
- *
- */
-
-static void ioThreadStart( void )
-{
-    IOThreadFunc fcn;
-    void * arg;
-
-    fcn = threadArgFcn;
-    arg = threadArgArg;
-    lock_done( threadArgLock);
-
-    (*fcn)(arg);
-
-    IOExitThread();
-}
-
 IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
 {
-    IOThread thread;
+    kern_return_t result;
+    thread_t thread;
 
-    lock_write( threadArgLock);
-    threadArgFcn = fcn;
-    threadArgArg = arg;
+    result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
+    if (result != KERN_SUCCESS)
+        return (NULL);
 
-    thread = kernel_thread( kernel_task, ioThreadStart);
+    thread_deallocate(thread);
 
-    return(thread);
+    return (thread);
 }
 
 
-volatile void IOExitThread()
+volatile void IOExitThread(void)
 {
-    (void) thread_terminate(current_act());
+    (void) thread_terminate(current_thread());
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -182,7 +167,7 @@ void * IOMalloc(vm_size_t size)
 void IOFree(void * address, vm_size_t size)
 {
     if (address) {
-        kfree((vm_offset_t)address, size);
+        kfree(address, size);
 #if IOALLOCDEBUG
         debug_iomalloc_size -= size;
 #endif
@@ -273,9 +258,9 @@ void IOFreeAligned(void * address, vm_size_t size)
                                 - sizeof(vm_address_t) ));
 
         if (adjustedSize >= page_size)
-            kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize);
+            kmem_free( kernel_map, allocationAddress, adjustedSize);
         else
-            kfree((vm_offset_t) allocationAddress, adjustedSize);
+            kfree((void *)allocationAddress, adjustedSize);
     }
 
 #if IOALLOCDEBUG
@@ -374,10 +359,10 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
            }
            entry->virtual = (void *) address;
            entry->ioBase  = base;
-           mutex_lock(gIOMallocContiguousEntriesLock);
+           lck_mtx_lock(gIOMallocContiguousEntriesLock);
           queue_enter( &gIOMallocContiguousEntries, entry,
                        _IOMallocContiguousEntry *, link );
-           mutex_unlock(gIOMallocContiguousEntriesLock);
+           lck_mtx_unlock(gIOMallocContiguousEntriesLock);
 
            *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
            for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
@@ -415,7 +400,7 @@ void IOFreeContiguous(void * address, vm_size_t size)
 
     assert(size);
 
-    mutex_lock(gIOMallocContiguousEntriesLock);
+    lck_mtx_lock(gIOMallocContiguousEntriesLock);
     queue_iterate( &gIOMallocContiguousEntries, entry,
                    _IOMallocContiguousEntry *, link )
     {
@@ -426,7 +411,7 @@ void IOFreeContiguous(void * address, vm_size_t size)
            break;
        }
     }
-    mutex_unlock(gIOMallocContiguousEntriesLock);
+    lck_mtx_unlock(gIOMallocContiguousEntriesLock);
 
     if (base)
     {
@@ -445,7 +430,7 @@ void IOFreeContiguous(void * address, vm_size_t size)
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));
 
-       kfree((vm_offset_t) allocationAddress, adjustedSize);
+       kfree((void *)allocationAddress, adjustedSize);
     }
 
 #if IOALLOCDEBUG
@@ -455,8 +440,6 @@ void IOFreeContiguous(void * address, vm_size_t size)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
-
 kern_return_t IOIteratePageableMaps(vm_size_t size,
                     IOIteratePageableMapsCallback callback, void * ref)
 {
@@ -487,11 +470,11 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
         if( KERN_SUCCESS == kr)
             break;
 
-        mutex_lock( gIOKitPageableSpace.lock );
+        lck_mtx_lock( gIOKitPageableSpace.lock );
 
         index = gIOKitPageableSpace.count;
         if( index >= (kIOMaxPageableMaps - 1)) {
-            mutex_unlock( gIOKitPageableSpace.lock );
+            lck_mtx_unlock( gIOKitPageableSpace.lock );
             break;
         }
 
@@ -505,10 +488,10 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
                        &min,
                        segSize,
                        TRUE,
-                       TRUE,
+                       VM_FLAGS_ANYWHERE,
                        &map);
         if( KERN_SUCCESS != kr) {
-            mutex_unlock( gIOKitPageableSpace.lock );
+            lck_mtx_unlock( gIOKitPageableSpace.lock );
             break;
         }
 
@@ -518,7 +501,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
         gIOKitPageableSpace.hint = index;
         gIOKitPageableSpace.count = index + 1;
 
-        mutex_unlock( gIOKitPageableSpace.lock );
+        lck_mtx_unlock( gIOKitPageableSpace.lock );
 
     } while( true );
 
@@ -558,7 +541,7 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
 
 #if IOALLOCDEBUG
     if( ref.address)
-       debug_iomalloc_size += round_page_32(size);
+       debug_iomallocpageable_size += round_page_32(size);
 #endif
 
     return( (void *) ref.address );
@@ -591,16 +574,12 @@ void IOFreePageable(void * address, vm_size_t size)
     kmem_free( map, (vm_offset_t) address, size);
 
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= round_page_32(size);
+    debug_iomallocpageable_size -= round_page_32(size);
 #endif
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
-                       vm_size_t length, unsigned int options);
-extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
-
 IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
 {
@@ -662,13 +641,7 @@ SInt32 OSKernelStackRemaining( void )
 
 void IOSleep(unsigned milliseconds)
 {
-    wait_result_t wait_result;
-
-    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
-    assert(wait_result == THREAD_WAITING);
-
-    wait_result = thread_block(THREAD_CONTINUE_NULL);
-    assert(wait_result == THREAD_TIMED_OUT);
+    delay_for_interval(milliseconds, kMillisecondScale);
 }
 
 /*
@@ -676,9 +649,7 @@ void IOSleep(unsigned milliseconds)
  */
 void IODelay(unsigned microseconds)
 {
-    extern void delay(int usec);
-
-    delay(microseconds);
+    delay_for_interval(microseconds, kMicrosecondScale);
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -687,7 +658,7 @@ void IOLog(const char *format, ...)
 {
     va_list ap;
     extern void conslog_putc(char);
-    extern void logwakeup();
+    extern void logwakeup(void);
 
     va_start(ap, format);
     _doprnt(format, &ap, conslog_putc, 16);
@@ -753,8 +724,3 @@ unsigned int IOAlignmentToSize(IOAlignment align)
     }
     return size;
 }
-
-IOReturn IONDRVLibrariesInitialize( void )
-{
-    return( kIOReturnUnsupported );
-}
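A note on the recurring lock substitution above: the diff retires the old mutex_alloc()/mutex_lock()/mutex_unlock() calls in favor of the lck_grp/lck_mtx family, with every IOKit mutex now charged to the single "IOKit" lock group allocated in IOLibInit(). The sketch below is a minimal illustration of that same pattern; the group name "MyDriver", the function names, and the <kern/locks.h> include path are assumptions for illustration, not taken from the diff.

    #include <kern/locks.h>             /* assumed header for the lck_* prototypes */

    static lck_grp_t *gMyGroup;         /* one group per subsystem, as IOLockGroup is for IOKit */
    static lck_mtx_t *gMyLock;

    static void my_subsystem_init(void)
    {
        /* Mirrors the IOLibInit() change: allocate a lock group once,
         * then allocate mutexes against it. */
        gMyGroup = lck_grp_alloc_init("MyDriver", LCK_GRP_ATTR_NULL);
        gMyLock  = lck_mtx_alloc_init(gMyGroup, LCK_ATTR_NULL);
    }

    static void my_subsystem_touch_state(void)
    {
        /* Same lock/unlock pairing the contiguous-allocation paths use above. */
        lck_mtx_lock(gMyLock);
        /* ... update shared state ... */
        lck_mtx_unlock(gMyLock);
    }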
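A note on the IOCreateThread() rewrite: the deleted path parked fcn/arg in globals guarded by threadArgLock and trampolined through ioThreadStart(); the replacement hands the argument directly to kernel_thread_start() and drops the extra thread reference with thread_deallocate(). The sketch below restates that pattern under assumed names (my_worker, start_my_worker, <kern/thread.h>); only kernel_thread_start(), thread_deallocate(), IOExitThread(), and the thread_continue_t cast come from the diff itself.

    #include <kern/thread.h>            /* assumed header for kernel_thread_start() */

    static void my_worker(void *arg)
    {
        /* ... consume arg ... */
        IOExitThread();                 /* terminates this thread, as IOLib's helper does */
    }

    static thread_t start_my_worker(void *arg)
    {
        thread_t thread;

        if (kernel_thread_start((thread_continue_t)my_worker, arg, &thread) != KERN_SUCCESS)
            return NULL;

        /* kernel_thread_start() returns with an extra reference on the new
         * thread; the new IOCreateThread() drops it immediately because the
         * caller only needs the (already running) thread. */
        thread_deallocate(thread);
        return thread;
    }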