[apple/xnu.git] iokit/Kernel/IOLib.c (xnu-792.6.61)
diff --git a/iokit/Kernel/IOLib.c b/iokit/Kernel/IOLib.c
index 525094a7c3f0c729eb7b719c3e75f3b52e8c1f17..0a75d9af377ebcedcf692a815dcef58ea2473a74 100644
--- a/iokit/Kernel/IOLib.c
+++ b/iokit/Kernel/IOLib.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
@@ -20,8 +20,6 @@
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
- * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved. 
- *
  * HISTORY
  *
  * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 
 #include <IOKit/IOReturn.h>
 #include <IOKit/IOLib.h> 
+#include <IOKit/IOLocks.h> 
+#include <IOKit/IOMapper.h>
 #include <IOKit/IOKitDebug.h> 
 
+#include "IOKitKernelInternal.h"
+
 mach_timespec_t IOZeroTvalspec = { 0, 0 };
 
+extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+lck_grp_t      *IOLockGroup;
+
 /*
- * Static variables for this module.
+ * Global variables for use by iLogger
+ * These symbols are for use only by Apple diagnostic code.
+ * Binary compatibility is not guaranteed for kexts that reference these symbols.
  */
 
-static IOThreadFunc threadArgFcn;
-static void * threadArgArg;
-static lock_t * threadArgLock;
+void *_giDebugLogInternal      = NULL;
+void *_giDebugLogDataInternal  = NULL;
+void *_giDebugReserved1                = NULL;
+void *_giDebugReserved2                = NULL;
+
+
+/*
+ * Static variables for this module.
+ */
 
+static queue_head_t gIOMallocContiguousEntries;
+static lck_mtx_t *  gIOMallocContiguousEntriesLock;
 
 enum { kIOMaxPageableMaps = 16 };
-enum { kIOPageableMapSize = 16 * 1024 * 1024 };
-enum { kIOPageableMaxMapSize = 32 * 1024 * 1024 };
+enum { kIOPageableMapSize = 96 * 1024 * 1024 };
+enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
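Taken together, these constants let the IOKit pageable space grow on demand to kIOMaxPageableMaps submaps of kIOPageableMapSize each (16 * 96 MB = 1536 MB of reservation), up from the previous 16 MB initial / 32 MB per-map maximum; a single pageable request is still capped at kIOPageableMaxMapSize (96 MB), as enforced further down in IOIteratePageableMaps().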
 
+/* LP64todo - these need to expand */
 typedef struct {
     vm_map_t   map;
     vm_offset_t        address;
@@ -65,9 +84,10 @@ static struct {
     UInt32     count;
     UInt32     hint;
     IOMapData  maps[ kIOMaxPageableMaps ];
-    mutex_t *  lock;
+    lck_mtx_t *        lock;
 } gIOKitPageableSpace;
 
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 void IOLibInit(void)
 {
@@ -78,66 +98,49 @@ void IOLibInit(void)
     if(libInitialized)
         return;        
 
-    threadArgLock = lock_alloc( true, NULL, NULL );
-
     gIOKitPageableSpace.maps[0].address = 0;
     ret = kmem_suballoc(kernel_map,
                     &gIOKitPageableSpace.maps[0].address,
                     kIOPageableMapSize,
                     TRUE,
-                    TRUE,
+                    VM_FLAGS_ANYWHERE,
                     &gIOKitPageableSpace.maps[0].map);
     if (ret != KERN_SUCCESS)
         panic("failed to allocate iokit pageable map\n");
 
-    gIOKitPageableSpace.lock           = mutex_alloc( 0 );
+    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
+
+    gIOKitPageableSpace.lock           = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     gIOKitPageableSpace.maps[0].end    = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
     gIOKitPageableSpace.hint           = 0;
     gIOKitPageableSpace.count          = 1;
 
+    gIOMallocContiguousEntriesLock     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
+    queue_init( &gIOMallocContiguousEntries );
+
     libInitialized = true;
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-/*
- * We pass an argument to a new thread by saving fcn and arg in some
- * locked variables and starting the thread at ioThreadStart(). This
- * function retrives fcn and arg and makes the appropriate call.
- *
- */
-
-static void ioThreadStart( void )
-{
-    IOThreadFunc       fcn;
-    void *             arg;
-
-    fcn = threadArgFcn;
-    arg = threadArgArg;
-    lock_done( threadArgLock);
-
-    (*fcn)(arg);
-
-    IOExitThread();
-}
-
 IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
 {
-       IOThread thread;
+       kern_return_t   result;
+       thread_t                thread;
 
-       lock_write( threadArgLock);
-       threadArgFcn = fcn;
-       threadArgArg = arg;
+       result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
+       if (result != KERN_SUCCESS)
+               return (NULL);
 
-       thread = kernel_thread( kernel_task, ioThreadStart);
+       thread_deallocate(thread);
 
-       return(thread);
+       return (thread);
 }
 
 
-volatile void IOExitThread()
+volatile void IOExitThread(void)
 {
-       (void) thread_terminate(current_act());
+       (void) thread_terminate(current_thread());
 }
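IOCreateThread() no longer passes its argument through the removed threadArgLock/ioThreadStart trampoline; kernel_thread_start() hands fcn and arg to the new thread directly, and the extra reference it returns is dropped with thread_deallocate(). A minimal caller sketch (myWorker and its argument type are illustrative; as before, the thread function must end by calling IOExitThread()):

    static void myWorker(void *arg)
    {
        struct myWorkerArgs *args = (struct myWorkerArgs *) arg;  /* hypothetical */

        /* ... long-running work at thread level ... */

        IOExitThread();     /* terminate this thread; does not return */
    }

    /* somewhere at start-of-day */
    IOThread t = IOCreateThread(&myWorker, args);
    if (t == NULL) {
        /* kernel_thread_start() failed; no thread was created */
    }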
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -158,7 +161,7 @@ void * IOMalloc(vm_size_t size)
 void IOFree(void * address, vm_size_t size)
 {
     if (address) {
-       kfree((vm_offset_t)address, size);
+       kfree(address, size);
 #if IOALLOCDEBUG
        debug_iomalloc_size -= size;
 #endif
@@ -186,16 +189,23 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
     if (adjustedSize >= page_size) {
 
         kr = kernel_memory_allocate(kernel_map, &address,
-                                       size, alignMask, KMA_KOBJECT);
-       if (KERN_SUCCESS != kr) {
-            IOLog("Failed %08x, %08x\n", size, alignment);
+                                       size, alignMask, 0);
+       if (KERN_SUCCESS != kr)
            address = 0;
-       }
 
     } else {
 
        adjustedSize += alignMask;
-        allocationAddress = (vm_address_t) kalloc(adjustedSize);
+
+       if (adjustedSize >= page_size) {
+
+           kr = kernel_memory_allocate(kernel_map, &allocationAddress,
+                                           adjustedSize, 0, 0);
+           if (KERN_SUCCESS != kr)
+               allocationAddress = 0;
+
+       } else
+           allocationAddress = (vm_address_t) kalloc(adjustedSize);
 
         if (allocationAddress) {
             address = (allocationAddress + alignMask
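(The sub-page path of IOMallocAligned() continues past this hunk.) For requests smaller than a page the allocator over-allocates by alignMask plus two pointer-sized words, rounds the result up to the requested alignment, and stores the real allocation size and start address immediately below the returned pointer; IOFreeAligned() in the next hunk reads those words back to decide between kmem_free() and kfree(). A sketch restating that header layout with illustrative names:

    /*  allocationAddress                      aligned (returned to caller)
     *  v                                      v
     *  [ pad ][ adjustedSize ][ allocationAddress ][ caller's data ... ]
     */
    vm_address_t aligned;

    aligned = (allocationAddress + alignMask
            + (sizeof(vm_size_t) + sizeof(vm_address_t)))
            & (~alignMask);

    /* hidden bookkeeping words just below the returned pointer */
    *((vm_size_t *)   (aligned - sizeof(vm_size_t)
                               - sizeof(vm_address_t))) = adjustedSize;
    *((vm_address_t *)(aligned - sizeof(vm_address_t))) = allocationAddress;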
@@ -241,7 +251,10 @@ void IOFreeAligned(void * address, vm_size_t size)
         allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));
 
-        kfree((vm_offset_t) allocationAddress, adjustedSize);
+       if (adjustedSize >= page_size)
+           kmem_free( kernel_map, allocationAddress, adjustedSize);
+       else
+         kfree((void *)allocationAddress, adjustedSize);
     }
 
 #if IOALLOCDEBUG
@@ -251,6 +264,14 @@ void IOFreeAligned(void * address, vm_size_t size)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+struct _IOMallocContiguousEntry
+{
+    void *             virtual;
+    ppnum_t            ioBase;
+    queue_chain_t      link;
+};
+typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
+
 void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
 {
@@ -259,6 +280,7 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
     vm_address_t       allocationAddress;
     vm_size_t          adjustedSize;
     vm_offset_t                alignMask;
+    ppnum_t            pagenum;
 
     if (size == 0)
         return 0;
@@ -268,26 +290,35 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
     alignMask = alignment - 1;
     adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
 
-    if (adjustedSize >= page_size) {
-
-        kr = kmem_alloc_contig(kernel_map, &address, size,
-                               alignMask, KMA_KOBJECT);
+    if (adjustedSize >= page_size)
+    {
+       adjustedSize = size;
+       if (adjustedSize > page_size)
+       {
+           kr = kmem_alloc_contig(kernel_map, &address, size,
+                                   alignMask, 0);
+       }
+       else
+       {
+           kr = kernel_memory_allocate(kernel_map, &address,
+                                       size, alignMask, 0);
+       }
        if (KERN_SUCCESS != kr)
            address = 0;
-
-    } else {
-
+    }
+    else
+    {
        adjustedSize += alignMask;
-        allocationAddress = (vm_address_t)
-                               kalloc(adjustedSize);
+        allocationAddress = (vm_address_t) kalloc(adjustedSize);
+
         if (allocationAddress) {
 
             address = (allocationAddress + alignMask
                     + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                     & (~alignMask);
 
-            if (atop(address) != atop(address + size - 1))
-                address = round_page(address);
+            if (atop_32(address) != atop_32(address + size - 1))
+                address = round_page_32(address);
 
             *((vm_size_t *)(address - sizeof(vm_size_t)
                             - sizeof(vm_address_t))) = adjustedSize;
@@ -297,9 +328,49 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
            address = 0;
     }
 
-    if( address && physicalAddress)
-       *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
-                                                                address );
+    /* Do we want a physical address? */
+    if (address && physicalAddress)
+    {
+       do
+       {
+           /* Get the physical page */
+           pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
+           if(pagenum)
+           {
+               IOByteCount offset;
+               ppnum_t base;
+    
+               base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
+               if (base)
+               {
+                   _IOMallocContiguousEntry *
+                   entry = IONew(_IOMallocContiguousEntry, 1);
+                   if (!entry)
+                   {
+                       IOFreeContiguous((void *) address, size);
+                       address = 0;
+                       break;
+                   }
+                   entry->virtual = (void *) address;
+                   entry->ioBase  = base;
+                   lck_mtx_lock(gIOMallocContiguousEntriesLock);
+                   queue_enter( &gIOMallocContiguousEntries, entry, 
+                               _IOMallocContiguousEntry *, link );
+                   lck_mtx_unlock(gIOMallocContiguousEntriesLock);
+    
+                   *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
+                   for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
+                       IOMapperInsertPage( base, offset, pagenum );
+               }
+               else
+                   *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
+           } 
+           else
+               /* Did not find, return 0 */
+               *physicalAddress = (IOPhysicalAddress) 0;
+       }
+       while (false);
+    }
 
     assert(0 == (address & alignMask));
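Physical-address reporting now uses pmap_find_phys() instead of pmap_extract(), and when a system IOMapper is active (IOMapperIOVMAlloc() succeeds) the pages are entered into an I/O-mapper range whose base, not the raw physical page, is what the caller sees; the allocation is remembered on gIOMallocContiguousEntries so IOFreeContiguous() can release the IOVM range. A minimal caller sketch (sizes are illustrative):

    IOPhysicalAddress phys;
    void             *buf;

    /* 8 KB, 4 KB-aligned, physically contiguous.  phys receives the  */
    /* address a device should use for DMA: an IOVM address when a    */
    /* mapper is present, the CPU physical address otherwise.         */
    buf = IOMallocContiguous(8 * 1024, 4096, &phys);
    if (buf && phys) {
        /* ... program the device with phys, access buf from the CPU ... */
        IOFreeContiguous(buf, 8 * 1024);
    }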
 
@@ -313,14 +384,35 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
 
 void IOFreeContiguous(void * address, vm_size_t size)
 {
-    vm_address_t       allocationAddress;
-    vm_size_t          adjustedSize;
+    vm_address_t              allocationAddress;
+    vm_size_t                 adjustedSize;
+    _IOMallocContiguousEntry * entry;
+    ppnum_t                   base = 0;
 
     if( !address)
        return;
 
     assert(size);
 
+    lck_mtx_lock(gIOMallocContiguousEntriesLock);
+    queue_iterate( &gIOMallocContiguousEntries, entry,
+                   _IOMallocContiguousEntry *, link )
+    {
+       if( entry->virtual == address ) {
+           base = entry->ioBase;
+           queue_remove( &gIOMallocContiguousEntries, entry,
+                           _IOMallocContiguousEntry *, link );
+           break;
+       }
+    }
+    lck_mtx_unlock(gIOMallocContiguousEntriesLock);
+
+    if (base)
+    {
+       IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
+       IODelete(entry, _IOMallocContiguousEntry, 1);
+    }
+
     adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
     if (adjustedSize >= page_size) {
 
@@ -332,7 +424,7 @@ void IOFreeContiguous(void * address, vm_size_t size)
         allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));
 
-        kfree((vm_offset_t) allocationAddress, adjustedSize);
+        kfree((void *)allocationAddress, adjustedSize);
     }
 
 #if IOALLOCDEBUG
@@ -342,27 +434,24 @@ void IOFreeContiguous(void * address, vm_size_t size)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
+kern_return_t IOIteratePageableMaps(vm_size_t size,
+                    IOIteratePageableMapsCallback callback, void * ref)
 {
     kern_return_t      kr = kIOReturnNotReady;
-    vm_address_t       address;
     vm_size_t          segSize;
     UInt32             attempts;
     UInt32             index;
     vm_offset_t                min;
     vm_map_t           map;
 
-    if (alignment > page_size)
-        return( 0 );
     if (size > kIOPageableMaxMapSize)
-        return( 0 );
+        return( kIOReturnBadArgument );
 
     do {
         index = gIOKitPageableSpace.hint;
         attempts = gIOKitPageableSpace.count;
         while( attempts--) {
-            kr = kmem_alloc_pageable( gIOKitPageableSpace.maps[index].map,
-                                        &address, size);
+            kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
             if( KERN_SUCCESS == kr) {
                 gIOKitPageableSpace.hint = index;
                 break;
@@ -375,11 +464,11 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
         if( KERN_SUCCESS == kr)
             break;
 
-        mutex_lock( gIOKitPageableSpace.lock );
+        lck_mtx_lock( gIOKitPageableSpace.lock );
 
         index = gIOKitPageableSpace.count;
         if( index >= (kIOMaxPageableMaps - 1)) {
-            mutex_unlock( gIOKitPageableSpace.lock );
+            lck_mtx_unlock( gIOKitPageableSpace.lock );
             break;
         }
 
@@ -393,10 +482,10 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
                     &min,
                     segSize,
                     TRUE,
-                    TRUE,
+                    VM_FLAGS_ANYWHERE,
                     &map);
         if( KERN_SUCCESS != kr) {
-            mutex_unlock( gIOKitPageableSpace.lock );
+            lck_mtx_unlock( gIOKitPageableSpace.lock );
             break;
         }
 
@@ -406,19 +495,50 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
         gIOKitPageableSpace.hint               = index;
         gIOKitPageableSpace.count              = index + 1;
 
-        mutex_unlock( gIOKitPageableSpace.lock );
+        lck_mtx_unlock( gIOKitPageableSpace.lock );
 
     } while( true );
 
-    if( KERN_SUCCESS != kr)
-        address = 0;
+    return kr;
+}
+
+struct IOMallocPageableRef
+{
+    vm_address_t address;
+    vm_size_t   size;
+};
+
+static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
+{
+    struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
+    kern_return_t               kr;
+
+    kr = kmem_alloc_pageable( map, &ref->address, ref->size );
+
+    return( kr );
+}
+
+void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
+{
+    kern_return_t             kr = kIOReturnNotReady;
+    struct IOMallocPageableRef ref;
+
+    if (alignment > page_size)
+        return( 0 );
+    if (size > kIOPageableMaxMapSize)
+        return( 0 );
+
+    ref.size = size;
+    kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
+    if( kIOReturnSuccess != kr)
+        ref.address = 0;
 
 #if IOALLOCDEBUG
-    if( address)
-       debug_iomalloc_size += round_page(size);
+    if( ref.address)
+       debug_iomallocpageable_size += round_page_32(size);
 #endif
 
-    return (void *) address;
+    return( (void *) ref.address );
 }
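IOMallocPageable() is now a thin wrapper around the new IOIteratePageableMaps(), which tries each existing pageable submap via the supplied callback and, when all of them are full, takes gIOKitPageableSpace.lock and kmem_suballoc()s another submap (up to kIOMaxPageableMaps). IOMallocPageableCallback() above is the canonical callback; ordinary callers only need the public pair, sketched here with an illustrative size:

    /* Pageable kernel memory: touch it only at thread level, never   */
    /* from interrupt context.  Alignment must not exceed page_size.  */
    void *buf = IOMallocPageable(128 * 1024, sizeof(uint64_t));
    if (buf) {
        /* ... */
        IOFreePageable(buf, 128 * 1024);
    }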
 
 vm_map_t IOPageableMapForAddress( vm_address_t address )
@@ -448,38 +568,38 @@ void IOFreePageable(void * address, vm_size_t size)
         kmem_free( map, (vm_offset_t) address, size);
 
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= round_page(size);
+    debug_iomallocpageable_size -= round_page_32(size);
 #endif
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
-                       vm_size_t length, unsigned int options);
-
 IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
 {
     IOReturn   ret = kIOReturnSuccess;
-    vm_offset_t        physAddr;
+    ppnum_t    pagenum;
 
     if( task != kernel_task)
        return( kIOReturnUnsupported );
 
-    length = round_page(address + length) - trunc_page( address );
-    address = trunc_page( address );
+    length = round_page_32(address + length) - trunc_page_32( address );
+    address = trunc_page_32( address );
 
     // make map mode
     cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
 
     while( (kIOReturnSuccess == ret) && (length > 0) ) {
 
-       physAddr = pmap_extract( kernel_pmap, address );
-       if( physAddr)
-            ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
-       else
+       // Get the physical page number
+       pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
+       if( pagenum) {
+            ret = IOUnmapPages( get_task_map(task), address, page_size );
+           ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
+       } else
            ret = kIOReturnVMError;
 
+       address += page_size;
        length -= page_size;
     }
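IOSetProcessorCacheMode() now walks the range page by page, resolving each page with pmap_find_phys(), unmapping it and re-entering it with the requested cache bits; the added address += page_size is what actually advances the walk. A minimal caller sketch, assuming a wired kernel buffer that should become cache-inhibited (buf and length are illustrative; kIOInhibitCache is the unshifted cache-mode value this routine shifts into kIOMapCacheMask itself):

    IOReturn r;

    r = IOSetProcessorCacheMode(kernel_task,
                                (IOVirtualAddress) buf,   /* wired kernel VA */
                                length,
                                kIOInhibitCache);
    if (kIOReturnSuccess == r) {
        /* push any stale lines out before the device looks at the buffer */
        IOFlushProcessorCache(kernel_task, (IOVirtualAddress) buf, length);
    }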
 
@@ -494,7 +614,7 @@ IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
        return( kIOReturnUnsupported );
 
 #if __ppc__
-    flush_dcache( (vm_offset_t) address, (unsigned) length, false );
+    flush_dcache64( (addr64_t) address, (unsigned) length, false );
 #endif
 
     return( kIOReturnSuccess );
@@ -515,12 +635,7 @@ SInt32 OSKernelStackRemaining( void )
 
 void IOSleep(unsigned milliseconds)
 {
-       int wait_result;
-
-       assert_wait_timeout(milliseconds, THREAD_INTERRUPTIBLE);
-       wait_result = thread_block((void (*)(void))0);
-       if (wait_result != THREAD_TIMED_OUT)
-               thread_cancel_timer();
+    delay_for_interval(milliseconds, kMillisecondScale);
 }
 
 /*
@@ -528,9 +643,7 @@ void IOSleep(unsigned milliseconds)
  */
 void IODelay(unsigned microseconds)
 {
-    extern void delay(int usec);
-
-    delay(microseconds);
+    delay_for_interval(microseconds, kMicrosecondScale);
 }
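Both delays now route through delay_for_interval() with an explicit scale. The usage guidance is unchanged: IOSleep() is for millisecond waits where blocking the calling thread is acceptable (so not from interrupt context), IODelay() for short microsecond-scale pauses such as device register settle times. A two-line sketch (values illustrative):

    IOSleep(10);        /* wait ~10 ms; may block the calling thread */
    IODelay(50);        /* short ~50 microsecond delay               */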
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -539,7 +652,7 @@ void IOLog(const char *format, ...)
 {
        va_list ap;
        extern void conslog_putc(char);
-       extern void logwakeup();
+       extern void logwakeup(void);
 
        va_start(ap, format);
        _doprnt(format, &ap, conslog_putc, 16);