diff --git a/iokit/Kernel/IOLib.c b/iokit/Kernel/IOLib.c
index 2721732a0f02c38dc80fc7b6486ad31a1244ca81..41394e11cb4f1f92bd612a73350ca3c679db0057 100644
--- a/iokit/Kernel/IOLib.c
+++ b/iokit/Kernel/IOLib.c
@@ -1,27 +1,28 @@
 /*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
- * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved. 
- *
  * HISTORY
  *
  * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 
 #include <IOKit/IOReturn.h>
 #include <IOKit/IOLib.h> 
+#include <IOKit/IOMapper.h>
 #include <IOKit/IOKitDebug.h> 
 
 mach_timespec_t IOZeroTvalspec = { 0, 0 };
 
+extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Global variables for use by iLogger
+ * These symbols are for use only by Apple diagnostic code.
+ * Binary compatibility is not guaranteed for kexts that reference these symbols.
+ */
+
+void *_giDebugLogInternal      = NULL;
+void *_giDebugLogDataInternal  = NULL;
+void *_giDebugReserved1                = NULL;
+void *_giDebugReserved2                = NULL;
+
+
 /*
  * Static variables for this module.
  */
 
 static IOThreadFunc threadArgFcn;
-static void * threadArgArg;
-static lock_t * threadArgLock;
+static void *       threadArgArg;
+static lock_t *     threadArgLock;
 
+static queue_head_t gIOMallocContiguousEntries;
+static mutex_t *    gIOMallocContiguousEntriesLock;
 
 enum { kIOMaxPageableMaps = 16 };
 enum { kIOPageableMapSize = 16 * 1024 * 1024 };
-enum { kIOPageableMaxMapSize = 64 * 1024 * 1024 };
+enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
 
 typedef struct {
     vm_map_t   map;
@@ -68,6 +88,7 @@ static struct {
     mutex_t *  lock;
 } gIOKitPageableSpace;
 
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 void IOLibInit(void)
 {
@@ -95,6 +116,9 @@ void IOLibInit(void)
     gIOKitPageableSpace.hint           = 0;
     gIOKitPageableSpace.count          = 1;
 
+    gIOMallocContiguousEntriesLock     = mutex_alloc( 0 );
+    queue_init( &gIOMallocContiguousEntries );
+
     libInitialized = true;
 }
 
@@ -186,16 +210,23 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
     if (adjustedSize >= page_size) {
 
         kr = kernel_memory_allocate(kernel_map, &address,
-                                       size, alignMask, KMA_KOBJECT);
-       if (KERN_SUCCESS != kr) {
-            IOLog("Failed %08x, %08x\n", size, alignment);
+                                       size, alignMask, 0);
+       if (KERN_SUCCESS != kr)
            address = 0;
-       }
 
     } else {
 
        adjustedSize += alignMask;
-        allocationAddress = (vm_address_t) kalloc(adjustedSize);
+
+       if (adjustedSize >= page_size) {
+
+           kr = kernel_memory_allocate(kernel_map, &allocationAddress,
+                                           adjustedSize, 0, 0);
+           if (KERN_SUCCESS != kr)
+               allocationAddress = 0;
+
+       } else
+           allocationAddress = (vm_address_t) kalloc(adjustedSize);
 
         if (allocationAddress) {
             address = (allocationAddress + alignMask
@@ -241,7 +272,10 @@ void IOFreeAligned(void * address, vm_size_t size)
         allocationAddress = *((vm_address_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) ));
 
-        kfree((vm_offset_t) allocationAddress, adjustedSize);
+       if (adjustedSize >= page_size)
+           kmem_free( kernel_map, (vm_address_t) allocationAddress, adjustedSize);
+       else
+           kfree((vm_offset_t) allocationAddress, adjustedSize);
     }
 
 #if IOALLOCDEBUG
@@ -251,6 +285,14 @@ void IOFreeAligned(void * address, vm_size_t size)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+struct _IOMallocContiguousEntry
+{
+    void *             virtual;
+    ppnum_t            ioBase;
+    queue_chain_t      link;
+};
+typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;
+
 void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
                           IOPhysicalAddress * physicalAddress)
 {
@@ -259,6 +301,7 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
     vm_address_t       allocationAddress;
     vm_size_t          adjustedSize;
     vm_offset_t                alignMask;
+    ppnum_t            pagenum;
 
     if (size == 0)
         return 0;
@@ -268,26 +311,35 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
     alignMask = alignment - 1;
     adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
 
-    if (adjustedSize >= page_size) {
-
-        kr = kmem_alloc_contig(kernel_map, &address, size,
-                               alignMask, KMA_KOBJECT);
+    if (adjustedSize >= page_size)
+    {
+       adjustedSize = size;
+       if (adjustedSize > page_size)
+       {
+           kr = kmem_alloc_contig(kernel_map, &address, size,
+                                   alignMask, 0);
+       }
+       else
+       {
+           kr = kernel_memory_allocate(kernel_map, &address,
+                                       size, alignMask, 0);
+       }
        if (KERN_SUCCESS != kr)
            address = 0;
-
-    } else {
-
+    }
+    else
+    {
        adjustedSize += alignMask;
-        allocationAddress = (vm_address_t)
-                               kalloc(adjustedSize);
+        allocationAddress = (vm_address_t) kalloc(adjustedSize);
+
         if (allocationAddress) {
 
             address = (allocationAddress + alignMask
                     + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                     & (~alignMask);
 
-            if (atop(address) != atop(address + size - 1))
-                address = round_page(address);
+            if (atop_32(address) != atop_32(address + size - 1))
+                address = round_page_32(address);
 
             *((vm_size_t *)(address - sizeof(vm_size_t)
                             - sizeof(vm_address_t))) = adjustedSize;
@@ -297,9 +349,49 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
            address = 0;
     }
 
-    if( address && physicalAddress)
-       *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap,
-                                                                address );
+    /* Do we want a physical address? */
+    if (address && physicalAddress)
+    {
+       do
+       {
+           /* Get the physical page */
+           pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
+           if(pagenum)
+           {
+               IOByteCount offset;
+               ppnum_t base;
+    
+               base = IOMapperIOVMAlloc((size + PAGE_MASK) >> PAGE_SHIFT);
+               if (base)
+               {
+                   _IOMallocContiguousEntry *
+                   entry = IONew(_IOMallocContiguousEntry, 1);
+                   if (!entry)
+                   {
+                       IOFreeContiguous((void *) address, size);
+                       address = 0;
+                       break;
+                   }
+                   entry->virtual = (void *) address;
+                   entry->ioBase  = base;
+                   mutex_lock(gIOMallocContiguousEntriesLock);
+                   queue_enter( &gIOMallocContiguousEntries, entry, 
+                               _IOMallocContiguousEntry *, link );
+                   mutex_unlock(gIOMallocContiguousEntriesLock);
+    
+                   *physicalAddress = (IOPhysicalAddress)((base << PAGE_SHIFT) | (address & PAGE_MASK));
+                   for (offset = 0; offset < ((size + PAGE_MASK) >> PAGE_SHIFT); offset++, pagenum++)
+                       IOMapperInsertPage( base, offset, pagenum );
+               }
+               else
+                   *physicalAddress = (IOPhysicalAddress)((pagenum << PAGE_SHIFT) | (address & PAGE_MASK));
+           } 
+           else
+               /* Did not find, return 0 */
+               *physicalAddress = (IOPhysicalAddress) 0;
+       }
+       while (false);
+    }
 
     assert(0 == (address & alignMask));
 
@@ -313,14 +405,35 @@ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
 
 void IOFreeContiguous(void * address, vm_size_t size)
 {
-    vm_address_t       allocationAddress;
-    vm_size_t          adjustedSize;
+    vm_address_t              allocationAddress;
+    vm_size_t                 adjustedSize;
+    _IOMallocContiguousEntry * entry;
+    ppnum_t                   base = 0;
 
     if( !address)
        return;
 
     assert(size);
 
+    mutex_lock(gIOMallocContiguousEntriesLock);
+    queue_iterate( &gIOMallocContiguousEntries, entry,
+                   _IOMallocContiguousEntry *, link )
+    {
+       if( entry->virtual == address ) {
+           base = entry->ioBase;
+           queue_remove( &gIOMallocContiguousEntries, entry,
+                           _IOMallocContiguousEntry *, link );
+           break;
+       }
+    }
+    mutex_unlock(gIOMallocContiguousEntriesLock);
+
+    if (base)
+    {
+       IOMapperIOVMFree(base, (size + PAGE_MASK) >> PAGE_SHIFT);
+       IODelete(entry, _IOMallocContiguousEntry, 1);
+    }
+
     adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t);
     if (adjustedSize >= page_size) {
 
@@ -445,7 +558,7 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
 
 #if IOALLOCDEBUG
     if( ref.address)
-       debug_iomalloc_size += round_page(size);
+       debug_iomalloc_size += round_page_32(size);
 #endif
 
     return( (void *) ref.address );
@@ -478,7 +591,7 @@ void IOFreePageable(void * address, vm_size_t size)
         kmem_free( map, (vm_offset_t) address, size);
 
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= round_page(size);
+    debug_iomalloc_size -= round_page_32(size);
 #endif
 }
 
@@ -486,30 +599,34 @@ void IOFreePageable(void * address, vm_size_t size)
 
 extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
                        vm_size_t length, unsigned int options);
+extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
 
 IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
                                  IOByteCount length, IOOptionBits cacheMode )
 {
     IOReturn   ret = kIOReturnSuccess;
-    vm_offset_t        physAddr;
+    ppnum_t    pagenum;
 
     if( task != kernel_task)
        return( kIOReturnUnsupported );
 
-    length = round_page(address + length) - trunc_page( address );
-    address = trunc_page( address );
+    length = round_page_32(address + length) - trunc_page_32( address );
+    address = trunc_page_32( address );
 
     // make map mode
     cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;
 
     while( (kIOReturnSuccess == ret) && (length > 0) ) {
 
-       physAddr = pmap_extract( kernel_pmap, address );
-       if( physAddr)
-            ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode );
-       else
+       // Get the physical page number
+       pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
+       if( pagenum) {
+            ret = IOUnmapPages( get_task_map(task), address, page_size );
+           ret = IOMapPages( get_task_map(task), address, pagenum << PAGE_SHIFT, page_size, cacheMode );
+       } else
            ret = kIOReturnVMError;
 
+       address += page_size;
        length -= page_size;
     }
 
@@ -524,7 +641,7 @@ IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address,
        return( kIOReturnUnsupported );
 
 #if __ppc__
-    flush_dcache( (vm_offset_t) address, (unsigned) length, false );
+    flush_dcache64( (addr64_t) address, (unsigned) length, false );
 #endif
 
     return( kIOReturnSuccess );
@@ -545,12 +662,13 @@ SInt32 OSKernelStackRemaining( void )
 
 void IOSleep(unsigned milliseconds)
 {
-       int wait_result;
+    wait_result_t wait_result;
+
+    wait_result = assert_wait_timeout(milliseconds, THREAD_UNINT);
+    assert(wait_result == THREAD_WAITING);
 
-       assert_wait_timeout(milliseconds, THREAD_INTERRUPTIBLE);
-       wait_result = thread_block((void (*)(void))0);
-       if (wait_result != THREAD_TIMED_OUT)
-               thread_cancel_timer();
+    wait_result = thread_block(THREAD_CONTINUE_NULL);
+    assert(wait_result == THREAD_TIMED_OUT);
 }
 
 /*