apple/xnu.git blobdiff: iokit/Kernel/IOLib.cpp (xnu-3789.51.2)
diff --git a/iokit/Kernel/IOLib.cpp b/iokit/Kernel/IOLib.cpp
index 45504157aa469ccacd400731e9c7a611a54d7b3f..73a0c67a80191d0f859bc8fcb58f3677ca857b83 100644
--- a/iokit/Kernel/IOLib.cpp
+++ b/iokit/Kernel/IOLib.cpp
@@ -1,4 +1,4 @@
-/*
+/* 
  * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
@@ -58,6 +58,7 @@
 #include "libkern/OSAtomic.h"
 #include <libkern/c++/OSKext.h>
 #include <IOKit/IOStatisticsPrivate.h>
+#include <os/log_private.h>
 #include <sys/msgbuf.h>
 
 #if IOKITSTATS
@@ -73,6 +74,10 @@ do { \
 
 #endif /* IOKITSTATS */
 
+
+#define TRACK_ALLOC    (IOTRACKING && (kIOTracking & gIOKitDebug))
+
+
 extern "C"
 {
 
@@ -87,7 +92,8 @@ __doprnt(
        va_list                 argp,
        void                    (*putc)(int, void *),
        void                    *arg,
-       int                     radix);
+       int                     radix,
+       int                     is_log);
 
 extern void cons_putc_locked(char);
 extern void bsd_log_lock(void);
@@ -119,9 +125,15 @@ iopa_t gIOBMDPageAllocator;
 static queue_head_t gIOMallocContiguousEntries;
 static lck_mtx_t *  gIOMallocContiguousEntriesLock;
 
-enum { kIOMaxPageableMaps = 16 };
-enum { kIOPageableMapSize = 96 * 1024 * 1024 };
+#if __x86_64__
+enum { kIOMaxPageableMaps    = 8 };
+enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
+enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
+#else
+enum { kIOMaxPageableMaps    = 16 };
+enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
 enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
+#endif
 
 typedef struct {
     vm_map_t           map;
@@ -138,6 +150,14 @@ static struct {
 
 static iopa_t gIOPageablePageAllocator;
 
+uint32_t  gIOPageAllocChunkBytes;
+
+#if IOTRACKING
+IOTrackingQueue * gIOMallocTracking;
+IOTrackingQueue * gIOWireTracking;
+IOTrackingQueue * gIOMapTracking;
+#endif /* IOTRACKING */
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 void IOLibInit(void)
@@ -149,18 +169,33 @@ void IOLibInit(void)
     if(libInitialized)
         return;        
 
+    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
+
+#if IOTRACKING
+    IOTrackingInit();
+    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
+                                                kIOTrackingQueueTypeAlloc,
+                                                37);
+    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName,   0, 0, page_size, 0, 0);
+
+    size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024*1024);
+    gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName,    0, 0, mapCaptureSize,
+                                                kIOTrackingQueueTypeDefaultOn
+                                                | kIOTrackingQueueTypeMap
+                                                | kIOTrackingQueueTypeUser,
+                                            0);
+#endif
+
     gIOKitPageableSpace.maps[0].address = 0;
     ret = kmem_suballoc(kernel_map,
                     &gIOKitPageableSpace.maps[0].address,
                     kIOPageableMapSize,
                     TRUE,
-                    VM_FLAGS_ANYWHERE,
+                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                     &gIOKitPageableSpace.maps[0].map);
     if (ret != KERN_SUCCESS)
         panic("failed to allocate iokit pageable map\n");
 
-    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
-
     gIOKitPageableSpace.lock           = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     gIOKitPageableSpace.maps[0].end    = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
     gIOKitPageableSpace.hint           = 0;
@@ -169,14 +204,27 @@ void IOLibInit(void)
     gIOMallocContiguousEntriesLock     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     queue_init( &gIOMallocContiguousEntries );
 
+    gIOPageAllocChunkBytes = PAGE_SIZE/64;
+    assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
     iopa_init(&gIOBMDPageAllocator);
     iopa_init(&gIOPageablePageAllocator);
 
+
     libInitialized = true;
 }
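
gIOPageAllocChunkBytes is computed as PAGE_SIZE/64 so that each page handed to the iopa sub-page allocator divides into exactly 64 chunks, matching the 64-bit availability bitmap kept in the per-page iopa_page_t; the assert checks that this header, which lives in the page's last chunk, fits in one chunk. A minimal user-space sketch of that arithmetic, where MY_PAGE_SIZE and my_iopa_page_t are stand-ins rather than the kernel types:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE 4096          /* stand-in for the kernel's PAGE_SIZE */

    typedef struct {                   /* stand-in for iopa_page_t */
        uint64_t avail;                /* one availability bit per chunk */
        uint32_t signature;
    } my_iopa_page_t;

    int main(void)
    {
        uint32_t chunkBytes = MY_PAGE_SIZE / 64;    /* 64 bytes on 4 KB pages */

        /* Mirrors the assert in IOLibInit(): the per-page header must fit
         * in a single chunk, because it occupies the page's final chunk. */
        assert(sizeof(my_iopa_page_t) <= chunkBytes);

        printf("chunk = %u bytes, 64 chunks per page\n", chunkBytes);
        return 0;
    }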
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+static uint32_t 
+log2up(uint32_t size)
+{
+    if (size <= 1) size = 0;
+    else size = 32 - __builtin_clz(size - 1);
+    return (size);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
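
log2up(), moved up here from later in the file, computes the ceiling of log2(size) with a count-leading-zeros trick: for size > 1 it returns 32 minus the number of leading zero bits of size - 1. IOMallocAligned() uses it below to round an arbitrary alignment up to a power of two via (1UL << log2up(alignment)). A standalone check of the edge cases, assuming only that __builtin_clz is available (gcc/clang):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t log2up(uint32_t size)
    {
        if (size <= 1) size = 0;
        else size = 32 - __builtin_clz(size - 1);
        return size;
    }

    int main(void)
    {
        /* Expected: log2up(0)=0, log2up(1)=0, log2up(2)=1, log2up(3)=2,
         * log2up(4)=2, log2up(5)=3, log2up(4096)=12. */
        uint32_t v[] = { 0, 1, 2, 3, 4, 5, 4096 };
        for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++)
            printf("log2up(%u) = %u\n", v[i], log2up(v[i]));
        return 0;
    }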
+
 IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
 {
        kern_return_t   result;
@@ -199,50 +247,141 @@ void IOExitThread(void)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+#if IOTRACKING
+struct IOLibMallocHeader
+{
+    IOTrackingAddress tracking;
+};
+#endif
+
+#if IOTRACKING
+#define sizeofIOLibMallocHeader        (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
+#else
+#define sizeofIOLibMallocHeader        (0)
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
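
The sizeofIOLibMallocHeader macro makes the malloc header pay-as-you-go: when allocation tracking is not enabled at boot (TRACK_ALLOC false), the macro subtracts the IOTrackingAddress member's size and the header collapses to zero bytes, so untracked kernels see no per-allocation overhead. A user-space sketch of the same trick, where track_alloc, tracking_t, and SIZEOF_HEADER are illustrative stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t address, size; } tracking_t;   /* stand-in for IOTrackingAddress */
    struct malloc_header { tracking_t tracking; };           /* stand-in for IOLibMallocHeader */

    static int track_alloc;            /* plays the role of TRACK_ALLOC */

    #define SIZEOF_HEADER \
        (sizeof(struct malloc_header) - (track_alloc ? 0 : sizeof(tracking_t)))

    int main(void)
    {
        track_alloc = 0;
        printf("header bytes without tracking: %zu\n", SIZEOF_HEADER);  /* 0 */
        track_alloc = 1;
        printf("header bytes with tracking:    %zu\n", SIZEOF_HEADER);  /* 16 */
        return 0;
    }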
 
 void * IOMalloc(vm_size_t size)
 {
     void * address;
+    vm_size_t allocSize;
+
+    allocSize = size + sizeofIOLibMallocHeader;
+#if IOTRACKING
+    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL); // overflow
+#endif
+    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);
 
-    address = (void *)kalloc(size);
     if ( address ) {
+#if IOTRACKING
+       if (TRACK_ALLOC) {
+           IOLibMallocHeader * hdr;
+           hdr = (typeof(hdr)) address;
+           bzero(&hdr->tracking, sizeof(hdr->tracking));
+           hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
+           hdr->tracking.size    = size;
+           IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+       }
+#endif
+       address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);
+
 #if IOALLOCDEBUG
-               debug_iomalloc_size += size;
+    OSAddAtomic(size, &debug_iomalloc_size);
 #endif
-               IOStatisticsAlloc(kIOStatisticsMalloc, size);
+       IOStatisticsAlloc(kIOStatisticsMalloc, size);
     }
 
     return address;
 }
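
The reworked IOMalloc()/IOFree() pair is an instance of the classic prefix-header pattern: over-allocate by sizeofIOLibMallocHeader, record bookkeeping at the start of the raw block, and hand the caller the address just past it; the free path walks the same offset back and cross-checks the caller-supplied size against the recorded one. A simplified user-space rendering of the pattern, in which my_malloc/my_free and alloc_header_t are hypothetical names and the tracking calls are elided:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        size_t size;                   /* stands in for the IOTracking bookkeeping */
    } alloc_header_t;

    static void *my_malloc(size_t size)
    {
        size_t allocSize = size + sizeof(alloc_header_t);
        if (allocSize <= size) return NULL;      /* overflow check, as in IOMalloc */

        alloc_header_t *hdr = (alloc_header_t *) malloc(allocSize);
        if (!hdr) return NULL;
        hdr->size = size;                        /* remember what the caller asked for */
        return (void *)(hdr + 1);                /* caller sees memory past the header */
    }

    static void my_free(void *p, size_t size)
    {
        if (!p) return;
        alloc_header_t *hdr = ((alloc_header_t *) p) - 1;   /* step back to the header */
        if (size != hdr->size) {
            /* IOFree reports this case with OSReportWithBacktrace("bad IOFree
             * size ...") and then trusts the recorded size instead. */
            fprintf(stderr, "bad free size %zu, should be %zu\n", size, hdr->size);
            size = hdr->size;                    /* the kernel passes this to kfree */
        }
        free(hdr);                               /* frees header and payload together */
    }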
 
-void IOFree(void * address, vm_size_t size)
+void IOFree(void * inAddress, vm_size_t size)
 {
-    if (address) {
-               kfree(address, size);
+    void * address;
+
+    if ((address = inAddress))
+    {
+       address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);
+       
+#if IOTRACKING
+       if (TRACK_ALLOC)
+       {
+           IOLibMallocHeader * hdr;
+           struct ptr_reference{ void * ptr; };
+           volatile struct ptr_reference ptr;
+
+            // we're about to block in IOTrackingRemove(), make sure the original pointer
+            // exists in memory or a register for leak scanning to find
+            ptr.ptr = inAddress;
+
+           hdr = (typeof(hdr)) address;
+            if (size != hdr->tracking.size)
+           {
+               OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
+               size = hdr->tracking.size;
+           }
+           IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+            ptr.ptr = NULL;
+       }
+#endif
+
+       kfree(address, size + sizeofIOLibMallocHeader);
 #if IOALLOCDEBUG
-               debug_iomalloc_size -= size;
+    OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
-               IOStatisticsAlloc(kIOStatisticsFree, size);
+       IOStatisticsAlloc(kIOStatisticsFree, size);
     }
 }
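
The volatile ptr_reference in IOFree() above is a leak-detector aid: IOTrackingRemove() can block, and the compiler would otherwise be free to discard the last reference to the buffer before that call, so the original pointer is parked in a volatile stack slot where a conservative stack/register scanner can still find it. The shape of the trick, with blocking_bookkeeping() as a hypothetical stand-in for IOTrackingRemove():

    extern void blocking_bookkeeping(void *p);   /* hypothetical; may block */

    void release_with_bookkeeping(void *p)
    {
        volatile struct { void *ptr; } keepalive;

        /* Pin the pointer in memory across the blocking call so a
         * conservative leak scanner still sees a live reference. */
        keepalive.ptr = p;
        blocking_bookkeeping(p);
        keepalive.ptr = (void *) 0;
    }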
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+vm_tag_t 
+IOMemoryTag(vm_map_t map)
+{
+    vm_tag_t tag;
+
+    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);
+
+    tag = vm_tag_bt();
+    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;
+
+    return (tag);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+struct IOLibPageMallocHeader
+{
+    mach_vm_size_t    allocationSize;
+    mach_vm_address_t allocationAddress;
+#if IOTRACKING
+    IOTrackingAddress tracking;
+#endif
+};
+
+#if IOTRACKING
+#define sizeofIOLibPageMallocHeader    (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
+#else
+#define sizeofIOLibPageMallocHeader    (sizeof(IOLibPageMallocHeader))
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
 void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 {
-    kern_return_t      kr;
-    vm_offset_t                address;
-    vm_offset_t                allocationAddress;
-    vm_size_t          adjustedSize;
-    uintptr_t          alignMask;
+    kern_return_t          kr;
+    vm_offset_t                    address;
+    vm_offset_t                    allocationAddress;
+    vm_size_t              adjustedSize;
+    uintptr_t              alignMask;
+    IOLibPageMallocHeader * hdr;
 
     if (size == 0)
         return 0;
-    if (alignment == 0) 
-        alignment = 1;
 
+    alignment = (1UL << log2up(alignment));
     alignMask = alignment - 1;
-    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
+    adjustedSize = size + sizeofIOLibPageMallocHeader;
 
     if (size > adjustedSize) {
            address = 0;    /* overflow detected */
@@ -250,9 +389,11 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
     else if (adjustedSize >= page_size) {
 
         kr = kernel_memory_allocate(kernel_map, &address,
-                                       size, alignMask, 0);
-       if (KERN_SUCCESS != kr)
-           address = 0;
+                                       size, alignMask, 0, IOMemoryTag(kernel_map));
+       if (KERN_SUCCESS != kr) address = 0;
+#if IOTRACKING
+       else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
+#endif
 
     } else {
 
@@ -261,22 +402,27 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
        if (adjustedSize >= page_size) {
 
            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
-                                           adjustedSize, 0, 0);
-           if (KERN_SUCCESS != kr)
-               allocationAddress = 0;
+                                           adjustedSize, 0, 0, IOMemoryTag(kernel_map));
+           if (KERN_SUCCESS != kr) allocationAddress = 0;
 
        } else
-           allocationAddress = (vm_address_t) kalloc(adjustedSize);
+           allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
 
         if (allocationAddress) {
-            address = (allocationAddress + alignMask
-                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
+            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                     & (~alignMask);
 
-            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t))) 
-                           = adjustedSize;
-            *((vm_address_t *)(address - sizeof(vm_address_t)))
-                            = allocationAddress;
+           hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
+           hdr->allocationSize    = adjustedSize;
+           hdr->allocationAddress = allocationAddress;
+#if IOTRACKING
+           if (TRACK_ALLOC) {
+               bzero(&hdr->tracking, sizeof(hdr->tracking));
+               hdr->tracking.address = ~address;
+               hdr->tracking.size = size;
+               IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+           }
+#endif
        } else
            address = 0;
     }
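
For sub-page requests, IOMallocAligned() over-allocates by alignMask plus header bytes from kalloc, then rounds up to an address that is both aligned and guaranteed to leave room for an IOLibPageMallocHeader immediately below it; IOFreeAligned() later reads allocationAddress and allocationSize back out of that header. A user-space sketch of the address arithmetic, where aligned_with_header and page_malloc_header_t are illustrative names:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {                       /* stand-in for IOLibPageMallocHeader */
        uint64_t allocationSize;
        uint64_t allocationAddress;
    } page_malloc_header_t;

    /* alignment must be a power of two; IOMallocAligned enforces this with
     * alignment = 1UL << log2up(alignment). */
    static void *aligned_with_header(size_t size, size_t alignment)
    {
        size_t    alignMask = alignment - 1;
        size_t    adjusted  = size + sizeof(page_malloc_header_t) + alignMask;
        uintptr_t raw       = (uintptr_t) malloc(adjusted);

        if (!raw) return NULL;

        /* Round up past the header: this guarantees at least a header's worth
         * of headroom below the aligned address, and size bytes above it. */
        uintptr_t addr = (raw + alignMask + sizeof(page_malloc_header_t)) & ~alignMask;

        page_malloc_header_t *hdr =
            (page_malloc_header_t *)(addr - sizeof(page_malloc_header_t));
        hdr->allocationSize    = adjusted;  /* what the free path releases */
        hdr->allocationAddress = raw;       /* where the raw block really starts */

        assert((addr & alignMask) == 0);
        return (void *) addr;
    }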
@@ -285,7 +431,7 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 
     if( address) {
 #if IOALLOCDEBUG
-               debug_iomalloc_size += size;
+               OSAddAtomic(size, &debug_iomalloc_size);
 #endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
        }
@@ -295,33 +441,47 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 
 void IOFreeAligned(void * address, vm_size_t size)
 {
-    vm_address_t       allocationAddress;
-    vm_size_t  adjustedSize;
+    vm_address_t           allocationAddress;
+    vm_size_t              adjustedSize;
+    IOLibPageMallocHeader * hdr;
 
     if( !address)
        return;
 
     assert(size);
 
-    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
+    adjustedSize = size + sizeofIOLibPageMallocHeader;
     if (adjustedSize >= page_size) {
-
+#if IOTRACKING
+       if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
+#endif
         kmem_free( kernel_map, (vm_offset_t) address, size);
 
     } else {
-       adjustedSize = *((vm_size_t *)( (vm_address_t) address
-                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
-        allocationAddress = *((vm_address_t *)( (vm_address_t) address
-                               - sizeof(vm_address_t) ));
+        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
+       adjustedSize = hdr->allocationSize;
+        allocationAddress = hdr->allocationAddress;
 
-       if (adjustedSize >= page_size)
+#if IOTRACKING
+       if (TRACK_ALLOC)
+       {
+            if (size != hdr->tracking.size)
+           {
+               OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
+               size = hdr->tracking.size;
+           }
+           IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+       }
+#endif
+       if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
-       else
-         kfree((void *)allocationAddress, adjustedSize);
+       } else {
+           kfree((void *)allocationAddress, adjustedSize);
+       }
     }
 
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= size;
+    OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
 
     IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
@@ -332,43 +492,50 @@ void IOFreeAligned(void * address, vm_size_t size)
 void
 IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
 {
-    mach_vm_address_t allocationAddress;
-    mach_vm_size_t    adjustedSize;
+    mach_vm_address_t       allocationAddress;
+    mach_vm_size_t          adjustedSize;
+    IOLibPageMallocHeader * hdr;
 
     if (!address)
        return;
 
     assert(size);
 
-    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
+    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
     if (adjustedSize >= page_size) {
-
+#if IOTRACKING
+       if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
+#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);
 
     } else {
 
-       adjustedSize = *((mach_vm_size_t *)
-                       (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
-       allocationAddress = *((mach_vm_address_t *)
-                       (address - sizeof(mach_vm_address_t) ));
+        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
+       adjustedSize = hdr->allocationSize;
+        allocationAddress = hdr->allocationAddress;
+#if IOTRACKING
+       if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+#endif
        kfree((void *)allocationAddress, adjustedSize);
     }
 
     IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= size;
+    OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
 }
 
+
 mach_vm_address_t
 IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, 
                                        mach_vm_size_t alignment, bool contiguous)
 {
-    kern_return_t      kr;
-    mach_vm_address_t  address;
-    mach_vm_address_t  allocationAddress;
-    mach_vm_size_t     adjustedSize;
-    mach_vm_address_t  alignMask;
+    kern_return_t          kr;
+    mach_vm_address_t      address;
+    mach_vm_address_t      allocationAddress;
+    mach_vm_size_t         adjustedSize;
+    mach_vm_address_t      alignMask;
+    IOLibPageMallocHeader * hdr;
 
     if (size == 0)
        return (0);
@@ -376,7 +543,8 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
         alignment = 1;
 
     alignMask = alignment - 1;
-    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
+    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
+    if (adjustedSize < size) return (0);
 
     contiguous = (contiguous && (adjustedSize > page_size))
                    || (alignment > page_size);
@@ -405,36 +573,49 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
-                                  alignMask, atop(maxPhys), atop(alignMask), 0);
+                                  alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
-                                       size, alignMask, options);
+                                       size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
+       {
            address = virt;
+#if IOTRACKING
+           if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
+#endif
+       }
        else
            address = 0;
     }
     else
     {
        adjustedSize += alignMask;
-        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);
+        if (adjustedSize < size) return (0);
+        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
 
         if (allocationAddress) {
 
-            address = (allocationAddress + alignMask
-                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
+
+            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                     & (~alignMask);
 
             if (atop_32(address) != atop_32(address + size - 1))
                 address = round_page(address);
 
-            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
-                            - sizeof(mach_vm_address_t))) = adjustedSize;
-            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
-                            = allocationAddress;
+           hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
+           hdr->allocationSize    = adjustedSize;
+           hdr->allocationAddress = allocationAddress;
+#if IOTRACKING
+           if (TRACK_ALLOC) {
+               bzero(&hdr->tracking, sizeof(hdr->tracking));
+               hdr->tracking.address = ~address;
+               hdr->tracking.size    = size;
+               IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+           }
+#endif
        } else
            address = 0;
     }
@@ -442,7 +623,7 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
     if (address) {
     IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
 #if IOALLOCDEBUG
-       debug_iomalloc_size += size;
+    OSAddAtomic(size, &debug_iomalloc_size);
 #endif
     }
 
@@ -576,7 +757,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
             else
                 index = gIOKitPageableSpace.count - 1;
         }
-        if( KERN_SUCCESS == kr)
+        if (KERN_NO_SPACE != kr)
             break;
 
         lck_mtx_lock( gIOKitPageableSpace.lock );
@@ -597,7 +778,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
                     &min,
                     segSize,
                     TRUE,
-                    VM_FLAGS_ANYWHERE,
+                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                     &map);
         if( KERN_SUCCESS != kr) {
             lck_mtx_unlock( gIOKitPageableSpace.lock );
@@ -620,7 +801,8 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
 struct IOMallocPageableRef
 {
     vm_offset_t address;
-    vm_size_t   size;
+    vm_size_t  size;
+    vm_tag_t    tag;
 };
 
 static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
@@ -628,12 +810,12 @@ static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
     struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
     kern_return_t               kr;
 
-    kr = kmem_alloc_pageable( map, &ref->address, ref->size );
+    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
 
     return( kr );
 }
 
-static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
+static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
 {
     kern_return_t             kr = kIOReturnNotReady;
     struct IOMallocPageableRef ref;
@@ -644,6 +826,7 @@ static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
         return( 0 );
 
     ref.size = size;
+    ref.tag  = tag;
     kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
     if( kIOReturnSuccess != kr)
         ref.address = 0;
@@ -680,19 +863,19 @@ static void IOFreePageablePages(void * address, vm_size_t size)
 
 static uintptr_t IOMallocOnePageablePage(iopa_t * a)
 {
-    return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
+    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
 }
 
 void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
 {
     void * addr;
 
-    if (size >= (page_size - 4*kIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment);
+    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
     else                   addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));
 
     if (addr) {
 #if IOALLOCDEBUG
-       debug_iomallocpageable_size += size;
+          OSAddAtomicLong(size, &debug_iomallocpageable_size);
 #endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
     }
@@ -703,11 +886,11 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
 void IOFreePageable(void * address, vm_size_t size)
 {
 #if IOALLOCDEBUG
-    debug_iomallocpageable_size -= size;
+       OSAddAtomicLong(-size, &debug_iomallocpageable_size);
 #endif
     IOStatisticsAlloc(kIOStatisticsFreePageable, size);
 
-    if (size < (page_size - 4*kIOPageAllocChunkBytes))
+    if (size < (page_size - 4*gIOPageAllocChunkBytes))
     {
        address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);
        size = page_size;
@@ -717,14 +900,6 @@ void IOFreePageable(void * address, vm_size_t size)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-#if 0
-#undef assert
-#define assert(ex)  \
-       ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
-#endif
-
-typedef char iopa_page_t_assert[(sizeof(iopa_page_t) <= kIOPageAllocChunkBytes) ? 1 : -1];
-
 extern "C" void 
 iopa_init(iopa_t * a)
 {
@@ -759,20 +934,12 @@ iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
            remque(&pa->link);
            pa->link.next = 0;
        }
-       return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
+       return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
     }
 
     return (0);
 }
 
-static uint32_t 
-log2up(uint32_t size)
-{
-    if (size <= 1) size = 0;
-    else size = 32 - __builtin_clz(size - 1);
-    return (size);
-}
-
 uintptr_t 
 iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
 {
@@ -791,11 +958,11 @@ iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
     uint64_t      align;
 
     if (!bytes) bytes = 1;
-    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
-    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];
+    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
+    align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];
 
     IOLockLock(a->lock);
-    pa = (typeof(pa)) queue_first(&a->list);
+    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
     while (!queue_end(&a->list, &pa->link))
     {
        addr = iopa_allocinpage(pa, count, align);
@@ -804,7 +971,7 @@ iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
            a->bytecount += bytes;
            break;
        }
-       pa = (typeof(pa)) queue_next(&pa->link);
+       __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
     }
     IOLockUnlock(a->lock);
 
@@ -813,7 +980,7 @@ iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
        addr = alloc(a);
        if (addr)
        {
-           pa = (typeof(pa)) (addr + page_size - kIOPageAllocChunkBytes);
+           pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes);
            pa->signature = kIOPageAllocSignature;
            pa->avail     = -2ULL;
 
@@ -840,13 +1007,13 @@ iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
     if (!bytes) bytes = 1;
 
     chunk = (addr & page_mask);
-    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));
+    assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));
 
-    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
+    pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes));
     assert(kIOPageAllocSignature == pa->signature);
 
-    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
-    chunk /= kIOPageAllocChunkBytes;
+    count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
+    chunk /= gIOPageAllocChunkBytes;
 
     IOLockLock(a->lock);
     if (!pa->avail)
@@ -938,6 +1105,15 @@ void IOSleep(unsigned milliseconds)
     delay_for_interval(milliseconds, kMillisecondScale);
 }
 
+/*
+ * Sleep for the indicated number of milliseconds, plus potentially
+ * an additional number of milliseconds up to the leeway value.
+ */
+void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
+{
+    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
+}
+
 /*
  * Spin for indicated number of microseconds.
  */
@@ -961,32 +1137,39 @@ static void _iolog_consputc(int ch, void *arg __unused)
     cons_putc_locked(ch);
 }
 
-static void _iolog_logputc(int ch, void *arg __unused)
-{
-    log_putc_locked(ch);
-}
+static void _IOLogv(const char *format, va_list ap, void *caller);
 
+__attribute__((noinline,not_tail_called))
 void IOLog(const char *format, ...)
 {
+    void *caller = __builtin_return_address(0);
     va_list ap;
 
     va_start(ap, format);
-    IOLogv(format, ap);
+    _IOLogv(format, ap, caller);
     va_end(ap);
 }
 
+__attribute__((noinline,not_tail_called))
 void IOLogv(const char *format, va_list ap)
+{
+    void *caller = __builtin_return_address(0);
+    _IOLogv(format, ap, caller);
+}
+
+void _IOLogv(const char *format, va_list ap, void *caller)
 {
     va_list ap2;
 
+    /* Ideally not called from interrupt context or with interrupts disabled; needs further validation */
+    /* assert(TRUE == ml_get_interrupts_enabled()); */
+
     va_copy(ap2, ap);
 
-    bsd_log_lock();
-    __doprnt(format, ap, _iolog_logputc, NULL, 16);
-    bsd_log_unlock();
-    logwakeup();
+    os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);
 
-    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
+    __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE);
+    va_end(ap2);
 }
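
IOLog()/IOLogv() now capture their caller's return address and pass it through to os_log_with_args(), so a log record can be attributed to the driver that emitted it rather than to IOLib itself; the noinline and not_tail_called attributes keep __builtin_return_address(0) pointing at the real call site. A user-space sketch of the same pattern (clang attributes; my_log_backend is a stand-in for the os_log plumbing):

    #include <stdarg.h>
    #include <stdio.h>

    static void my_log_backend(const char *fmt, va_list ap, void *caller)
    {
        /* A real backend would resolve 'caller' to a symbol; just print it. */
        printf("[caller %p] ", caller);
        vprintf(fmt, ap);
    }

    __attribute__((noinline, not_tail_called))
    void my_log(const char *fmt, ...)
    {
        /* Must not be inlined or tail-called, or the return address would
         * identify the wrong frame. */
        void *caller = __builtin_return_address(0);
        va_list ap;
        va_start(ap, fmt);
        my_log_backend(fmt, ap, caller);
        va_end(ap);
    }

    int main(void)
    {
        my_log("hello %d\n", 42);
        return 0;
    }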
 
 #if !__LP64__
@@ -1040,7 +1223,7 @@ OSString * IOCopyLogNameForPID(int pid)
 
 IOAlignment IOSizeToAlignment(unsigned int size)
 {
-    register int shift;
+    int shift;
     const int intsize = sizeof(unsigned int) * 8;
     
     for (shift = 1; shift < intsize; shift++) {