diff --git a/iokit/Kernel/IOLib.cpp b/iokit/Kernel/IOLib.cpp
index 3714d1d4fb9018868dd6f59bd91733572a6ed4c8..44a436346612d99a6441fcab26d86e26379af1fe 100644
--- a/iokit/Kernel/IOLib.cpp
+++ b/iokit/Kernel/IOLib.cpp
@@ -1,4 +1,4 @@
-/*
+/* 
  * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
@@ -73,6 +73,10 @@ do { \
 
 #endif /* IOKITSTATS */
 
+
+#define TRACK_ALLOC    (IOTRACKING && (kIOTracking & gIOKitDebug))
+
+
 extern "C"
 {
 
@@ -87,7 +91,8 @@ __doprnt(
        va_list                 argp,
        void                    (*putc)(int, void *),
        void                    *arg,
-       int                     radix);
+       int                     radix,
+       int                     is_log);
 
 extern void cons_putc_locked(char);
 extern void bsd_log_lock(void);
@@ -146,6 +151,12 @@ static iopa_t gIOPageablePageAllocator;
 
 uint32_t  gIOPageAllocChunkBytes;
 
+#if IOTRACKING
+IOTrackingQueue * gIOMallocTracking;
+IOTrackingQueue * gIOWireTracking;
+IOTrackingQueue * gIOMapTracking;
+#endif /* IOTRACKING */
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 void IOLibInit(void)
@@ -157,18 +168,25 @@ void IOLibInit(void)
     if(libInitialized)
         return;        
 
+    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
+
+#if IOTRACKING
+    IOTrackingInit();
+    gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0,         true);
+    gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName,   0, page_size, false);
+    gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName,    0, page_size, false);
+#endif
+
     gIOKitPageableSpace.maps[0].address = 0;
     ret = kmem_suballoc(kernel_map,
                     &gIOKitPageableSpace.maps[0].address,
                     kIOPageableMapSize,
                     TRUE,
-                    VM_FLAGS_ANYWHERE,
+                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                     &gIOKitPageableSpace.maps[0].map);
     if (ret != KERN_SUCCESS)
         panic("failed to allocate iokit pageable map\n");
 
-    IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);
-
     gIOKitPageableSpace.lock           = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
     gIOKitPageableSpace.maps[0].end    = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
     gIOKitPageableSpace.hint           = 0;
@@ -182,11 +200,22 @@ void IOLibInit(void)
     iopa_init(&gIOBMDPageAllocator);
     iopa_init(&gIOPageablePageAllocator);
 
+
     libInitialized = true;
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+static uint32_t 
+log2up(uint32_t size)
+{
+    if (size <= 1) size = 0;
+    else size = 32 - __builtin_clz(size - 1);
+    return (size);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
 IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
 {
        kern_return_t   result;
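
The log2up() helper moved into this hunk computes the exponent of the next power of two at or above its argument; IOMallocAligned below uses (1UL << log2up(alignment)) to normalize any alignment request to a power of two. A minimal standalone sketch of the same idiom (standard C, __builtin_clz as provided by GCC/Clang; the main() is only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Exponent of the next power of two at or above size, as in the new log2up(). */
static uint32_t
log2up(uint32_t size)
{
    if (size <= 1) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}

int main(void)
{
    /* 0 and 1 map to exponent 0; 5 rounds up to 8, i.e. exponent 3. */
    printf("%u %u %u %u\n", log2up(0), log2up(1), log2up(5), log2up(4096));

    /* Normalizing an arbitrary alignment request to a power of two,
     * the way the new IOMallocAligned does with (1UL << log2up(alignment)). */
    uint32_t alignment = 24;
    printf("alignment %u -> %lu\n", alignment, 1UL << log2up(alignment));
    return 0;
}
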
@@ -209,17 +238,49 @@ void IOExitThread(void)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+#if IOTRACKING
+struct IOLibMallocHeader
+{
+    IOTrackingAddress tracking;
+};
+#endif
+
+#if IOTRACKING
+#define sizeofIOLibMallocHeader        (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
+#else
+#define sizeofIOLibMallocHeader        (0)
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 void * IOMalloc(vm_size_t size)
 {
     void * address;
+    vm_size_t allocSize;
+
+    allocSize = size + sizeofIOLibMallocHeader;
+#if IOTRACKING
+    if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL); // overflow
+#endif
+    address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT);
 
-    address = (void *)kalloc(size);
     if ( address ) {
+#if IOTRACKING
+       if (TRACK_ALLOC) {
+           IOLibMallocHeader * hdr;
+           hdr = (typeof(hdr)) address;
+           bzero(&hdr->tracking, sizeof(hdr->tracking));
+           hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
+           hdr->tracking.size    = size;
+           IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+       }
+#endif
+       address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader);
+
 #if IOALLOCDEBUG
-               debug_iomalloc_size += size;
+    OSAddAtomic(size, &debug_iomalloc_size);
 #endif
-               IOStatisticsAlloc(kIOStatisticsMalloc, size);
+       IOStatisticsAlloc(kIOStatisticsMalloc, size);
     }
 
     return address;
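
The reworked IOMalloc prepends a tracking header to the buffer it hands out: it grows the request by sizeofIOLibMallocHeader, rejects the request if that addition wraps, records the user address and size in the header, and returns the address just past it; IOFree below walks back to the same header. A minimal userland sketch of that pattern, with malloc/free standing in for kalloc_tag_bt/kfree and a hypothetical alloc_header in place of IOTrackingAddress:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's IOTrackingAddress record. */
struct alloc_header {
    uintptr_t address;    /* stored complemented, as the diff does with ~address */
    size_t    size;
};

#define HEADER_SIZE (sizeof(struct alloc_header))

/* Prepend a header, guard the size arithmetic against wrap-around, and
 * return the address just past the header, as the new IOMalloc does. */
static void *
sketch_malloc(size_t size)
{
    size_t allocSize = size + HEADER_SIZE;
    if (allocSize <= size) return NULL;        /* overflow check from the diff */

    char *raw = malloc(allocSize);             /* kalloc_tag_bt() in the kernel */
    if (!raw) return NULL;

    struct alloc_header *hdr = (struct alloc_header *) raw;
    memset(hdr, 0, sizeof(*hdr));
    hdr->address = ~((uintptr_t) raw + HEADER_SIZE);
    hdr->size    = size;
    return raw + HEADER_SIZE;
}

/* Walk back to the header, cross-check the caller's size, free the whole block. */
static void
sketch_free(void *address, size_t size)
{
    char *raw = (char *) address - HEADER_SIZE;
    struct alloc_header *hdr = (struct alloc_header *) raw;

    if (size != hdr->size)                     /* mirrors the "bad IOFree size" report */
        size = hdr->size;
    free(raw);                                 /* kfree(address, size + header) in the kernel */
}
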
@@ -228,31 +289,79 @@ void * IOMalloc(vm_size_t size)
 void IOFree(void * address, vm_size_t size)
 {
     if (address) {
-               kfree(address, size);
+
+       address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);
+       
+#if IOTRACKING
+       if (TRACK_ALLOC) {
+           IOLibMallocHeader * hdr;
+           hdr = (typeof(hdr)) address;
+            if (size != hdr->tracking.size)
+           {
+               OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
+               size = hdr->tracking.size;
+           }
+           IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+       }
+#endif
+
+       kfree(address, size + sizeofIOLibMallocHeader);
 #if IOALLOCDEBUG
-               debug_iomalloc_size -= size;
+    OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
-               IOStatisticsAlloc(kIOStatisticsFree, size);
+       IOStatisticsAlloc(kIOStatisticsFree, size);
     }
 }
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
+vm_tag_t 
+IOMemoryTag(vm_map_t map)
+{
+    vm_tag_t tag;
+
+    if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT);
+
+    tag = vm_tag_bt();
+    if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT;
+
+    return (tag);
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+struct IOLibPageMallocHeader
+{
+    mach_vm_size_t    allocationSize;
+    mach_vm_address_t allocationAddress;
+#if IOTRACKING
+    IOTrackingAddress tracking;
+#endif
+};
+
+#if IOTRACKING
+#define sizeofIOLibPageMallocHeader    (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
+#else
+#define sizeofIOLibPageMallocHeader    (sizeof(IOLibPageMallocHeader))
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
 void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 {
-    kern_return_t      kr;
-    vm_offset_t                address;
-    vm_offset_t                allocationAddress;
-    vm_size_t          adjustedSize;
-    uintptr_t          alignMask;
+    kern_return_t          kr;
+    vm_offset_t                    address;
+    vm_offset_t                    allocationAddress;
+    vm_size_t              adjustedSize;
+    uintptr_t              alignMask;
+    IOLibPageMallocHeader * hdr;
 
     if (size == 0)
         return 0;
-    if (alignment == 0) 
-        alignment = 1;
 
+    alignment = (1UL << log2up(alignment));
     alignMask = alignment - 1;
-    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
+    adjustedSize = size + sizeofIOLibPageMallocHeader;
 
     if (size > adjustedSize) {
            address = 0;    /* overflow detected */
@@ -260,9 +369,11 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
     else if (adjustedSize >= page_size) {
 
         kr = kernel_memory_allocate(kernel_map, &address,
-                                       size, alignMask, 0);
-       if (KERN_SUCCESS != kr)
-           address = 0;
+                                       size, alignMask, 0, IOMemoryTag(kernel_map));
+       if (KERN_SUCCESS != kr) address = 0;
+#if IOTRACKING
+       else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
+#endif
 
     } else {
 
@@ -271,22 +382,27 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
        if (adjustedSize >= page_size) {
 
            kr = kernel_memory_allocate(kernel_map, &allocationAddress,
-                                           adjustedSize, 0, 0);
-           if (KERN_SUCCESS != kr)
-               allocationAddress = 0;
+                                           adjustedSize, 0, 0, IOMemoryTag(kernel_map));
+           if (KERN_SUCCESS != kr) allocationAddress = 0;
 
        } else
-           allocationAddress = (vm_address_t) kalloc(adjustedSize);
+           allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
 
         if (allocationAddress) {
-            address = (allocationAddress + alignMask
-                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
+            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                     & (~alignMask);
 
-            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t))) 
-                           = adjustedSize;
-            *((vm_address_t *)(address - sizeof(vm_address_t)))
-                            = allocationAddress;
+           hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
+           hdr->allocationSize    = adjustedSize;
+           hdr->allocationAddress = allocationAddress;
+#if IOTRACKING
+           if (TRACK_ALLOC) {
+               bzero(&hdr->tracking, sizeof(hdr->tracking));
+               hdr->tracking.address = ~address;
+               hdr->tracking.size = size;
+               IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+           }
+#endif
        } else
            address = 0;
     }
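
In the sub-page path of IOMallocAligned, the two raw words that used to sit below the returned pointer (allocation size and base address) become an IOLibPageMallocHeader: the code over-allocates, rounds the cursor up to the alignment boundary while leaving room for the header, and stashes the real base just below the aligned address so IOFreeAligned can find it. A minimal userland sketch of that technique, assuming a power-of-two alignment and plain malloc in place of kalloc (the sketch over-allocates by the alignment slack so the arithmetic is self-evidently safe):

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for IOLibPageMallocHeader: the real allocation's size and base,
 * stored immediately below the aligned pointer given to the caller. */
struct page_malloc_header {
    size_t    allocationSize;
    uintptr_t allocationAddress;
};

/* alignment must be a power of two (the diff enforces this with log2up). */
static void *
sketch_malloc_aligned(size_t size, size_t alignment)
{
    uintptr_t alignMask    = alignment - 1;
    size_t    adjustedSize = size + sizeof(struct page_malloc_header) + alignMask;

    uintptr_t allocationAddress = (uintptr_t) malloc(adjustedSize);
    if (!allocationAddress) return NULL;

    /* Round up past the header so the header always fits below `address`. */
    uintptr_t address = (allocationAddress + alignMask + sizeof(struct page_malloc_header))
                        & ~alignMask;

    struct page_malloc_header *hdr =
        (struct page_malloc_header *) (address - sizeof(struct page_malloc_header));
    hdr->allocationSize    = adjustedSize;
    hdr->allocationAddress = allocationAddress;
    return (void *) address;
}

static void
sketch_free_aligned(void *address)
{
    struct page_malloc_header *hdr =
        (struct page_malloc_header *) ((uintptr_t) address - sizeof(struct page_malloc_header));
    free((void *) hdr->allocationAddress);     /* kfree(allocationAddress, allocationSize) in the kernel */
}
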
@@ -295,7 +411,7 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 
     if( address) {
 #if IOALLOCDEBUG
-               debug_iomalloc_size += size;
+               OSAddAtomic(size, &debug_iomalloc_size);
 #endif
        IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
        }
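
This hunk, like several others in the diff, turns the plain debug_iomalloc_size += size bookkeeping into OSAddAtomic so concurrent allocations and frees cannot lose counter updates. A userland illustration of the same fix using C11 atomics (the names here are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stddef.h>

/* Before: a plain shared counter. Two threads doing `counter += size`
 * perform racing read-modify-write sequences and can lose updates. */
static long debug_size_racy;

static void account_racy(size_t size)
{
    debug_size_racy += size;
}

/* After: the userland analogue of OSAddAtomic(size, &debug_iomalloc_size). */
static atomic_long debug_size_atomic;

static void account_alloc(size_t size)
{
    atomic_fetch_add(&debug_size_atomic, (long) size);
}

static void account_free(size_t size)
{
    atomic_fetch_add(&debug_size_atomic, -(long) size);
}
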
@@ -305,33 +421,47 @@ void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
 
 void IOFreeAligned(void * address, vm_size_t size)
 {
-    vm_address_t       allocationAddress;
-    vm_size_t  adjustedSize;
+    vm_address_t           allocationAddress;
+    vm_size_t              adjustedSize;
+    IOLibPageMallocHeader * hdr;
 
     if( !address)
        return;
 
     assert(size);
 
-    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
+    adjustedSize = size + sizeofIOLibPageMallocHeader;
     if (adjustedSize >= page_size) {
-
+#if IOTRACKING
+       if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);
+#endif
         kmem_free( kernel_map, (vm_offset_t) address, size);
 
     } else {
-       adjustedSize = *((vm_size_t *)( (vm_address_t) address
-                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
-        allocationAddress = *((vm_address_t *)( (vm_address_t) address
-                               - sizeof(vm_address_t) ));
+        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
+       adjustedSize = hdr->allocationSize;
+        allocationAddress = hdr->allocationAddress;
 
-       if (adjustedSize >= page_size)
+#if IOTRACKING
+       if (TRACK_ALLOC)
+       {
+            if (size != hdr->tracking.size)
+           {
+               OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
+               size = hdr->tracking.size;
+           }
+           IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+       }
+#endif
+       if (adjustedSize >= page_size) {
            kmem_free( kernel_map, allocationAddress, adjustedSize);
-       else
-         kfree((void *)allocationAddress, adjustedSize);
+       } else {
+           kfree((void *)allocationAddress, adjustedSize);
+       }
     }
 
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= size;
+    OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
 
     IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
@@ -342,31 +472,36 @@ void IOFreeAligned(void * address, vm_size_t size)
 void
 IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
 {
-    mach_vm_address_t allocationAddress;
-    mach_vm_size_t    adjustedSize;
+    mach_vm_address_t       allocationAddress;
+    mach_vm_size_t          adjustedSize;
+    IOLibPageMallocHeader * hdr;
 
     if (!address)
        return;
 
     assert(size);
 
-    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
+    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
     if (adjustedSize >= page_size) {
-
+#if IOTRACKING
+       if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size);
+#endif
        kmem_free( kernel_map, (vm_offset_t) address, size);
 
     } else {
 
-       adjustedSize = *((mach_vm_size_t *)
-                       (address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
-       allocationAddress = *((mach_vm_address_t *)
-                       (address - sizeof(mach_vm_address_t) ));
+        hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader);
+       adjustedSize = hdr->allocationSize;
+        allocationAddress = hdr->allocationAddress;
+#if IOTRACKING
+       if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+#endif
        kfree((void *)allocationAddress, adjustedSize);
     }
 
     IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
 #if IOALLOCDEBUG
-    debug_iomalloc_size -= size;
+    OSAddAtomic(-size, &debug_iomalloc_size);
 #endif
 }
 
@@ -375,11 +510,12 @@ mach_vm_address_t
 IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, 
                                        mach_vm_size_t alignment, bool contiguous)
 {
-    kern_return_t      kr;
-    mach_vm_address_t  address;
-    mach_vm_address_t  allocationAddress;
-    mach_vm_size_t     adjustedSize;
-    mach_vm_address_t  alignMask;
+    kern_return_t          kr;
+    mach_vm_address_t      address;
+    mach_vm_address_t      allocationAddress;
+    mach_vm_size_t         adjustedSize;
+    mach_vm_address_t      alignMask;
+    IOLibPageMallocHeader * hdr;
 
     if (size == 0)
        return (0);
@@ -387,7 +523,8 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
         alignment = 1;
 
     alignMask = alignment - 1;
-    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
+    adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
+    if (adjustedSize < size) return (0);
 
     contiguous = (contiguous && (adjustedSize > page_size))
                    || (alignment > page_size);
@@ -416,36 +553,49 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
        if (contiguous || maxPhys)
        {
            kr = kmem_alloc_contig(kernel_map, &virt, size,
-                                  alignMask, atop(maxPhys), atop(alignMask), 0);
+                                  alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map));
        }
        else
        {
            kr = kernel_memory_allocate(kernel_map, &virt,
-                                       size, alignMask, options);
+                                       size, alignMask, options, IOMemoryTag(kernel_map));
        }
        if (KERN_SUCCESS == kr)
+       {
            address = virt;
+#if IOTRACKING
+           if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size);
+#endif
+       }
        else
            address = 0;
     }
     else
     {
        adjustedSize += alignMask;
-        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);
+        if (adjustedSize < size) return (0);
+        allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT);
 
         if (allocationAddress) {
 
-            address = (allocationAddress + alignMask
-                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
+
+            address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)
                     & (~alignMask);
 
             if (atop_32(address) != atop_32(address + size - 1))
                 address = round_page(address);
 
-            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
-                            - sizeof(mach_vm_address_t))) = adjustedSize;
-            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
-                            = allocationAddress;
+           hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
+           hdr->allocationSize    = adjustedSize;
+           hdr->allocationAddress = allocationAddress;
+#if IOTRACKING
+           if (TRACK_ALLOC) {
+               bzero(&hdr->tracking, sizeof(hdr->tracking));
+               hdr->tracking.address = ~address;
+               hdr->tracking.size    = size;
+               IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true);
+           }
+#endif
        } else
            address = 0;
     }
@@ -453,7 +603,7 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP
     if (address) {
     IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
 #if IOALLOCDEBUG
-       debug_iomalloc_size += size;
+    OSAddAtomic(size, &debug_iomalloc_size);
 #endif
     }
 
@@ -608,7 +758,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
                     &min,
                     segSize,
                     TRUE,
-                    VM_FLAGS_ANYWHERE,
+                    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT),
                     &map);
         if( KERN_SUCCESS != kr) {
             lck_mtx_unlock( gIOKitPageableSpace.lock );
@@ -631,7 +781,8 @@ kern_return_t IOIteratePageableMaps(vm_size_t size,
 struct IOMallocPageableRef
 {
     vm_offset_t address;
-    vm_size_t   size;
+    vm_size_t  size;
+    vm_tag_t    tag;
 };
 
 static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
@@ -639,12 +790,12 @@ static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref)
     struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;
     kern_return_t               kr;
 
-    kr = kmem_alloc_pageable( map, &ref->address, ref->size );
+    kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );
 
     return( kr );
 }
 
-static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
+static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
 {
     kern_return_t             kr = kIOReturnNotReady;
     struct IOMallocPageableRef ref;
@@ -655,6 +806,7 @@ static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment)
         return( 0 );
 
     ref.size = size;
+    ref.tag  = tag;
     kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
     if( kIOReturnSuccess != kr)
         ref.address = 0;
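
Because IOIteratePageableMaps always invokes its callback as (vm_map_t, void *), the new vm_tag_t cannot be passed as an extra argument; it rides inside struct IOMallocPageableRef alongside the size and the returned address. A small sketch of that in/out ref-struct pattern with generic, non-kernel names:

#include <stddef.h>
#include <stdint.h>

typedef int (*map_callback_t)(void *map, void *refcon);

/* All inputs and outputs travel in one ref struct, as IOMallocPageableRef does. */
struct alloc_ref {
    uintptr_t address;   /* out: where the allocation landed */
    size_t    size;      /* in:  requested size */
    uint32_t  tag;       /* in:  accounting tag, the field this diff adds */
};

static int alloc_callback(void *map, void *refcon)
{
    struct alloc_ref *ref = (struct alloc_ref *) refcon;
    (void) map;
    /* A real implementation would allocate ref->size bytes from `map`
     * charged to ref->tag; this stub just reports a fixed address. */
    ref->address = 0x1000;
    return 0;
}

/* Stand-in for IOIteratePageableMaps: the callback signature is fixed,
 * so anything extra must be carried through the refcon. */
static int iterate_maps(size_t size, map_callback_t callback, void *refcon)
{
    (void) size;
    return callback(NULL, refcon);
}

static uintptr_t sketch_malloc_pageable(size_t size, uint32_t tag)
{
    struct alloc_ref ref = { 0, size, tag };
    if (iterate_maps(size, alloc_callback, &ref) != 0) return 0;
    return ref.address;
}
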
@@ -691,19 +843,19 @@ static void IOFreePageablePages(void * address, vm_size_t size)
 
 static uintptr_t IOMallocOnePageablePage(iopa_t * a)
 {
-    return ((uintptr_t) IOMallocPageablePages(page_size, page_size));
+    return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT));
 }
 
 void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
 {
     void * addr;
 
-    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment);
+    if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
     else                   addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment));
 
     if (addr) {
 #if IOALLOCDEBUG
-       debug_iomallocpageable_size += size;
+          OSAddAtomicLong(size, &debug_iomallocpageable_size);
 #endif
        IOStatisticsAlloc(kIOStatisticsMallocPageable, size);
     }
@@ -714,7 +866,7 @@ void * IOMallocPageable(vm_size_t size, vm_size_t alignment)
 void IOFreePageable(void * address, vm_size_t size)
 {
 #if IOALLOCDEBUG
-    debug_iomallocpageable_size -= size;
+       OSAddAtomicLong(-size, &debug_iomallocpageable_size);
 #endif
     IOStatisticsAlloc(kIOStatisticsFreePageable, size);
 
@@ -768,14 +920,6 @@ iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
     return (0);
 }
 
-static uint32_t 
-log2up(uint32_t size)
-{
-    if (size <= 1) size = 0;
-    else size = 32 - __builtin_clz(size - 1);
-    return (size);
-}
-
 uintptr_t 
 iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
 {
@@ -798,7 +942,7 @@ iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
     align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)];
 
     IOLockLock(a->lock);
-    pa = (typeof(pa)) queue_first(&a->list);
+    __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list));
     while (!queue_end(&a->list, &pa->link))
     {
        addr = iopa_allocinpage(pa, count, align);
@@ -807,7 +951,7 @@ iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign)
            a->bytecount += bytes;
            break;
        }
-       pa = (typeof(pa)) queue_next(&pa->link);
+       __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link));
     }
     IOLockUnlock(a->lock);
 
@@ -941,6 +1085,15 @@ void IOSleep(unsigned milliseconds)
     delay_for_interval(milliseconds, kMillisecondScale);
 }
 
+/*
+ * Sleep for the indicated number of milliseconds, and potentially an
+ * additional number of milliseconds up to the leeway value.
+ */
+void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
+{
+    delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);
+}
+
 /*
  * Spin for indicated number of microseconds.
  */
@@ -985,11 +1138,12 @@ void IOLogv(const char *format, va_list ap)
     va_copy(ap2, ap);
 
     bsd_log_lock();
-    __doprnt(format, ap, _iolog_logputc, NULL, 16);
+    __doprnt(format, ap, _iolog_logputc, NULL, 16, TRUE);
     bsd_log_unlock();
     logwakeup();
 
-    __doprnt(format, ap2, _iolog_consputc, NULL, 16);
+    __doprnt(format, ap2, _iolog_consputc, NULL, 16, TRUE);
+    va_end(ap2);
 }
 
 #if !__LP64__
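
IOLogv formats the same argument list twice, once for the message buffer and once for the console, which is why it va_copy()s the list up front; the added va_end(ap2) closes that copy, as C requires for every va_copy. A short sketch of the pattern with vsnprintf standing in for __doprnt:

#include <stdarg.h>
#include <stdio.h>

/* A va_list may only be walked once, so to format the same arguments for two
 * sinks the list is copied first and the copy is closed with va_end(),
 * which is the cleanup this diff adds to IOLogv. */
static void
log_twice(char *bufA, char *bufB, size_t len, const char *format, va_list ap)
{
    va_list ap2;
    va_copy(ap2, ap);

    vsnprintf(bufA, len, format, ap);     /* first consumer: the message buffer */
    vsnprintf(bufB, len, format, ap2);    /* second consumer: the console */
    va_end(ap2);
}

static void
log_both(char *bufA, char *bufB, size_t len, const char *format, ...)
{
    va_list ap;
    va_start(ap, format);
    log_twice(bufA, bufB, len, format, ap);
    va_end(ap);
}
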
@@ -1043,7 +1197,7 @@ OSString * IOCopyLogNameForPID(int pid)
 
 IOAlignment IOSizeToAlignment(unsigned int size)
 {
-    register int shift;
+    int shift;
     const int intsize = sizeof(unsigned int) * 8;
     
     for (shift = 1; shift < intsize; shift++) {