/*
 * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 * 17-Apr-91   Portions from libIO.m, Doug Mitchell at NeXT.
 */
#include <IOKit/system.h>
#include <mach/sync_policy.h>
#include <machine/machine_routines.h>
#include <vm/vm_kern.h>
#include <libkern/c++/OSCPPDebug.h>

#include <IOKit/assert.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/OSDebug.h>
#include <sys/sysctl.h>

#include "libkern/OSAtomic.h"
#include <libkern/c++/OSKext.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <os/log_private.h>
#include <sys/msgbuf.h>
#include <console/serial_protos.h>
#if IOKITSTATS

#define IOStatisticsAlloc(type, size) \
do { \
        IOStatistics::countAlloc(type, size); \
} while (0)

#else

#define IOStatisticsAlloc(type, size)

#endif /* IOKITSTATS */

#define TRACK_ALLOC     (IOTRACKING && (kIOTracking & gIOKitDebug))
mach_timespec_t IOZeroTvalspec = { 0, 0 };

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

        void                    (*putc)(int, void *),

extern void cons_putc_locked(char);
extern bool bsd_log_lock(bool);
extern void bsd_log_unlock(void);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
lck_grp_t       *IOLockGroup;

/*
 * Global variables for use by iLogger
 * These symbols are for use only by Apple diagnostic code.
 * Binary compatibility is not guaranteed for kexts that reference these symbols.
 */
void *_giDebugLogInternal       = NULL;
void *_giDebugLogDataInternal   = NULL;
void *_giDebugReserved1         = NULL;
void *_giDebugReserved2         = NULL;

iopa_t gIOBMDPageAllocator;

/*
 * Static variables for this module.
 */
static queue_head_t gIOMallocContiguousEntries;
static lck_mtx_t *  gIOMallocContiguousEntriesLock;

#if __LP64__
enum { kIOMaxPageableMaps    = 8 };
enum { kIOPageableMapSize    = 512 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 512 * 1024 * 1024 };
#else
enum { kIOMaxPageableMaps    = 16 };
enum { kIOPageableMapSize    = 96 * 1024 * 1024 };
enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 };
#endif

        IOMapData   maps[kIOMaxPageableMaps];
} gIOKitPageableSpace;
static iopa_t gIOPageablePageAllocator;

uint32_t  gIOPageAllocChunkBytes;

#if IOTRACKING
IOTrackingQueue * gIOMallocTracking;
IOTrackingQueue * gIOWireTracking;
IOTrackingQueue * gIOMapTracking;
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
        static bool libInitialized;

        if (libInitialized) {

        IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL);

        gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0,
            kIOTrackingQueueTypeAlloc,

        gIOWireTracking   = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0);

        size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024);
        gIOMapTracking    = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize,
            kIOTrackingQueueTypeDefaultOn
            | kIOTrackingQueueTypeMap
            | kIOTrackingQueueTypeUser,

        gIOKitPageableSpace.maps[0].address = 0;
        ret = kmem_suballoc(kernel_map,
            &gIOKitPageableSpace.maps[0].address,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_IOKIT,
            &gIOKitPageableSpace.maps[0].map);
        if (ret != KERN_SUCCESS) {
                panic("failed to allocate iokit pageable map\n");

        gIOKitPageableSpace.lock            = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
        gIOKitPageableSpace.maps[0].end     = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize;
        gIOKitPageableSpace.hint            = 0;
        gIOKitPageableSpace.count           = 1;

        gIOMallocContiguousEntriesLock      = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
        queue_init( &gIOMallocContiguousEntries );

        gIOPageAllocChunkBytes = PAGE_SIZE / 64;
        assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes);
        iopa_init(&gIOBMDPageAllocator);
        iopa_init(&gIOPageablePageAllocator);

        libInitialized = true;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
log2up(vm_size_t size)
                size = 64 - __builtin_clzl(size - 1);
                size = 32 - __builtin_clzl(size - 1);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
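/*
 * Worked example for log2up() above (assuming the 64-bit branch): the result
 * is the exponent of the next power of two at or above 'size'.  For
 * size == 4096, __builtin_clzl(4095) == 52, so 64 - 52 == 12 (2^12 == 4096);
 * for size == 4097 the result is 13.  The handling of sizes 0 and 1 is not
 * shown in the excerpt above.
 */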
IOCreateThread(IOThreadFunc fcn, void *arg)
        kern_return_t   result;

        result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
        if (result != KERN_SUCCESS) {

        thread_deallocate(thread);

        (void) thread_terminate(current_thread());

        return IOMalloc_internal(KHEAP_KEXT, size);

IOMallocZero_external(
IOMallocZero_external(
        return IOMallocZero_internal(KHEAP_KEXT, size);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOMallocZero_internal(struct kalloc_heap *kalloc_heap_cfg, vm_size_t size)
        result = IOMalloc_internal(kalloc_heap_cfg, size);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibMallocHeader {
        IOTrackingAddress tracking;
};

#if IOTRACKING
#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibMallocHeader (0)
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOMalloc_internal(struct kalloc_heap *kheap, vm_size_t size)
        allocSize = size + sizeofIOLibMallocHeader;

        if (sizeofIOLibMallocHeader && (allocSize <= size)) {
                return NULL;                                          // overflow

        address = kheap_alloc_tag_bt(kheap, allocSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);

                        IOLibMallocHeader * hdr;
                        hdr = (typeof(hdr))address;
                        bzero(&hdr->tracking, sizeof(hdr->tracking));
                        hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader);
                        hdr->tracking.size    = size;
                        IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);

                address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader);

                OSAddAtomicLong(size, &debug_iomalloc_size);

                IOStatisticsAlloc(kIOStatisticsMalloc, size);

IOFree(void * inAddress, vm_size_t size)
        if ((address = inAddress)) {
                address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader);

                        IOLibMallocHeader * hdr;
                        struct ptr_reference { void * ptr; };
                        volatile struct ptr_reference ptr;

                        // we're about to block in IOTrackingRemove(), make sure the original pointer
                        // exists in memory or a register for leak scanning to find

                        hdr = (typeof(hdr))address;
                        if (size != hdr->tracking.size) {
                                OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                                size = hdr->tracking.size;

                        IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);

                kfree(address, size + sizeofIOLibMallocHeader);

                OSAddAtomicLong(-size, &debug_iomalloc_size);

                IOStatisticsAlloc(kIOStatisticsFree, size);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
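/*
 * Minimal usage sketch for the kext-facing IOMalloc/IOFree pair declared in
 * <IOKit/IOLib.h> (buffer name and size here are illustrative only).  The
 * caller must pass the original allocation size back to IOFree; with tracking
 * enabled that size is checked against hdr->tracking.size above.
 *
 *     void * buf = IOMalloc(1024);
 *     if (buf != NULL) {
 *             bzero(buf, 1024);
 *             // ... use the buffer ...
 *             IOFree(buf, 1024);
 *     }
 */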
IOMemoryTag(vm_map_t map)
        if (!vm_kernel_map_is_kernel(map)) {
                return VM_MEMORY_IOKIT;

        if (tag == VM_KERN_MEMORY_NONE) {
                tag = VM_KERN_MEMORY_IOKIT;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOLibPageMallocHeader {
        mach_vm_size_t    allocationSize;
        mach_vm_address_t allocationAddress;
#if IOTRACKING
        IOTrackingAddress tracking;
#endif /* IOTRACKING */
};

#if IOTRACKING
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress)))
#else
#define sizeofIOLibPageMallocHeader     (sizeof(IOLibPageMallocHeader))
#endif /* IOTRACKING */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOMallocAligned_external(
        vm_size_t size, vm_size_t alignment);
IOMallocAligned_external(
        vm_size_t size, vm_size_t alignment)
        return IOMallocAligned_internal(KHEAP_KEXT, size, alignment);

IOMallocAligned_internal(struct kalloc_heap *kheap, vm_size_t size,
        vm_offset_t             allocationAddress;
        vm_size_t               adjustedSize;
        IOLibPageMallocHeader * hdr;

        if (((uint32_t) alignment) != alignment) {

        alignment = (1UL << log2up((uint32_t) alignment));
        alignMask = alignment - 1;
        adjustedSize = size + sizeofIOLibPageMallocHeader;

        if (size > adjustedSize) {
                address = 0; /* overflow detected */
        } else if (adjustedSize >= page_size) {
                kr = kernel_memory_allocate(kernel_map, &address,
                    size, alignMask, 0, IOMemoryTag(kernel_map));
                if (KERN_SUCCESS != kr) {
                else if (TRACK_ALLOC) {
                        IOTrackingAlloc(gIOMallocTracking, address, size);

                adjustedSize += alignMask;

                if (adjustedSize >= page_size) {
                        kr = kernel_memory_allocate(kernel_map, &allocationAddress,
                            adjustedSize, 0, 0, IOMemoryTag(kernel_map));
                        if (KERN_SUCCESS != kr) {
                                allocationAddress = 0;

                        allocationAddress = (vm_address_t) kheap_alloc_tag_bt(kheap,
                            adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);

                if (allocationAddress) {
                        address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)

                        hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
                        hdr->allocationSize    = adjustedSize;
                        hdr->allocationAddress = allocationAddress;

                                bzero(&hdr->tracking, sizeof(hdr->tracking));
                                hdr->tracking.address = ~address;
                                hdr->tracking.size = size;
                                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);

        assert(0 == (address & alignMask));

                OSAddAtomicLong(size, &debug_iomalloc_size);

                IOStatisticsAlloc(kIOStatisticsMallocAligned, size);

        return (void *) address;

IOFreeAligned(void * address, vm_size_t size)
        vm_address_t            allocationAddress;
        vm_size_t               adjustedSize;
        IOLibPageMallocHeader * hdr;

        adjustedSize = size + sizeofIOLibPageMallocHeader;
        if (adjustedSize >= page_size) {
                        IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size);

                kmem_free( kernel_map, (vm_offset_t) address, size);

                hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
                adjustedSize = hdr->allocationSize;
                allocationAddress = hdr->allocationAddress;

                        if (size != hdr->tracking.size) {
                                OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size);
                                size = hdr->tracking.size;

                        IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);

                if (adjustedSize >= page_size) {
                        kmem_free( kernel_map, allocationAddress, adjustedSize);

                        kfree(allocationAddress, adjustedSize);

        OSAddAtomicLong(-size, &debug_iomalloc_size);

        IOStatisticsAlloc(kIOStatisticsFreeAligned, size);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
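/*
 * Usage sketch (illustrative values): IOMallocAligned_internal rounds the
 * requested alignment up to a power of two via log2up(), so a 4 KB aligned
 * buffer of two pages could be obtained through the public wrappers like
 * this.  As with IOFree, the free call must repeat the original size.
 *
 *     void * ring = IOMallocAligned(8192, 4096);
 *     if (ring != NULL) {
 *             // ... use the aligned buffer ...
 *             IOFreeAligned(ring, 8192);
 *     }
 */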
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
        vm_address_t       allocationAddress;
        vm_size_t          adjustedSize;
        IOLibPageMallocHeader * hdr;

        adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader;
        if (adjustedSize >= page_size) {
                        IOTrackingFree(gIOMallocTracking, address, size);

                kmem_free( kernel_map, (vm_offset_t) address, size);

                hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader);
                adjustedSize = hdr->allocationSize;
                allocationAddress = hdr->allocationAddress;

                        IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);

                kfree(allocationAddress, adjustedSize);

        IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);

        OSAddAtomicLong(-size, &debug_iomalloc_size);

#if __arm__ || __arm64__
extern unsigned long gPhysBase, gPhysSize;

IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys,
    mach_vm_size_t alignment, bool contiguous)
        mach_vm_address_t       address;
        mach_vm_address_t       allocationAddress;
        mach_vm_size_t          adjustedSize;
        mach_vm_address_t       alignMask;
        IOLibPageMallocHeader * hdr;

        if (alignment == 0) {

        alignMask = alignment - 1;

        if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) {

        contiguous = (contiguous && (adjustedSize > page_size))
            || (alignment > page_size);

        if (contiguous || maxPhys) {

                contiguous = (contiguous && (adjustedSize > page_size))
                    || (alignment > page_size);

#if __arm__ || __arm64__
                        if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) {

                        if (maxPhys <= 0xFFFFFFFF) {
                                options |= KMA_LOMEM;
                        } else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) {

                if (contiguous || maxPhys) {
                        kr = kmem_alloc_contig(kernel_map, &virt, size,
                            alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask), 0, IOMemoryTag(kernel_map));

                        kr = kernel_memory_allocate(kernel_map, &virt,
                            size, alignMask, options, IOMemoryTag(kernel_map));

                if (KERN_SUCCESS == kr) {
                                IOTrackingAlloc(gIOMallocTracking, address, size);

                adjustedSize += alignMask;
                if (adjustedSize < size) {

                allocationAddress = (mach_vm_address_t) kheap_alloc_tag_bt(KHEAP_KEXT,
                    adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT);

                if (allocationAddress) {
                        address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader)

                        if (atop_32(address) != atop_32(address + size - 1)) {
                                address = round_page(address);

                        hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader);
                        hdr->allocationSize    = adjustedSize;
                        hdr->allocationAddress = allocationAddress;

                                bzero(&hdr->tracking, sizeof(hdr->tracking));
                                hdr->tracking.address = ~address;
                                hdr->tracking.size    = size;
                                IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE);

                IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);

                OSAddAtomicLong(size, &debug_iomalloc_size);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
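/*
 * Note on the small-allocation path above: when the request does not go
 * through kernel_memory_allocate()/kmem_alloc_contig(), the code grabs an
 * oversized heap block, returns an aligned pointer inside it, and stores an
 * IOLibPageMallocHeader immediately below that pointer
 * (hdr = address - sizeofIOLibPageMallocHeader).  IOKernelFreePhysical later
 * reads allocationSize/allocationAddress back out of that header to release
 * the original heap block, which is why callers must free with the same size
 * they allocated.
 */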
struct _IOMallocContiguousEntry {
        mach_vm_address_t          virtualAddr;
        IOBufferMemoryDescriptor * md;

typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry;

IOMallocContiguous(vm_size_t size, vm_size_t alignment,
    IOPhysicalAddress * physicalAddress)
        mach_vm_address_t   address = 0;

        if (alignment == 0) {

        /* Do we want a physical address? */
        if (!physicalAddress) {
                address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);

                        IOBufferMemoryDescriptor * bmd;
                        mach_vm_address_t          physicalMask;
                        vm_offset_t                alignMask;

                        alignMask = alignment - 1;
                        physicalMask = (0xFFFFFFFF ^ alignMask);

                        bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
                                kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);

                        _IOMallocContiguousEntry *
                            entry = IONew(_IOMallocContiguousEntry, 1);

                        entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();

                        lck_mtx_lock(gIOMallocContiguousEntriesLock);
                        queue_enter( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );
                        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                        address          = (mach_vm_address_t) entry->virtualAddr;
                        *physicalAddress = bmd->getPhysicalAddress();

        return (void *) address;

IOFreeContiguous(void * _address, vm_size_t size)
        _IOMallocContiguousEntry * entry;
        IOMemoryDescriptor *       md = NULL;

        mach_vm_address_t address = (mach_vm_address_t) _address;

        lck_mtx_lock(gIOMallocContiguousEntriesLock);
        queue_iterate( &gIOMallocContiguousEntries, entry,
            _IOMallocContiguousEntry *, link )
                if (entry->virtualAddr == address) {
                        queue_remove( &gIOMallocContiguousEntries, entry,
                            _IOMallocContiguousEntry *, link );

        lck_mtx_unlock(gIOMallocContiguousEntriesLock);

                IODelete(entry, _IOMallocContiguousEntry, 1);

                IOKernelFreePhysical((mach_vm_address_t) address, size);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
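/*
 * Usage sketch: when the caller wants the physical address, the allocation is
 * backed by an IOBufferMemoryDescriptor and remembered on
 * gIOMallocContiguousEntries so that IOFreeContiguous can find it again.
 * Illustrative call for a DMA-style buffer (names and sizes hypothetical):
 *
 *     IOPhysicalAddress phys = 0;
 *     void * dmaBuf = IOMallocContiguous(16384, 4096, &phys);
 *     if (dmaBuf != NULL) {
 *             // ... hand 'phys' to the device ...
 *             IOFreeContiguous(dmaBuf, 16384);
 *     }
 */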
IOIteratePageableMaps(vm_size_t size,
    IOIteratePageableMapsCallback callback, void * ref)
        kern_return_t       kr = kIOReturnNotReady;

        if (size > kIOPageableMaxMapSize) {
                return kIOReturnBadArgument;

                index = gIOKitPageableSpace.hint;
                attempts = gIOKitPageableSpace.count;

                        kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref);
                        if (KERN_SUCCESS == kr) {
                                gIOKitPageableSpace.hint = index;

                                index = gIOKitPageableSpace.count - 1;

                if (KERN_NO_SPACE != kr) {

                lck_mtx_lock( gIOKitPageableSpace.lock );

                index = gIOKitPageableSpace.count;
                if (index >= (kIOMaxPageableMaps - 1)) {
                        lck_mtx_unlock( gIOKitPageableSpace.lock );

                if (size < kIOPageableMapSize) {
                        segSize = kIOPageableMapSize;

                kr = kmem_suballoc(kernel_map,
                    VM_MAP_KERNEL_FLAGS_NONE,
                    VM_KERN_MEMORY_IOKIT,
                if (KERN_SUCCESS != kr) {
                        lck_mtx_unlock( gIOKitPageableSpace.lock );

                gIOKitPageableSpace.maps[index].map     = map;
                gIOKitPageableSpace.maps[index].address = min;
                gIOKitPageableSpace.maps[index].end     = min + segSize;
                gIOKitPageableSpace.hint                = index;
                gIOKitPageableSpace.count               = index + 1;

                lck_mtx_unlock( gIOKitPageableSpace.lock );

struct IOMallocPageableRef {

IOMallocPageableCallback(vm_map_t map, void * _ref)
        struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref;

        kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag );

IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag)
        kern_return_t              kr = kIOReturnNotReady;
        struct IOMallocPageableRef ref;

        if (alignment > page_size) {

        if (size > kIOPageableMaxMapSize) {

        kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref );
        if (kIOReturnSuccess != kr) {

        return (void *) ref.address;

IOPageableMapForAddress( uintptr_t address )
        for (index = 0; index < gIOKitPageableSpace.count; index++) {
                if ((address >= gIOKitPageableSpace.maps[index].address)
                    && (address < gIOKitPageableSpace.maps[index].end)) {
                        map = gIOKitPageableSpace.maps[index].map;

                panic("IOPageableMapForAddress: null");

IOFreePageablePages(void * address, vm_size_t size)
        map = IOPageableMapForAddress((vm_address_t) address);

                kmem_free( map, (vm_offset_t) address, size);

IOMallocOnePageablePage(iopa_t * a)
        return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT);

IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed)
        if (((uint32_t) alignment) != alignment) {

        if (size >= (page_size - 4 * gIOPageAllocChunkBytes) ||
            alignment > page_size) {
                addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map));
                /* Memory allocated this way will already be zeroed. */

                addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, (uint32_t) alignment));

                OSAddAtomicLong(size, &debug_iomallocpageable_size);

                IOStatisticsAlloc(kIOStatisticsMallocPageable, size);

IOMallocPageable(vm_size_t size, vm_size_t alignment)
        return IOMallocPageableInternal(size, alignment, /*zeroed*/ false);

IOMallocPageableZero(vm_size_t size, vm_size_t alignment)
        return IOMallocPageableInternal(size, alignment, /*zeroed*/ true);

IOFreePageable(void * address, vm_size_t size)
        OSAddAtomicLong(-size, &debug_iomallocpageable_size);

        IOStatisticsAlloc(kIOStatisticsFreePageable, size);

        if (size < (page_size - 4 * gIOPageAllocChunkBytes)) {
                address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size);

                IOFreePageablePages(address, size);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
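/*
 * Usage sketch: pageable allocations are served either from the chunked page
 * allocator (requests smaller than roughly a page) or from the pageable
 * submaps above.  Because the memory can be paged out, it should only be
 * touched from a blockable context.  Illustrative call:
 *
 *     void * table = IOMallocPageableZero(64 * 1024, sizeof(void *));
 *     if (table != NULL) {
 *             // ... large, infrequently accessed bookkeeping data ...
 *             IOFreePageable(table, 64 * 1024);
 *     }
 */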
iopa_init(iopa_t * a)
        bzero(a, sizeof(*a));
        a->lock = IOLockAlloc();
        queue_init(&a->list);

iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align)
        uint64_t avail = pa->avail;

        // find strings of count 1 bits in avail
        for (n = count; n > 1; n -= s) {
                avail = avail & (avail << s);

                n = __builtin_clzll(avail);
                pa->avail &= ~((-1ULL << (64 - count)) >> n);
                if (!pa->avail && pa->link.next) {
                        pa->link.next = NULL;

                return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa);

iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, vm_size_t balign)
        static const uint64_t align_masks[] = {
        vm_size_t     align_masks_idx;

        if (((uint32_t) bytes) != bytes) {

        count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;

        align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes);
        assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks));
        align = align_masks[align_masks_idx];

        IOLockLock(a->lock);
        __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list));
        while (!queue_end(&a->list, &pa->link)) {
                addr = iopa_allocinpage(pa, count, align);
                        a->bytecount += bytes;

                __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link));

        IOLockUnlock(a->lock);

                        pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes);
                        pa->signature = kIOPageAllocSignature;

                        addr = iopa_allocinpage(pa, count, align);
                        IOLockLock(a->lock);
                                enqueue_head(&a->list, &pa->link);

                                a->bytecount += bytes;

                        IOLockUnlock(a->lock);

        assert((addr & ((1 << log2up(balign)) - 1)) == 0);

iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes)
        if (((uint32_t) bytes) != bytes) {

        chunk = (addr & page_mask);
        assert(0 == (chunk & (gIOPageAllocChunkBytes - 1)));

        pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes));
        assert(kIOPageAllocSignature == pa->signature);

        count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes;
        chunk /= gIOPageAllocChunkBytes;

        IOLockLock(a->lock);
                assert(!pa->link.next);
                enqueue_tail(&a->list, &pa->link);

        pa->avail |= ((-1ULL << (64 - count)) >> chunk);
        if (pa->avail != -2ULL) {

                pa->link.next = NULL;

                pa = (typeof(pa))trunc_page(pa);

        a->bytecount -= bytes;
        IOLockUnlock(a->lock);

        return (uintptr_t) pa;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
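/*
 * Worked example of the chunk bitmap used by iopa_alloc()/iopa_free() above
 * (one bit per gIOPageAllocChunkBytes chunk, most significant bit == chunk 0,
 * as the ">> chunk" arithmetic suggests): freeing a 3-chunk allocation that
 * starts at chunk 2 computes (-1ULL << (64 - 3)) >> 2, i.e. bits 61..59, and
 * ORs them back into pa->avail.  A fully free page reads as ~1ULL == -2ULL
 * because the last chunk stays reserved for the iopa_page_t bookkeeping
 * placed at the end of the page.
 */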
IOSetProcessorCacheMode( task_t task, IOVirtualAddress address,
    IOByteCount length, IOOptionBits cacheMode )
        IOReturn    ret = kIOReturnSuccess;

        if (task != kernel_task) {
                return kIOReturnUnsupported;

        if ((address | length) & PAGE_MASK) {
//      OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode);
                return kIOReturnUnsupported;

        length = round_page(address + length) - trunc_page( address );
        address = trunc_page( address );

        cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask;

        while ((kIOReturnSuccess == ret) && (length > 0)) {
                // Get the physical page number
                pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address);
                        ret = IOUnmapPages( get_task_map(task), address, page_size );
                        ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode );

                        ret = kIOReturnVMError;

                address += page_size;
                length -= page_size;

IOFlushProcessorCache( task_t task, IOVirtualAddress address,
    IOByteCount length )
        if (task != kernel_task) {
                return kIOReturnUnsupported;

        flush_dcache64((addr64_t) address, (unsigned) length, false );

        return kIOReturnSuccess;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
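/*
 * Usage note: both routines above operate only on kernel_task mappings and in
 * whole pages; IOSetProcessorCacheMode additionally rejects address/length
 * values that are not page aligned.  Illustrative flush after the CPU has
 * written a buffer that a device will read ('dmaBuf'/size are hypothetical):
 *
 *     IOFlushProcessorCache(kernel_task, (IOVirtualAddress) dmaBuf, 16384);
 */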
OSKernelStackRemaining( void )
        return ml_stack_remaining();

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Sleep the calling thread for the indicated number of milliseconds.
 */
IOSleep(unsigned milliseconds)
        delay_for_interval(milliseconds, kMillisecondScale);

/*
 * Sleep the calling thread for the indicated number of milliseconds, and
 * potentially an additional number of milliseconds up to the leeway values.
 */
IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds)
        delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale);

/*
 * Spin for indicated number of microseconds.
 */
IODelay(unsigned microseconds)
        delay_for_interval(microseconds, kMicrosecondScale);

/*
 * Spin for indicated number of nanoseconds.
 */
IOPause(unsigned nanoseconds)
        delay_for_interval(nanoseconds, kNanosecondScale);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0);

__attribute__((noinline, not_tail_called))
IOLog(const char *format, ...)
        void *caller = __builtin_return_address(0);

        va_start(ap, format);
        _IOLogv(format, ap, caller);

__attribute__((noinline, not_tail_called))
IOLogv(const char *format, va_list ap)
        void *caller = __builtin_return_address(0);
        _IOLogv(format, ap, caller);

_IOLogv(const char *format, va_list ap, void *caller)
        struct console_printbuf_state info_data;
        console_printbuf_state_init(&info_data, TRUE, TRUE);

        os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller);

        __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE);
        console_printbuf_clear(&info_data);

        assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled");

IOPanic(const char *reason)
        panic("%s", reason);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
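/*
 * Usage sketch: IOLog takes printf-style arguments and, per _IOLogv above,
 * feeds both the unified logging system (os_log_with_args) and the console
 * ring buffer.  The assertf above means it must not be called with interrupts
 * disabled.  Illustrative call from a driver (names hypothetical):
 *
 *     IOLog("%s: link up, %u Mb/s\n", getName(), linkSpeed);
 */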
IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
    void (*output)(const char *format, ...))
        size_t idx, linestart;
        enum { bytelen = (sizeof("0xZZ, ") - 1) };
        char hex[(bytelen * 16) + 1];
        uint8_t c, chars[17];

        output("%s(0x%lx):\n", title, size);
        output("              0     1     2     3     4     5     6     7     8     9     A     B     C     D     E     F\n");

        for (idx = 0, linestart = 0; idx < size;) {
                c = ((char *)buffer)[idx];
                snprintf(&hex[bytelen * (idx & 15)], bytelen + 1, "0x%02x, ", c);
                chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';

                if ((idx == size) || !(idx & 15)) {
                                chars[idx & 15] = 0;

                        output("/* %04lx: */ %-96s /* |%-16s| */\n", linestart, hex, chars);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Convert an integer constant (typically a #define or enum) to a string.
 */
static char noValue[80];        // that's pretty

IOFindNameForValue(int value, const IONamedValue *regValueArray)
        for (; regValueArray->name; regValueArray++) {
                if (regValueArray->value == value) {
                        return regValueArray->name;

        snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value);
        return (const char *)noValue;
IOFindValueForName(const char *string,
    const IONamedValue *regValueArray,
        for (; regValueArray->name; regValueArray++) {
                if (!strcmp(regValueArray->name, string)) {
                        *value = regValueArray->value;
                        return kIOReturnSuccess;

        return kIOReturnBadArgument;
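/*
 * Usage sketch for the IONamedValue lookups above; the table is scanned until
 * an entry with a NULL name terminates it (table contents are illustrative):
 *
 *     static const IONamedValue powerStateNames[] = {
 *             { 0, "off"  },
 *             { 1, "doze" },
 *             { 2, "on"   },
 *             { 0, NULL   }
 *     };
 *     IOLog("state: %s\n", IOFindNameForValue(2, powerStateNames));  // "on"
 */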
IOCopyLogNameForPID(int pid)
        snprintf(buf, sizeof(buf), "pid %d, ", pid);
        proc_name(pid, buf + len, (int) (sizeof(buf) - len));
        return OSString::withCString(buf);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOSizeToAlignment(unsigned int size)
        const int intsize = sizeof(unsigned int) * 8;

        for (shift = 1; shift < intsize; shift++) {
                if (size & 0x80000000) {
                        return (IOAlignment)(intsize - shift);

IOAlignmentToSize(IOAlignment align)
        for (size = 1; align; align--) {