2  * Copyright (c) 1998-2016 Apple Inc. All rights reserved. 
   4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 
   6  * This file contains Original Code and/or Modifications of Original Code 
   7  * as defined in and that are subject to the Apple Public Source License 
   8  * Version 2.0 (the 'License'). You may not use this file except in 
   9  * compliance with the License. The rights granted to you under the License 
  10  * may not be used to create, or enable the creation or redistribution of, 
  11  * unlawful or unlicensed copies of an Apple operating system, or to 
  12  * circumvent, violate, or enable the circumvention or violation of, any 
  13  * terms of an Apple operating system software license agreement. 
  15  * Please obtain a copy of the License at 
  16  * http://www.opensource.apple.com/apsl/ and read it before using this file. 
  18  * The Original Code and all software distributed under the License are 
  19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
  20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
  21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
  22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
  23  * Please see the License for the specific language governing rights and 
  24  * limitations under the License. 
  26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 
  30 #include <sys/cdefs.h> 
  32 #include <IOKit/assert.h> 
  33 #include <IOKit/system.h> 
  34 #include <IOKit/IOLib.h> 
  35 #include <IOKit/IOMemoryDescriptor.h> 
  36 #include <IOKit/IOMapper.h> 
  37 #include <IOKit/IODMACommand.h> 
  38 #include <IOKit/IOKitKeysPrivate.h> 
  40 #include <IOKit/IOSubMemoryDescriptor.h> 
  41 #include <IOKit/IOMultiMemoryDescriptor.h> 
  43 #include <IOKit/IOKitDebug.h> 
  44 #include <libkern/OSDebug.h> 
  46 #include "IOKitKernelInternal.h" 
  48 #include <libkern/c++/OSContainers.h> 
  49 #include <libkern/c++/OSDictionary.h> 
  50 #include <libkern/c++/OSArray.h> 
  51 #include <libkern/c++/OSSymbol.h> 
  52 #include <libkern/c++/OSNumber.h> 
  53 #include <os/overflow.h> 
  59 #include <vm/vm_pageout.h> 
  60 #include <mach/memory_object_types.h> 
  61 #include <device/device_port.h> 
  63 #include <mach/vm_prot.h> 
  64 #include <mach/mach_vm.h> 
  65 #include <vm/vm_fault.h> 
  66 #include <vm/vm_protos.h> 
  68 extern ppnum_t 
pmap_find_phys(pmap_t pmap
, addr64_t va
); 
  69 extern void ipc_port_release_send(ipc_port_t port
); 
  71 // osfmk/device/iokit_rpc.c 
  72 unsigned int IODefaultCacheBits(addr64_t pa
); 
  73 unsigned int  IOTranslateCacheBits(struct phys_entry 
*pp
); 
  77 #define kIOMapperWaitSystem     ((IOMapper *) 1) 
  79 static IOMapper 
* gIOSystemMapper 
= NULL
; 
  83 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
  85 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor
, OSObject 
) 
  87 #define super IOMemoryDescriptor 
  89 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor
, IOMemoryDescriptor
) 
  91 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
  93 static IORecursiveLock 
* gIOMemoryLock
; 
  95 #define LOCK    IORecursiveLockLock( gIOMemoryLock) 
  96 #define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock) 
  97 #define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT) 
  99     IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false) 
 102 #define DEBG(fmt, args...)      { kprintf(fmt, ## args); } 
 104 #define DEBG(fmt, args...)      {} 
 107 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
 109 // Some data structures and accessor macros used by the initWithOptions 
 112 enum ioPLBlockFlags 
{ 
 113     kIOPLOnDevice  
= 0x00000001, 
 114     kIOPLExternUPL 
= 0x00000002, 
 117 struct IOMDPersistentInitData
 
 119     const IOGeneralMemoryDescriptor 
* fMD
; 
 120     IOMemoryReference               
* fMemRef
; 
 125     vm_address_t fPageInfo
;   // Pointer to page list or index into it 
 126     uint32_t fIOMDOffset
;           // The offset of this iopl in descriptor 
 127     ppnum_t fMappedPage
;            // Page number of first page in this iopl 
 128     unsigned int fPageOffset
;       // Offset within first page of iopl 
 129     unsigned int fFlags
;            // Flags 
 132 enum { kMaxWireTags 
= 6 }; 
 137     uint64_t    fDMAMapAlignment
; 
 138     uint64_t    fMappedBase
; 
 139     uint64_t    fMappedLength
; 
 140     uint64_t    fPreparationID
; 
 142     IOTracking  fWireTracking
; 
 143 #endif /* IOTRACKING */ 
 144     unsigned int      fPageCnt
; 
 145     uint8_t           fDMAMapNumAddressBits
; 
 146     unsigned char     fDiscontig
:1; 
 147     unsigned char     fCompletionError
:1; 
 148     unsigned char     fMappedBaseValid
:1; 
 149     unsigned char     _resv
:3; 
 150     unsigned char     fDMAAccess
:2; 
 152     /* variable length arrays */ 
 153     upl_page_info_t fPageList
[1] 
 155                                 // align fPageList as for ioPLBlock 
 156                                 __attribute__((aligned(sizeof(upl_t
)))) 
 159     ioPLBlock fBlocks
[1]; 
 162 #define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy()) 
 163 #define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt])) 
 164 #define getNumIOPL(osd, d)      \ 
 165     (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) 
 166 #define getPageList(d)  (&(d->fPageList[0])) 
 167 #define computeDataSize(p, u) \ 
 168     (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) 
 170 enum { kIOMemoryHostOrRemote 
= kIOMemoryHostOnly 
| kIOMemoryRemote 
}; 
 172 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
 174 #define next_page(a) ( trunc_page(a) + PAGE_SIZE ) 
 178 kern_return_t 
device_data_action( 
 179                uintptr_t               device_handle
,  
 180                ipc_port_t              device_pager
, 
 181                vm_prot_t               protection
,  
 182                vm_object_offset_t      offset
,  
 186     IOMemoryDescriptorReserved 
* ref 
= (IOMemoryDescriptorReserved 
*) device_handle
; 
 187     IOMemoryDescriptor 
* memDesc
; 
 190     memDesc 
= ref
->dp
.memory
; 
 194         kr 
= memDesc
->handleFault(device_pager
, offset
, size
); 
 204 kern_return_t 
device_close( 
 205                uintptr_t     device_handle
) 
 207     IOMemoryDescriptorReserved 
* ref 
= (IOMemoryDescriptorReserved 
*) device_handle
; 
 209     IODelete( ref
, IOMemoryDescriptorReserved
, 1 ); 
 211     return( kIOReturnSuccess 
); 
 215 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
 217 // Note this inline function uses C++ reference arguments to return values 
 218 // This means that pointers are not passed and NULLs don't have to be 
 219 // checked for as a NULL reference is illegal. 
 221 getAddrLenForInd(mach_vm_address_t 
&addr
, mach_vm_size_t 
&len
, // Output variables 
 222      UInt32 type
, IOGeneralMemoryDescriptor::Ranges r
, UInt32 ind
) 
 224     assert(kIOMemoryTypeUIO       
== type
 
 225         || kIOMemoryTypeVirtual   
== type 
|| kIOMemoryTypeVirtual64 
== type
 
 226         || kIOMemoryTypePhysical  
== type 
|| kIOMemoryTypePhysical64 
== type
); 
 227     if (kIOMemoryTypeUIO 
== type
) { 
 230         uio_getiov((uio_t
) r
.uio
, ind
, &ad
, &us
); addr 
= ad
; len 
= us
; 
 233     else if ((kIOMemoryTypeVirtual64 
== type
) || (kIOMemoryTypePhysical64 
== type
)) { 
 234         IOAddressRange cur 
= r
.v64
[ind
]; 
 238 #endif /* !__LP64__ */ 
 240         IOVirtualRange cur 
= r
.v
[ind
]; 
 246 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
 249 purgeableControlBits(IOOptionBits newState
, vm_purgable_t 
* control
, int * state
) 
 251     IOReturn err 
= kIOReturnSuccess
; 
 253     *control 
= VM_PURGABLE_SET_STATE
; 
 255     enum { kIOMemoryPurgeableControlMask 
= 15 }; 
 257     switch (kIOMemoryPurgeableControlMask 
& newState
) 
 259         case kIOMemoryPurgeableKeepCurrent
: 
 260             *control 
= VM_PURGABLE_GET_STATE
; 
 263         case kIOMemoryPurgeableNonVolatile
: 
 264             *state 
= VM_PURGABLE_NONVOLATILE
; 
 266         case kIOMemoryPurgeableVolatile
: 
 267             *state 
= VM_PURGABLE_VOLATILE 
| (newState 
& ~kIOMemoryPurgeableControlMask
); 
 269         case kIOMemoryPurgeableEmpty
: 
 270             *state 
= VM_PURGABLE_EMPTY 
| (newState 
& ~kIOMemoryPurgeableControlMask
); 
 273             err 
= kIOReturnBadArgument
; 
 277     if (*control 
== VM_PURGABLE_SET_STATE
) { 
 278         // let VM know this call is from the kernel and is allowed to alter 
 279         // the volatility of the memory entry even if it was created with 
 280         // MAP_MEM_PURGABLE_KERNEL_ONLY 
 281         *control 
= VM_PURGABLE_SET_STATE_FROM_KERNEL
; 
 288 purgeableStateBits(int * state
) 
 290     IOReturn err 
= kIOReturnSuccess
; 
 292     switch (VM_PURGABLE_STATE_MASK 
& *state
) 
 294         case VM_PURGABLE_NONVOLATILE
: 
 295             *state 
= kIOMemoryPurgeableNonVolatile
; 
 297         case VM_PURGABLE_VOLATILE
: 
 298             *state 
= kIOMemoryPurgeableVolatile
; 
 300         case VM_PURGABLE_EMPTY
: 
 301             *state 
= kIOMemoryPurgeableEmpty
; 
 304             *state 
= kIOMemoryPurgeableNonVolatile
; 
 305             err 
= kIOReturnNotReady
; 
 313 vmProtForCacheMode(IOOptionBits cacheMode
) 
 318         case kIOInhibitCache
: 
 319             SET_MAP_MEM(MAP_MEM_IO
, prot
); 
 322         case kIOWriteThruCache
: 
 323             SET_MAP_MEM(MAP_MEM_WTHRU
, prot
); 
 326         case kIOWriteCombineCache
: 
 327             SET_MAP_MEM(MAP_MEM_WCOMB
, prot
); 
 330         case kIOCopybackCache
: 
 331             SET_MAP_MEM(MAP_MEM_COPYBACK
, prot
); 
 334         case kIOCopybackInnerCache
: 
 335             SET_MAP_MEM(MAP_MEM_INNERWBACK
, prot
); 
 339             SET_MAP_MEM(MAP_MEM_POSTED
, prot
); 
 342         case kIODefaultCache
: 
 344             SET_MAP_MEM(MAP_MEM_NOOP
, prot
); 
 352 pagerFlagsForCacheMode(IOOptionBits cacheMode
) 
 354     unsigned int pagerFlags 
= 0; 
 357         case kIOInhibitCache
: 
 358             pagerFlags 
= DEVICE_PAGER_CACHE_INHIB 
|  DEVICE_PAGER_COHERENT 
| DEVICE_PAGER_GUARDED
; 
 361         case kIOWriteThruCache
: 
 362             pagerFlags 
= DEVICE_PAGER_WRITE_THROUGH 
| DEVICE_PAGER_COHERENT 
| DEVICE_PAGER_GUARDED
; 
 365         case kIOWriteCombineCache
: 
 366             pagerFlags 
= DEVICE_PAGER_CACHE_INHIB 
| DEVICE_PAGER_COHERENT
; 
 369         case kIOCopybackCache
: 
 370             pagerFlags 
= DEVICE_PAGER_COHERENT
; 
 373         case kIOCopybackInnerCache
: 
 374             pagerFlags 
= DEVICE_PAGER_COHERENT
; 
 378             pagerFlags 
= DEVICE_PAGER_CACHE_INHIB 
|  DEVICE_PAGER_COHERENT 
| DEVICE_PAGER_GUARDED 
| DEVICE_PAGER_EARLY_ACK
; 
 381         case kIODefaultCache
: 
 389 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
 390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
 399 struct IOMemoryReference
 
 401     volatile SInt32             refCount
; 
 405     struct IOMemoryReference  
* mapRef
; 
 406     IOMemoryEntry               entries
[0]; 
 411     kIOMemoryReferenceReuse 
= 0x00000001, 
 412     kIOMemoryReferenceWrite 
= 0x00000002, 
 413     kIOMemoryReferenceCOW   
= 0x00000004, 
 416 SInt32 gIOMemoryReferenceCount
; 
 419 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity
, IOMemoryReference 
* realloc
) 
 421     IOMemoryReference 
* ref
; 
 422     size_t              newSize
, oldSize
, copySize
; 
 424     newSize 
= (sizeof(IOMemoryReference
)  
 425                  - sizeof(ref
->entries
)  
 426                  + capacity 
* sizeof(ref
->entries
[0])); 
 427     ref 
= (typeof(ref
)) IOMalloc(newSize
); 
 430         oldSize 
= (sizeof(IOMemoryReference
)  
 431                         - sizeof(realloc
->entries
)  
 432                         + realloc
->capacity 
* sizeof(realloc
->entries
[0])); 
 434         if (copySize 
> newSize
) copySize 
= newSize
; 
 435         if (ref
) bcopy(realloc
, ref
, copySize
); 
 436         IOFree(realloc
, oldSize
); 
 440         bzero(ref
, sizeof(*ref
)); 
 442         OSIncrementAtomic(&gIOMemoryReferenceCount
); 
 444     if (!ref
) return (0); 
 445     ref
->capacity 
= capacity
; 
 450 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference 
* ref
) 
 452     IOMemoryEntry 
* entries
; 
 457         memoryReferenceFree(ref
->mapRef
); 
 461     entries 
= ref
->entries 
+ ref
->count
; 
 462     while (entries 
> &ref
->entries
[0]) 
 465         ipc_port_release_send(entries
->entry
); 
 467     size 
= (sizeof(IOMemoryReference
)  
 468                  - sizeof(ref
->entries
)  
 469                  + ref
->capacity 
* sizeof(ref
->entries
[0])); 
 472     OSDecrementAtomic(&gIOMemoryReferenceCount
); 
 476 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference 
* ref
) 
 478     if (1 == OSDecrementAtomic(&ref
->refCount
)) memoryReferenceFree(ref
); 
 483 IOGeneralMemoryDescriptor::memoryReferenceCreate( 
 484                         IOOptionBits         options
, 
 485                         IOMemoryReference 
** reference
) 
 487     enum { kCapacity 
= 4, kCapacityInc 
= 4 }; 
 490     IOMemoryReference 
*  ref
; 
 491     IOMemoryEntry 
*      entries
; 
 492     IOMemoryEntry 
*      cloneEntries
; 
 494     ipc_port_t           entry
, cloneEntry
; 
 496     memory_object_size_t actualSize
; 
 499     mach_vm_address_t    entryAddr
, endAddr
, entrySize
; 
 500     mach_vm_size_t       srcAddr
, srcLen
; 
 501     mach_vm_size_t       nextAddr
, nextLen
; 
 502     mach_vm_size_t       offset
, remain
; 
 504     IOOptionBits         type 
= (_flags 
& kIOMemoryTypeMask
); 
 505     IOOptionBits         cacheMode
; 
 506     unsigned int         pagerFlags
; 
 509     ref 
= memoryReferenceAlloc(kCapacity
, NULL
); 
 510     if (!ref
) return (kIOReturnNoMemory
); 
 512     tag 
= getVMTag(kernel_map
); 
 513     entries 
= &ref
->entries
[0]; 
 521         getAddrLenForInd(nextAddr
, nextLen
, type
, _ranges
, rangeIdx
); 
 525         nextAddr 
= getPhysicalSegment(offset
, &physLen
, kIOMemoryMapperNone
); 
 528         // default cache mode for physical 
 529         if (kIODefaultCache 
== ((_flags 
& kIOMemoryBufferCacheMask
) >> kIOMemoryBufferCacheShift
)) 
 532             pagerFlags 
= IODefaultCacheBits(nextAddr
); 
 533             if (DEVICE_PAGER_CACHE_INHIB 
& pagerFlags
) 
 535                 if (DEVICE_PAGER_EARLY_ACK 
& pagerFlags
) 
 536                     mode 
= kIOPostedWrite
; 
 537                 else if (DEVICE_PAGER_GUARDED 
& pagerFlags
) 
 538                     mode 
= kIOInhibitCache
; 
 540                     mode 
= kIOWriteCombineCache
; 
 542             else if (DEVICE_PAGER_WRITE_THROUGH 
& pagerFlags
) 
 543                 mode 
= kIOWriteThruCache
; 
 545                 mode 
= kIOCopybackCache
; 
 546             _flags 
|= (mode 
<< kIOMemoryBufferCacheShift
); 
 550     // cache mode & vm_prot 
 552     cacheMode 
= ((_flags 
& kIOMemoryBufferCacheMask
) >> kIOMemoryBufferCacheShift
); 
 553     prot 
|= vmProtForCacheMode(cacheMode
); 
 554     // VM system requires write access to change cache mode 
 555     if (kIODefaultCache 
!= cacheMode
)                    prot 
|= VM_PROT_WRITE
; 
 556     if (kIODirectionOut 
!= (kIODirectionOutIn 
& _flags
)) prot 
|= VM_PROT_WRITE
; 
 557     if (kIOMemoryReferenceWrite 
& options
)               prot 
|= VM_PROT_WRITE
; 
 558     if (kIOMemoryReferenceCOW   
& options
)               prot 
|= MAP_MEM_VM_COPY
; 
 560     if ((kIOMemoryReferenceReuse 
& options
) && _memRef
) 
 562         cloneEntries 
= &_memRef
->entries
[0]; 
 563         prot 
|= MAP_MEM_NAMED_REUSE
; 
 570         if (kIOMemoryBufferPageable 
& _flags
) 
 572             // IOBufferMemoryDescriptor alloc - set flags for entry + object create 
 573             prot 
|= MAP_MEM_NAMED_CREATE
; 
 574             if (kIOMemoryBufferPurgeable 
& _flags
) prot 
|= (MAP_MEM_PURGABLE 
| MAP_MEM_PURGABLE_KERNEL_ONLY
); 
 575             if (kIOMemoryUseReserve 
& _flags
)      prot 
|= MAP_MEM_GRAB_SECLUDED
; 
 577             prot 
|= VM_PROT_WRITE
; 
 580         else map 
= get_task_map(_task
); 
 589             // coalesce addr range 
 590             for (++rangeIdx
; rangeIdx 
< _rangesCount
; rangeIdx
++) 
 592                 getAddrLenForInd(nextAddr
, nextLen
, type
, _ranges
, rangeIdx
); 
 593                 if ((srcAddr 
+ srcLen
) != nextAddr
) break; 
 596             entryAddr 
= trunc_page_64(srcAddr
); 
 597             endAddr   
= round_page_64(srcAddr 
+ srcLen
); 
 600                 entrySize 
= (endAddr 
- entryAddr
); 
 601                 if (!entrySize
) break; 
 602                 actualSize 
= entrySize
; 
 604                 cloneEntry 
= MACH_PORT_NULL
; 
 605                 if (MAP_MEM_NAMED_REUSE 
& prot
) 
 607                     if (cloneEntries 
< &_memRef
->entries
[_memRef
->count
]) cloneEntry 
= cloneEntries
->entry
; 
 608                     else                                                  prot 
&= ~MAP_MEM_NAMED_REUSE
; 
 611                 err 
= mach_make_memory_entry_64(map
, 
 612                         &actualSize
, entryAddr
, prot
, &entry
, cloneEntry
); 
 614                 if (KERN_SUCCESS 
!= err
) break; 
 615                 if (actualSize 
> entrySize
) panic("mach_make_memory_entry_64 actualSize"); 
 617                 if (count 
>= ref
->capacity
) 
 619                     ref 
= memoryReferenceAlloc(ref
->capacity 
+ kCapacityInc
, ref
); 
 620                     entries 
= &ref
->entries
[count
]; 
 622                 entries
->entry  
= entry
; 
 623                 entries
->size   
= actualSize
; 
 624                 entries
->offset 
= offset 
+ (entryAddr 
- srcAddr
); 
 625                 entryAddr 
+= actualSize
; 
 626                 if (MAP_MEM_NAMED_REUSE 
& prot
) 
 628                     if ((cloneEntries
->entry  
== entries
->entry
) 
 629                      && (cloneEntries
->size   
== entries
->size
) 
 630                      && (cloneEntries
->offset 
== entries
->offset
))         cloneEntries
++; 
 631                      else                                    prot 
&= ~MAP_MEM_NAMED_REUSE
; 
 643         // _task == 0, physical or kIOMemoryTypeUPL 
 644         memory_object_t pager
; 
 645         vm_size_t       size 
= ptoa_32(_pages
); 
 647         if (!getKernelReserved()) panic("getKernelReserved"); 
 649         reserved
->dp
.pagerContig 
= (1 == _rangesCount
); 
 650         reserved
->dp
.memory      
= this; 
 652         pagerFlags 
= pagerFlagsForCacheMode(cacheMode
); 
 653         if (-1U == pagerFlags
) panic("phys is kIODefaultCache"); 
 654         if (reserved
->dp
.pagerContig
) pagerFlags 
|= DEVICE_PAGER_CONTIGUOUS
; 
 656         pager 
= device_pager_setup((memory_object_t
) 0, (uintptr_t) reserved
,  
 659         if (!pager
) err 
= kIOReturnVMError
; 
 663             entryAddr 
= trunc_page_64(srcAddr
); 
 664             err 
= mach_memory_object_memory_entry_64((host_t
) 1, false /*internal*/,  
 665                         size
, VM_PROT_READ 
| VM_PROT_WRITE
, pager
, &entry
); 
 666             assert (KERN_SUCCESS 
== err
); 
 667             if (KERN_SUCCESS 
!= err
) device_pager_deallocate(pager
); 
 670                 reserved
->dp
.devicePager 
= pager
; 
 671                 entries
->entry  
= entry
; 
 672                 entries
->size   
= size
; 
 673                 entries
->offset 
= offset 
+ (entryAddr 
- srcAddr
); 
 683     if (_task 
&& (KERN_SUCCESS 
== err
) 
 684       && (kIOMemoryMapCopyOnWrite 
& _flags
) 
 685       && !(kIOMemoryReferenceCOW 
& options
)) 
 687         err 
= memoryReferenceCreate(options 
| kIOMemoryReferenceCOW
, &ref
->mapRef
); 
 690     if (KERN_SUCCESS 
== err
) 
 692         if (MAP_MEM_NAMED_REUSE 
& prot
) 
 694             memoryReferenceFree(ref
); 
 695             OSIncrementAtomic(&_memRef
->refCount
); 
 701         memoryReferenceFree(ref
); 
 711 IOMemoryDescriptorMapAlloc(vm_map_t map
, void * _ref
) 
 713     IOMemoryDescriptorMapAllocRef 
* ref 
= (typeof(ref
))_ref
; 
 715     vm_map_offset_t                 addr
; 
 719     err 
= vm_map_enter_mem_object(map
, &addr
, ref
->size
, 
 721                                   (((ref
->options 
& kIOMapAnywhere
) 
 724                                   VM_MAP_KERNEL_FLAGS_NONE
, 
 727                                   (memory_object_offset_t
) 0, 
 732     if (KERN_SUCCESS 
== err
) 
 734         ref
->mapped 
= (mach_vm_address_t
) addr
; 
 742 IOGeneralMemoryDescriptor::memoryReferenceMap( 
 743                      IOMemoryReference 
* ref
, 
 745                      mach_vm_size_t      inoffset
, 
 747                      IOOptionBits        options
, 
 748                      mach_vm_address_t 
* inaddr
) 
 751     int64_t         offset 
= inoffset
; 
 752     uint32_t        rangeIdx
, entryIdx
; 
 753     vm_map_offset_t addr
, mapAddr
; 
 754     vm_map_offset_t pageOffset
, entryOffset
, remain
, chunk
; 
 756     mach_vm_address_t nextAddr
; 
 757     mach_vm_size_t    nextLen
; 
 759     IOMemoryEntry   
* entry
; 
 760     vm_prot_t         prot
, memEntryCacheMode
; 
 762     IOOptionBits      cacheMode
; 
 764     // for the kIOMapPrefault option. 
 765     upl_page_info_t 
* pageList 
= NULL
; 
 766     UInt              currentPageIndex 
= 0; 
 771         err 
= memoryReferenceMap(ref
->mapRef
, map
, inoffset
, size
, options
, inaddr
); 
 775     type 
= _flags 
& kIOMemoryTypeMask
; 
 778     if (!(kIOMapReadOnly 
& options
)) prot 
|= VM_PROT_WRITE
; 
 781     cacheMode 
= ((options 
& kIOMapCacheMask
) >> kIOMapCacheShift
); 
 782     if (kIODefaultCache 
!= cacheMode
) 
 784         // VM system requires write access to update named entry cache mode 
 785         memEntryCacheMode 
= (MAP_MEM_ONLY 
| VM_PROT_WRITE 
| prot 
| vmProtForCacheMode(cacheMode
)); 
 792         // Find first range for offset 
 793         if (!_rangesCount
) return (kIOReturnBadArgument
); 
 794         for (remain 
= offset
, rangeIdx 
= 0; rangeIdx 
< _rangesCount
; rangeIdx
++) 
 796             getAddrLenForInd(nextAddr
, nextLen
, type
, _ranges
, rangeIdx
); 
 797             if (remain 
< nextLen
) break; 
 805         nextAddr 
= getPhysicalSegment(offset
, &physLen
, kIOMemoryMapperNone
); 
 809     assert(remain 
< nextLen
); 
 810     if (remain 
>= nextLen
) return (kIOReturnBadArgument
); 
 814     pageOffset 
= (page_mask 
& nextAddr
); 
 818     if (!(options 
& kIOMapAnywhere
)) 
 821         if (pageOffset 
!= (page_mask 
& addr
)) return (kIOReturnNotAligned
); 
 825     // find first entry for offset 
 827         (entryIdx 
< ref
->count
) && (offset 
>= ref
->entries
[entryIdx
].offset
); 
 830     entry 
= &ref
->entries
[entryIdx
]; 
 833     size 
= round_page_64(size 
+ pageOffset
); 
 834     if (kIOMapOverwrite 
& options
) 
 836         if ((map 
== kernel_map
) && (kIOMemoryBufferPageable 
& _flags
)) 
 838             map 
= IOPageableMapForAddress(addr
); 
 844         IOMemoryDescriptorMapAllocRef ref
; 
 847         ref
.options 
= options
; 
 850         if (options 
& kIOMapAnywhere
) 
 851             // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE 
 855         if ((ref
.map 
== kernel_map
) && (kIOMemoryBufferPageable 
& _flags
)) 
 856             err 
= IOIteratePageableMaps( ref
.size
, &IOMemoryDescriptorMapAlloc
, &ref 
); 
 858             err 
= IOMemoryDescriptorMapAlloc(ref
.map
, &ref
); 
 859         if (KERN_SUCCESS 
== err
) 
 868      * If the memory is associated with a device pager but doesn't have a UPL, 
 869      * it will be immediately faulted in through the pager via populateDevicePager(). 
 870      * kIOMapPrefault is redundant in that case, so don't try to use it for UPL 
 873     if ((reserved 
!= NULL
) && (reserved
->dp
.devicePager
) && (_memoryEntries 
== NULL
) && (_wireCount 
!= 0)) 
 874         options 
&= ~kIOMapPrefault
; 
 877      * Prefaulting is only possible if we wired the memory earlier. Check the 
 878      * memory type, and the underlying data. 
 880     if (options 
& kIOMapPrefault
) 
 883          * The memory must have been wired by calling ::prepare(), otherwise 
 884          * we don't have the UPL. Without UPLs, pages cannot be pre-faulted 
 886         assert(_wireCount 
!= 0); 
 887         assert(_memoryEntries 
!= NULL
); 
 888         if ((_wireCount 
== 0) || 
 889             (_memoryEntries 
== NULL
)) 
 891             return kIOReturnBadArgument
; 
 894         // Get the page list. 
 895         ioGMDData
* dataP 
= getDataP(_memoryEntries
); 
 896         ioPLBlock 
const* ioplList 
= getIOPLList(dataP
); 
 897         pageList 
= getPageList(dataP
); 
 899         // Get the number of IOPLs. 
 900         UInt numIOPLs 
= getNumIOPL(_memoryEntries
, dataP
); 
 903          * Scan through the IOPL Info Blocks, looking for the first block containing 
 904          * the offset. The research will go past it, so we'll need to go back to the 
 905          * right range at the end. 
 908         while (ioplIndex 
< numIOPLs 
&& offset 
>= ioplList
[ioplIndex
].fIOMDOffset
) 
 912         // Retrieve the IOPL info block. 
 913         ioPLBlock ioplInfo 
= ioplList
[ioplIndex
]; 
 916          * For external UPLs, the fPageInfo points directly to the UPL's page_info_t 
 919         if (ioplInfo
.fFlags 
& kIOPLExternUPL
) 
 920             pageList 
= (upl_page_info_t
*) ioplInfo
.fPageInfo
; 
 922             pageList 
= &pageList
[ioplInfo
.fPageInfo
]; 
 924         // Rebase [offset] into the IOPL in order to looks for the first page index. 
 925         mach_vm_size_t offsetInIOPL 
= offset 
- ioplInfo
.fIOMDOffset 
+ ioplInfo
.fPageOffset
; 
 927         // Retrieve the index of the first page corresponding to the offset. 
 928         currentPageIndex 
= atop_32(offsetInIOPL
); 
 936     while (remain 
&& (KERN_SUCCESS 
== err
)) 
 938             entryOffset 
= offset 
- entry
->offset
; 
 939             if ((page_mask 
& entryOffset
) != pageOffset
)  
 941                 err 
= kIOReturnNotAligned
; 
 945             if (kIODefaultCache 
!= cacheMode
) 
 947                 vm_size_t unused 
= 0; 
 948                 err 
= mach_make_memory_entry(NULL 
/*unused*/, &unused
, 0 /*unused*/,  
 949                                              memEntryCacheMode
, NULL
, entry
->entry
); 
 950                 assert (KERN_SUCCESS 
== err
); 
 953             entryOffset 
-= pageOffset
; 
 954             if (entryOffset 
>= entry
->size
) panic("entryOffset"); 
 955             chunk 
= entry
->size 
- entryOffset
; 
 958                 vm_map_kernel_flags_t vmk_flags
; 
 960                 vmk_flags 
= VM_MAP_KERNEL_FLAGS_NONE
; 
 961                 vmk_flags
.vmkf_iokit_acct 
= TRUE
; /* iokit accounting */ 
 963                 if (chunk 
> remain
) chunk 
= remain
; 
 964                 if (options 
& kIOMapPrefault
)  
 966                     UInt nb_pages 
= round_page(chunk
) / PAGE_SIZE
; 
 968                     err 
= vm_map_enter_mem_object_prefault(map
, 
 972                                                             | VM_FLAGS_OVERWRITE
), 
 979                                                            &pageList
[currentPageIndex
], 
 982                     // Compute the next index in the page list. 
 983                     currentPageIndex 
+= nb_pages
; 
 984                     assert(currentPageIndex 
<= _pages
); 
 988                     err 
= vm_map_enter_mem_object(map
, 
 992                                                     | VM_FLAGS_OVERWRITE
), 
1002                 if (KERN_SUCCESS 
!= err
) break; 
1006                 offset   
+= chunk 
- pageOffset
; 
1011             if (entryIdx 
>= ref
->count
)  
1013                 err 
= kIOReturnOverrun
; 
1018     if ((KERN_SUCCESS 
!= err
) && didAlloc
) 
1020         (void) mach_vm_deallocate(map
, trunc_page_64(addr
), size
); 
1029 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( 
1030                                IOMemoryReference 
* ref
, 
1031                                IOByteCount       
* residentPageCount
, 
1032                                IOByteCount       
* dirtyPageCount
) 
1035     IOMemoryEntry 
* entries
; 
1036     unsigned int resident
, dirty
; 
1037     unsigned int totalResident
, totalDirty
; 
1039     totalResident 
= totalDirty 
= 0; 
1040     err 
= kIOReturnSuccess
; 
1041     entries 
= ref
->entries 
+ ref
->count
; 
1042     while (entries 
> &ref
->entries
[0]) 
1045         err 
= mach_memory_entry_get_page_counts(entries
->entry
, &resident
, &dirty
); 
1046         if (KERN_SUCCESS 
!= err
) break; 
1047         totalResident 
+= resident
; 
1048         totalDirty    
+= dirty
; 
1051     if (residentPageCount
) *residentPageCount 
= totalResident
; 
1052     if (dirtyPageCount
)    *dirtyPageCount    
= totalDirty
; 
1057 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable( 
1058                                 IOMemoryReference 
* ref
, 
1059                                 IOOptionBits        newState
, 
1060                                 IOOptionBits      
* oldState
) 
1063     IOMemoryEntry 
* entries
; 
1064     vm_purgable_t   control
; 
1065     int             totalState
, state
; 
1067     totalState 
= kIOMemoryPurgeableNonVolatile
; 
1068     err 
= kIOReturnSuccess
; 
1069     entries 
= ref
->entries 
+ ref
->count
; 
1070     while (entries 
> &ref
->entries
[0]) 
1074         err 
= purgeableControlBits(newState
, &control
, &state
); 
1075         if (KERN_SUCCESS 
!= err
) break; 
1076         err 
= memory_entry_purgeable_control_internal(entries
->entry
, control
, &state
); 
1077         if (KERN_SUCCESS 
!= err
) break; 
1078         err 
= purgeableStateBits(&state
); 
1079         if (KERN_SUCCESS 
!= err
) break; 
1081         if (kIOMemoryPurgeableEmpty 
== state
)              totalState 
= kIOMemoryPurgeableEmpty
; 
1082         else if (kIOMemoryPurgeableEmpty 
== totalState
)    continue; 
1083         else if (kIOMemoryPurgeableVolatile 
== totalState
) continue; 
1084         else if (kIOMemoryPurgeableVolatile 
== state
)      totalState 
= kIOMemoryPurgeableVolatile
; 
1085         else totalState 
= kIOMemoryPurgeableNonVolatile
; 
1088     if (oldState
) *oldState 
= totalState
; 
1092 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
1094 IOMemoryDescriptor 
* 
1095 IOMemoryDescriptor::withAddress(void *      address
, 
1097                                 IODirection direction
) 
1099     return IOMemoryDescriptor:: 
1100         withAddressRange((IOVirtualAddress
) address
, length
, direction 
| kIOMemoryAutoPrepare
, kernel_task
); 
1104 IOMemoryDescriptor 
* 
1105 IOMemoryDescriptor::withAddress(IOVirtualAddress address
, 
1107                                 IODirection  direction
, 
1110     IOGeneralMemoryDescriptor 
* that 
= new IOGeneralMemoryDescriptor
; 
1113         if (that
->initWithAddress(address
, length
, direction
, task
)) 
1120 #endif /* !__LP64__ */ 
1122 IOMemoryDescriptor 
* 
1123 IOMemoryDescriptor::withPhysicalAddress( 
1124                                 IOPhysicalAddress       address
, 
1126                                 IODirection             direction 
) 
1128     return (IOMemoryDescriptor::withAddressRange(address
, length
, direction
, TASK_NULL
)); 
1132 IOMemoryDescriptor 
* 
1133 IOMemoryDescriptor::withRanges( IOVirtualRange 
* ranges
, 
1135                                 IODirection      direction
, 
1139     IOGeneralMemoryDescriptor 
* that 
= new IOGeneralMemoryDescriptor
; 
1142         if (that
->initWithRanges(ranges
, withCount
, direction
, task
, asReference
)) 
1149 #endif /* !__LP64__ */ 
1151 IOMemoryDescriptor 
* 
1152 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address
, 
1153                                         mach_vm_size_t length
, 
1154                                         IOOptionBits   options
, 
1157     IOAddressRange range 
= { address
, length 
}; 
1158     return (IOMemoryDescriptor::withAddressRanges(&range
, 1, options
, task
)); 
1161 IOMemoryDescriptor 
* 
1162 IOMemoryDescriptor::withAddressRanges(IOAddressRange 
*   ranges
, 
1164                                         IOOptionBits     options
, 
1167     IOGeneralMemoryDescriptor 
* that 
= new IOGeneralMemoryDescriptor
; 
1171             options 
|= kIOMemoryTypeVirtual64
; 
1173             options 
|= kIOMemoryTypePhysical64
; 
1175         if (that
->initWithOptions(ranges
, rangeCount
, 0, task
, options
, /* mapper */ 0)) 
1188  * Create a new IOMemoryDescriptor. The buffer is made up of several 
1189  * virtual address ranges, from a given task. 
1191  * Passing the ranges as a reference will avoid an extra allocation. 
1193 IOMemoryDescriptor 
* 
1194 IOMemoryDescriptor::withOptions(void *          buffers
, 
1201     IOGeneralMemoryDescriptor 
*self 
= new IOGeneralMemoryDescriptor
; 
1204     && !self
->initWithOptions(buffers
, count
, offset
, task
, opts
, mapper
)) 
1213 bool IOMemoryDescriptor::initWithOptions(void *         buffers
, 
1217                                          IOOptionBits   options
, 
1224 IOMemoryDescriptor 
* 
1225 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange 
* ranges
, 
1227                                         IODirection     direction
, 
1230     IOGeneralMemoryDescriptor 
* that 
= new IOGeneralMemoryDescriptor
; 
1233         if (that
->initWithPhysicalRanges(ranges
, withCount
, direction
, asReference
)) 
1241 IOMemoryDescriptor 
* 
1242 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor 
*   of
, 
1245                                 IODirection             direction
) 
1247     return (IOSubMemoryDescriptor::withSubRange(of
, offset
, length
, direction
)); 
1249 #endif /* !__LP64__ */ 
1251 IOMemoryDescriptor 
* 
1252 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor 
*originalMD
) 
1254     IOGeneralMemoryDescriptor 
*origGenMD 
=  
1255         OSDynamicCast(IOGeneralMemoryDescriptor
, originalMD
); 
1258         return IOGeneralMemoryDescriptor:: 
1259             withPersistentMemoryDescriptor(origGenMD
); 
1264 IOMemoryDescriptor 
* 
1265 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor 
*originalMD
) 
1267     IOMemoryReference 
* memRef
; 
1269     if (kIOReturnSuccess 
!= originalMD
->memoryReferenceCreate(kIOMemoryReferenceReuse
, &memRef
)) return (0); 
1271     if (memRef 
== originalMD
->_memRef
) 
1273         originalMD
->retain();               // Add a new reference to ourselves 
1274         originalMD
->memoryReferenceRelease(memRef
); 
1278     IOGeneralMemoryDescriptor 
* self 
= new IOGeneralMemoryDescriptor
; 
1279     IOMDPersistentInitData initData 
= { originalMD
, memRef 
}; 
1282     && !self
->initWithOptions(&initData
, 1, 0, 0, kIOMemoryTypePersistentMD
, 0)) { 
1291 IOGeneralMemoryDescriptor::initWithAddress(void *      address
, 
1292                                     IOByteCount   withLength
, 
1293                                     IODirection withDirection
) 
1295     _singleRange
.v
.address 
= (vm_offset_t
) address
; 
1296     _singleRange
.v
.length  
= withLength
; 
1298     return initWithRanges(&_singleRange
.v
, 1, withDirection
, kernel_task
, true); 
1302 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address
, 
1303                                     IOByteCount    withLength
, 
1304                                     IODirection  withDirection
, 
1307     _singleRange
.v
.address 
= address
; 
1308     _singleRange
.v
.length  
= withLength
; 
1310     return initWithRanges(&_singleRange
.v
, 1, withDirection
, withTask
, true); 
1314 IOGeneralMemoryDescriptor::initWithPhysicalAddress( 
1315                                  IOPhysicalAddress      address
, 
1316                                  IOByteCount            withLength
, 
1317                                  IODirection            withDirection 
) 
1319     _singleRange
.p
.address 
= address
; 
1320     _singleRange
.p
.length  
= withLength
; 
1322     return initWithPhysicalRanges( &_singleRange
.p
, 1, withDirection
, true); 
1326 IOGeneralMemoryDescriptor::initWithPhysicalRanges( 
1327                                 IOPhysicalRange 
* ranges
, 
1329                                 IODirection       direction
, 
1332     IOOptionBits mdOpts 
= direction 
| kIOMemoryTypePhysical
; 
1335         mdOpts 
|= kIOMemoryAsReference
; 
1337     return initWithOptions(ranges
, count
, 0, 0, mdOpts
, /* mapper */ 0); 
1341 IOGeneralMemoryDescriptor::initWithRanges( 
1342                                    IOVirtualRange 
* ranges
, 
1344                                    IODirection      direction
, 
1348     IOOptionBits mdOpts 
= direction
; 
1351         mdOpts 
|= kIOMemoryAsReference
; 
1354         mdOpts 
|= kIOMemoryTypeVirtual
; 
1356         // Auto-prepare if this is a kernel memory descriptor as very few 
1357         // clients bother to prepare() kernel memory. 
1358         // But it was not enforced so what are you going to do? 
1359         if (task 
== kernel_task
) 
1360             mdOpts 
|= kIOMemoryAutoPrepare
; 
1363         mdOpts 
|= kIOMemoryTypePhysical
; 
1365     return initWithOptions(ranges
, count
, 0, task
, mdOpts
, /* mapper */ 0); 
1367 #endif /* !__LP64__ */ 
1372  *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges, 
1373  * from a given task, several physical ranges, an UPL from the ubc 
1374  * system or a uio (may be 64bit) from the BSD subsystem. 
1376  * Passing the ranges as a reference will avoid an extra allocation. 
1378  * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an 
1379  * existing instance -- note this behavior is not commonly supported in other 
1380  * I/O Kit classes, although it is supported here. 
1384 IOGeneralMemoryDescriptor::initWithOptions(void *       buffers
, 
1388                                            IOOptionBits options
, 
1391     IOOptionBits type 
= options 
& kIOMemoryTypeMask
; 
1395         && (kIOMemoryTypeVirtual 
== type
) 
1396         && vm_map_is_64bit(get_task_map(task
))  
1397         && ((IOVirtualRange 
*) buffers
)->address
) 
1399         OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()"); 
1402 #endif /* !__LP64__ */ 
1404     // Grab the original MD's configuation data to initialse the 
1405     // arguments to this function. 
1406     if (kIOMemoryTypePersistentMD 
== type
) { 
1408         IOMDPersistentInitData 
*initData 
= (typeof(initData
)) buffers
; 
1409         const IOGeneralMemoryDescriptor 
*orig 
= initData
->fMD
; 
1410         ioGMDData 
*dataP 
= getDataP(orig
->_memoryEntries
); 
1412         // Only accept persistent memory descriptors with valid dataP data. 
1413         assert(orig
->_rangesCount 
== 1); 
1414         if ( !(orig
->_flags 
& kIOMemoryPersistent
) || !dataP
) 
1417         _memRef 
= initData
->fMemRef
;    // Grab the new named entry 
1418         options 
= orig
->_flags 
& ~kIOMemoryAsReference
;  
1419         type 
= options 
& kIOMemoryTypeMask
; 
1420         buffers 
= orig
->_ranges
.v
; 
1421         count 
= orig
->_rangesCount
; 
1423         // Now grab the original task and whatever mapper was previously used 
1425         mapper 
= dataP
->fMapper
; 
1427         // We are ready to go through the original initialisation now 
1431     case kIOMemoryTypeUIO
: 
1432     case kIOMemoryTypeVirtual
: 
1434     case kIOMemoryTypeVirtual64
: 
1435 #endif /* !__LP64__ */ 
1441     case kIOMemoryTypePhysical
:         // Neither Physical nor UPL should have a task 
1443     case kIOMemoryTypePhysical64
: 
1444 #endif /* !__LP64__ */ 
1445     case kIOMemoryTypeUPL
: 
1449         return false;   /* bad argument */ 
1456      * We can check the _initialized  instance variable before having ever set 
1457      * it to an initial value because I/O Kit guarantees that all our instance 
1458      * variables are zeroed on an object's allocation. 
1463          * An existing memory descriptor is being retargeted to point to 
1464          * somewhere else.  Clean up our present state. 
1466         IOOptionBits type 
= _flags 
& kIOMemoryTypeMask
; 
1467         if ((kIOMemoryTypePhysical 
!= type
) && (kIOMemoryTypePhysical64 
!= type
)) 
1472         if (_ranges
.v 
&& !(kIOMemoryAsReference 
& _flags
)) 
1474             if (kIOMemoryTypeUIO 
== type
) 
1475                 uio_free((uio_t
) _ranges
.v
); 
1477             else if ((kIOMemoryTypeVirtual64 
== type
) || (kIOMemoryTypePhysical64 
== type
)) 
1478                 IODelete(_ranges
.v64
, IOAddressRange
, _rangesCount
); 
1479 #endif /* !__LP64__ */ 
1481                 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
); 
1484         options 
|= (kIOMemoryRedirected 
& _flags
); 
1485         if (!(kIOMemoryRedirected 
& options
)) 
1489                 memoryReferenceRelease(_memRef
); 
1493                 _mappings
->flushCollection(); 
1499         _initialized 
= true; 
1502     // Grab the appropriate mapper 
1503     if (kIOMemoryHostOrRemote 
& options
) options 
|= kIOMemoryMapperNone
; 
1504     if (kIOMemoryMapperNone 
& options
) 
1505         mapper 
= 0;     // No Mapper 
1506     else if (mapper 
== kIOMapperSystem
) { 
1507         IOMapper::checkForSystemMapper(); 
1508         gIOSystemMapper 
= mapper 
= IOMapper::gSystem
; 
1511     // Remove the dynamic internal use flags from the initial setting 
1512     options               
&= ~(kIOMemoryPreparedReadOnly
); 
1517     _direction             
= (IODirection
) (_flags 
& kIOMemoryDirectionMask
); 
1518 #endif /* !__LP64__ */ 
1521     __iomd_reservedA 
= 0; 
1522     __iomd_reservedB 
= 0; 
1525     if (kIOMemoryThreadSafe 
& options
) 
1528             _prepareLock 
= IOLockAlloc(); 
1530     else if (_prepareLock
) 
1532         IOLockFree(_prepareLock
); 
1533         _prepareLock 
= NULL
; 
1536     if (kIOMemoryTypeUPL 
== type
) { 
1539         unsigned int dataSize 
= computeDataSize(/* pages */ 0, /* upls */ 1); 
1541         if (!initMemoryEntries(dataSize
, mapper
)) return (false); 
1542         dataP 
= getDataP(_memoryEntries
); 
1543         dataP
->fPageCnt 
= 0; 
1544         switch (kIOMemoryDirectionMask 
& options
) 
1546             case kIODirectionOut
: 
1547                 dataP
->fDMAAccess 
= kIODMAMapReadAccess
; 
1549             case kIODirectionIn
: 
1550                 dataP
->fDMAAccess 
= kIODMAMapWriteAccess
; 
1552             case kIODirectionNone
: 
1553             case kIODirectionOutIn
: 
1555                 panic("bad dir for upl 0x%x\n", (int) options
); 
1558  //       _wireCount++; // UPLs start out life wired 
1561         _pages    
+= atop_32(offset 
+ count 
+ PAGE_MASK
) - atop_32(offset
); 
1564         iopl
.fIOPL 
= (upl_t
) buffers
; 
1565         upl_set_referenced(iopl
.fIOPL
, true); 
1566         upl_page_info_t 
*pageList 
= UPL_GET_INTERNAL_PAGE_LIST(iopl
.fIOPL
); 
1568         if (upl_get_size(iopl
.fIOPL
) < (count 
+ offset
)) 
1569             panic("short external upl"); 
1571         _highestPage 
= upl_get_highest_page(iopl
.fIOPL
); 
1573         // Set the flag kIOPLOnDevice convieniently equal to 1 
1574         iopl
.fFlags  
= pageList
->device 
| kIOPLExternUPL
; 
1575         if (!pageList
->device
) { 
1576             // Pre-compute the offset into the UPL's page list 
1577             pageList 
= &pageList
[atop_32(offset
)]; 
1578             offset 
&= PAGE_MASK
; 
1580         iopl
.fIOMDOffset 
= 0; 
1581         iopl
.fMappedPage 
= 0; 
1582         iopl
.fPageInfo 
= (vm_address_t
) pageList
; 
1583         iopl
.fPageOffset 
= offset
; 
1584         _memoryEntries
->appendBytes(&iopl
, sizeof(iopl
)); 
1587         // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO  
1588         // kIOMemoryTypePhysical | kIOMemoryTypePhysical64 
1590         // Initialize the memory descriptor 
1591         if (options 
& kIOMemoryAsReference
) { 
1593             _rangesIsAllocated 
= false; 
1594 #endif /* !__LP64__ */ 
1596             // Hack assignment to get the buffer arg into _ranges. 
1597             // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't 
1599             // This also initialises the uio & physical ranges. 
1600             _ranges
.v 
= (IOVirtualRange 
*) buffers
; 
1604             _rangesIsAllocated 
= true; 
1605 #endif /* !__LP64__ */ 
1608               case kIOMemoryTypeUIO
: 
1609                 _ranges
.v 
= (IOVirtualRange 
*) uio_duplicate((uio_t
) buffers
); 
1613               case kIOMemoryTypeVirtual64
: 
1614               case kIOMemoryTypePhysical64
: 
1617                     && (((IOAddressRange 
*) buffers
)->address 
+ ((IOAddressRange 
*) buffers
)->length
) <= 0x100000000ULL
 
1620                     if (kIOMemoryTypeVirtual64 
== type
) 
1621                         type 
= kIOMemoryTypeVirtual
; 
1623                         type 
= kIOMemoryTypePhysical
; 
1624                     _flags 
= (_flags 
& ~kIOMemoryTypeMask
) | type 
| kIOMemoryAsReference
; 
1625                     _rangesIsAllocated 
= false; 
1626                     _ranges
.v 
= &_singleRange
.v
; 
1627                     _singleRange
.v
.address 
= ((IOAddressRange 
*) buffers
)->address
; 
1628                     _singleRange
.v
.length  
= ((IOAddressRange 
*) buffers
)->length
; 
1631                 _ranges
.v64 
= IONew(IOAddressRange
, count
); 
1634                 bcopy(buffers
, _ranges
.v
, count 
* sizeof(IOAddressRange
)); 
1636 #endif /* !__LP64__ */ 
1637               case kIOMemoryTypeVirtual
: 
1638               case kIOMemoryTypePhysical
: 
1640                     _flags 
|= kIOMemoryAsReference
; 
1642                     _rangesIsAllocated 
= false; 
1643 #endif /* !__LP64__ */ 
1644                     _ranges
.v 
= &_singleRange
.v
; 
1646                     _ranges
.v 
= IONew(IOVirtualRange
, count
); 
1650                 bcopy(buffers
, _ranges
.v
, count 
* sizeof(IOVirtualRange
)); 
1654         _rangesCount 
= count
; 
1656         // Find starting address within the vector of ranges 
1657         Ranges vec 
= _ranges
; 
1658         mach_vm_size_t totalLength 
= 0; 
1659         unsigned int ind
, pages 
= 0; 
1660         for (ind 
= 0; ind 
< count
; ind
++) { 
1661             mach_vm_address_t addr
; 
1662             mach_vm_address_t endAddr
; 
1665             // addr & len are returned by this function 
1666             getAddrLenForInd(addr
, len
, type
, vec
, ind
); 
1667             if (os_add3_overflow(addr
, len
, PAGE_MASK
, &endAddr
))                   break; 
1668             if (os_add_overflow(pages
, (atop_64(endAddr
) - atop_64(addr
)), &pages
)) break; 
1669             if (os_add_overflow(totalLength
, len
, &totalLength
))                    break; 
1670             if ((kIOMemoryTypePhysical 
== type
) || (kIOMemoryTypePhysical64 
== type
)) 
1672                 ppnum_t highPage 
= atop_64(addr 
+ len 
- 1); 
1673                 if (highPage 
> _highestPage
) 
1674                     _highestPage 
= highPage
; 
1678          || (totalLength 
!= ((IOByteCount
) totalLength
))) return (false); /* overflow */ 
1680         _length      
= totalLength
; 
1683         // Auto-prepare memory at creation time. 
1684         // Implied completion when descriptor is free-ed 
1687         if ((kIOMemoryTypePhysical 
== type
) || (kIOMemoryTypePhysical64 
== type
)) 
1688             _wireCount
++;       // Physical MDs are, by definition, wired 
1689         else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */ 
1693             if (_pages 
> atop_64(max_mem
)) return false; 
1695             dataSize 
= computeDataSize(_pages
, /* upls */ count 
* 2); 
1696             if (!initMemoryEntries(dataSize
, mapper
)) return false; 
1697             dataP 
= getDataP(_memoryEntries
); 
1698             dataP
->fPageCnt 
= _pages
; 
1700             if (((_task 
!= kernel_task
) || (kIOMemoryBufferPageable 
& _flags
)) 
1701               && (VM_KERN_MEMORY_NONE 
== _kernelTag
)) 
1703                 _kernelTag 
= IOMemoryTag(kernel_map
); 
1706             if ( (kIOMemoryPersistent 
& _flags
) && !_memRef
) 
1709                 err 
= memoryReferenceCreate(0, &_memRef
); 
1710                 if (kIOReturnSuccess 
!= err
) return false; 
1713             if ((_flags 
& kIOMemoryAutoPrepare
) 
1714              && prepare() != kIOReturnSuccess
) 
1727 void IOGeneralMemoryDescriptor::free() 
1729     IOOptionBits type 
= _flags 
& kIOMemoryTypeMask
; 
1734         reserved
->dp
.memory 
= 0; 
1737     if ((kIOMemoryTypePhysical 
== type
) || (kIOMemoryTypePhysical64 
== type
)) 
1740         if (_memoryEntries 
&& (dataP 
= getDataP(_memoryEntries
)) && dataP
->fMappedBaseValid
) 
1742             dmaUnmap(dataP
->fMapper
, NULL
, 0, dataP
->fMappedBase
, dataP
->fMappedLength
); 
1743             dataP
->fMappedBaseValid 
= dataP
->fMappedBase 
= 0; 
1748         while (_wireCount
) complete(); 
1751     if (_memoryEntries
) _memoryEntries
->release(); 
1753     if (_ranges
.v 
&& !(kIOMemoryAsReference 
& _flags
)) 
1755         if (kIOMemoryTypeUIO 
== type
) 
1756             uio_free((uio_t
) _ranges
.v
); 
1758         else if ((kIOMemoryTypeVirtual64 
== type
) || (kIOMemoryTypePhysical64 
== type
)) 
1759             IODelete(_ranges
.v64
, IOAddressRange
, _rangesCount
); 
1760 #endif /* !__LP64__ */ 
1762             IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
); 
1769         if (reserved
->dp
.devicePager
) 
1771             // memEntry holds a ref on the device pager which owns reserved 
1772             // (IOMemoryDescriptorReserved) so no reserved access after this point 
1773             device_pager_deallocate( (memory_object_t
) reserved
->dp
.devicePager 
); 
1776             IODelete(reserved
, IOMemoryDescriptorReserved
, 1); 
1780     if (_memRef
)      memoryReferenceRelease(_memRef
); 
1781     if (_prepareLock
) IOLockFree(_prepareLock
); 
1787 void IOGeneralMemoryDescriptor::unmapFromKernel() 
1789     panic("IOGMD::unmapFromKernel deprecated"); 
1792 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex
) 
1794     panic("IOGMD::mapIntoKernel deprecated"); 
1796 #endif /* !__LP64__ */ 
1801  * Get the direction of the transfer. 
1803 IODirection 
IOMemoryDescriptor::getDirection() const 
1808 #endif /* !__LP64__ */ 
1809     return (IODirection
) (_flags 
& kIOMemoryDirectionMask
); 
1815  * Get the length of the transfer (over all ranges). 
1817 IOByteCount 
IOMemoryDescriptor::getLength() const 
1822 void IOMemoryDescriptor::setTag( IOOptionBits tag 
) 
1827 IOOptionBits 
IOMemoryDescriptor::getTag( void ) 
1832 uint64_t IOMemoryDescriptor::getFlags(void) 
1838 #pragma clang diagnostic push 
1839 #pragma clang diagnostic ignored "-Wdeprecated-declarations" 
1841 // @@@ gvdl: who is using this API?  Seems like a wierd thing to implement. 
1843 IOMemoryDescriptor::getSourceSegment( IOByteCount   offset
, IOByteCount 
* length 
) 
1845     addr64_t physAddr 
= 0; 
1847     if( prepare() == kIOReturnSuccess
) { 
1848         physAddr 
= getPhysicalSegment64( offset
, length 
); 
1852     return( (IOPhysicalAddress
) physAddr 
); // truncated but only page offset is used 
1855 #pragma clang diagnostic pop 
1857 #endif /* !__LP64__ */ 
1859 IOByteCount 
IOMemoryDescriptor::readBytes
 
1860                 (IOByteCount offset
, void *bytes
, IOByteCount length
) 
1862     addr64_t dstAddr 
= CAST_DOWN(addr64_t
, bytes
); 
1863     IOByteCount remaining
; 
1865     // Assert that this entire I/O is withing the available range 
1866     assert(offset 
<= _length
); 
1867     assert(offset 
+ length 
<= _length
); 
1868     if ((offset 
>= _length
) 
1869      || ((offset 
+ length
) > _length
)) { 
1873     assert (!(kIOMemoryRemote 
& _flags
)); 
1874     if (kIOMemoryRemote 
& _flags
) return (0); 
1876     if (kIOMemoryThreadSafe 
& _flags
) 
1879     remaining 
= length 
= min(length
, _length 
- offset
); 
1880     while (remaining
) { // (process another target segment?) 
1884         srcAddr64 
= getPhysicalSegment(offset
, &srcLen
, kIOMemoryMapperNone
); 
1888         // Clip segment length to remaining 
1889         if (srcLen 
> remaining
) 
1892         copypv(srcAddr64
, dstAddr
, srcLen
, 
1893                             cppvPsrc 
| cppvNoRefSrc 
| cppvFsnk 
| cppvKmap
); 
1897         remaining 
-= srcLen
; 
1900     if (kIOMemoryThreadSafe 
& _flags
) 
1905     return length 
- remaining
; 
1908 IOByteCount 
IOMemoryDescriptor::writeBytes
 
1909                 (IOByteCount inoffset
, const void *bytes
, IOByteCount length
) 
1911     addr64_t srcAddr 
= CAST_DOWN(addr64_t
, bytes
); 
1912     IOByteCount remaining
; 
1913     IOByteCount offset 
= inoffset
; 
1915     // Assert that this entire I/O is withing the available range 
1916     assert(offset 
<= _length
); 
1917     assert(offset 
+ length 
<= _length
); 
1919     assert( !(kIOMemoryPreparedReadOnly 
& _flags
) ); 
1921     if ( (kIOMemoryPreparedReadOnly 
& _flags
) 
1922      || (offset 
>= _length
) 
1923      || ((offset 
+ length
) > _length
)) { 
1927     assert (!(kIOMemoryRemote 
& _flags
)); 
1928     if (kIOMemoryRemote 
& _flags
) return (0); 
1930     if (kIOMemoryThreadSafe 
& _flags
) 
1933     remaining 
= length 
= min(length
, _length 
- offset
); 
1934     while (remaining
) { // (process another target segment?) 
1938         dstAddr64 
= getPhysicalSegment(offset
, &dstLen
, kIOMemoryMapperNone
); 
1942         // Clip segment length to remaining 
1943         if (dstLen 
> remaining
) 
1946         if (!srcAddr
) bzero_phys(dstAddr64
, dstLen
); 
1949             copypv(srcAddr
, (addr64_t
) dstAddr64
, dstLen
, 
1950                     cppvPsnk 
| cppvFsnk 
| cppvNoRefSrc 
| cppvNoModSnk 
| cppvKmap
); 
1954         remaining 
-= dstLen
; 
1957     if (kIOMemoryThreadSafe 
& _flags
) 
1962     if (!srcAddr
) performOperation(kIOMemoryIncoherentIOFlush
, inoffset
, length
); 
1964     return length 
- remaining
; 
1968 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position
) 
1970     panic("IOGMD::setPosition deprecated"); 
1972 #endif /* !__LP64__ */ 
1974 static volatile SInt64 gIOMDPreparationID 
__attribute__((aligned(8))) = (1ULL << 32); 
1977 IOGeneralMemoryDescriptor::getPreparationID( void ) 
1982         return (kIOPreparationIDUnprepared
); 
1984     if (((kIOMemoryTypeMask 
& _flags
) == kIOMemoryTypePhysical
) 
1985       || ((kIOMemoryTypeMask 
& _flags
) == kIOMemoryTypePhysical64
)) 
1987         IOMemoryDescriptor::setPreparationID(); 
1988         return (IOMemoryDescriptor::getPreparationID()); 
1991     if (!_memoryEntries 
|| !(dataP 
= getDataP(_memoryEntries
))) 
1992         return (kIOPreparationIDUnprepared
); 
1994     if (kIOPreparationIDUnprepared 
== dataP
->fPreparationID
) 
1996         dataP
->fPreparationID 
= OSIncrementAtomic64(&gIOMDPreparationID
); 
1998     return (dataP
->fPreparationID
); 
2001 IOMemoryDescriptorReserved 
* IOMemoryDescriptor::getKernelReserved( void ) 
2005         reserved 
= IONew(IOMemoryDescriptorReserved
, 1); 
2007             bzero(reserved
, sizeof(IOMemoryDescriptorReserved
)); 
2012 void IOMemoryDescriptor::setPreparationID( void ) 
2014     if (getKernelReserved() && (kIOPreparationIDUnprepared 
== reserved
->preparationID
)) 
2016         reserved
->preparationID 
= OSIncrementAtomic64(&gIOMDPreparationID
); 
2020 uint64_t IOMemoryDescriptor::getPreparationID( void ) 
2023         return (reserved
->preparationID
);     
2025         return (kIOPreparationIDUnsupported
);     
2028 void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag
, vm_tag_t userTag
) 
2030     _kernelTag 
= kernelTag
; 
2034 vm_tag_t 
IOMemoryDescriptor::getVMTag(vm_map_t map
) 
2036     if (vm_kernel_map_is_kernel(map
)) 
2038          if (VM_KERN_MEMORY_NONE 
!= _kernelTag
) return (_kernelTag
); 
2042          if (VM_KERN_MEMORY_NONE 
!= _userTag
)   return (_userTag
); 
2044     return (IOMemoryTag(map
)); 
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn err = kIOReturnSuccess;
    DMACommandOps params;
    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
    ioGMDData *dataP;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;

        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries && data->fMapper)
        {
            bool remap, keepMap;
            dataP = getDataP(_memoryEntries);

            if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
            if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;

            keepMap = (data->fMapper == gIOSystemMapper);
            keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

            remap = (!keepMap);
            remap |= (dataP->fDMAMapNumAddressBits < 64)
                  && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
            remap |= (dataP->fDMAMapAlignment > page_size);

            if (remap || !dataP->fMappedBaseValid)
            {
//              if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
                err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
                if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid)
                {
                    dataP->fMappedBase      = data->fAlloc;
                    dataP->fMappedBaseValid = true;
                    dataP->fMappedLength    = data->fAllocLength;
                    data->fAllocLength      = 0;                        // IOMD owns the alloc now
                }
            }
            else
            {
                data->fAlloc = dataP->fMappedBase;
                data->fAllocLength = 0;                         // give out IOMD map
                md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
            }
            data->fMapContig = !dataP->fDiscontig;
        }
        return (err);
    }
    if (kIOMDDMAUnmap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

        return kIOReturnSuccess;
    }

    if (kIOMDAddDMAMapSpec == op)
    {
        if (dataSize < sizeof(IODMAMapSpecification))
            return kIOReturnUnderrun;

        IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries)
        {
            dataP = getDataP(_memoryEntries);
            if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
                dataP->fDMAMapNumAddressBits = data->numAddressBits;
            if (data->alignment > dataP->fDMAMapAlignment)
                dataP->fDMAMapAlignment = data->alignment;
        }
        return kIOReturnSuccess;
    }

    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries)
            {
                dataP = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(dataP);
                UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
        }

        return kIOReturnSuccess;
    }
    else if (kIOMDDMAActive == op)
    {
        if (params)
        {
            int16_t prior;
            prior = OSAddAtomic16(1, &md->_dmaReferences);
            if (!prior) md->_mapName = NULL;
        }
        else
        {
            if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences);
            else                    panic("_dmaReferences underflow");
        }
    }
    else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached);

    if (IOMapper::gSystem && mapped
        && (!(kIOMemoryHostOnly & _flags))
        && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid))
//      && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
    {
        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        dataP = getDataP(_memoryEntries);
        if (dataP->fMapper)
        {
            IODMAMapSpecification mapSpec;
            bzero(&mapSpec, sizeof(mapSpec));
            mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
            mapSpec.alignment = dataP->fDMAMapAlignment;
            err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
            if (kIOReturnSuccess != err) return (err);
            dataP->fMappedBaseValid = true;
        }
    }

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if (!params
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;      // Start from beginning

    UInt length;
    UInt64 address;

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }
        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;                       // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && dataP->fMappedBaseValid) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
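
/*
 * Illustrative sketch (not part of the original source): the op-dispatch above
 * is normally driven by IODMACommand rather than called directly. A typical
 * (hypothetical) driver sequence looks roughly like this; `md` and the device
 * constraints are assumptions.
 *
 *   IODMACommand * cmd = IODMACommand::withSpecification(
 *       kIODMACommandOutputHost64,     // segment output function
 *       32,                            // device supports 32-bit addresses
 *       0);                            // no maximum segment size
 *   if (cmd && (kIOReturnSuccess == cmd->setMemoryDescriptor(md))) {
 *       UInt64 offset = 0;
 *       IODMACommand::Segment64 seg;
 *       UInt32 numSeg = 1;
 *       while ((kIOReturnSuccess == cmd->gen64IOVMSegments(&offset, &seg, &numSeg))
 *              && numSeg) {
 *           // program seg.fIOVMAddr / seg.fLength into the device
 *           numSeg = 1;
 *       }
 *       cmd->clearMemoryDescriptor();
 *   }
 *   if (cmd) cmd->release();
 */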
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    IOReturn          ret;
    mach_vm_address_t address = 0;
    mach_vm_size_t    length  = 0;
    IOMapper *        mapper  = gIOSystemMapper;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    if (lengthOfSegment)
        *lengthOfSegment = 0;

    if (offset >= _length)
        return 0;

    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        mach_vm_address_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            mach_vm_address_t newAddr;
            mach_vm_size_t    newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr; // Truncate address to 32bit
    }
    else
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
                DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                                        ret, this, state->fOffset,
                                        state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
        // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

        if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
        {
            if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
            {
                addr64_t    origAddr = address;
                IOByteCount origLen  = length;

                address = mapper->mapToPhysicalAddress(origAddr);
                length = page_size - (address & (page_size - 1));
                while ((length < origLen)
                    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
                    length += page_size;
                if (length > origLen)
                    length = origLen;
            }
        }
    }

    if (!address)
        length = 0;

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}
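
/*
 * Illustrative sketch (not part of the original source): walking a descriptor
 * segment by segment with getPhysicalSegment(). `md` and the per-segment
 * handling are hypothetical.
 *
 *   IOByteCount offset = 0;
 *   while (offset < md->getLength()) {
 *       IOByteCount segLen  = 0;
 *       addr64_t    segPhys = md->getPhysicalSegment(offset, &segLen,
 *                                                    kIOMemoryMapperNone);
 *       if (!segPhys || !segLen) break;
 *       // [segPhys, segPhys + segLen) is physically contiguous
 *       offset += segLen;
 *   }
 */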
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}
#pragma clang diagnostic pop

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    addr64_t    address = 0;
    IOByteCount length  = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);
    if (lengthOfSegment)
        length = *lengthOfSegment;

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
                    address, (long) length, (getMetaClass())->getClassName());
    }

    return ((IOPhysicalAddress) address);
}

addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64 = mapper->mapToPhysicalAddress(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}

IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                        IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
    DMACommandOps params;
    IOReturn err;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fSGCount = 0;
        data->fDirection = getDirection();
        data->fIsPrepared = true;       // Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset  = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    }
    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
    else if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());

        data->fMapContig = true;
        err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

        return (err);
    }
    else if (kIOMDDMAUnmap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

        return (kIOReturnSuccess);
    }
    else return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                                   IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;

    vm_purgable_t control;
    int           state;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (_memRef)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            // Find the appropriate vm_map for the given task
            vm_map_t curMap;
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else if (!_task)
            {
                err = kIOReturnUnsupported;
                break;
            }
            else
            {
                curMap = get_task_map(_task);
                if (NULL == curMap)
                {
                    err = KERN_INVALID_ARGUMENT;
                    break;
                }
            }

            // can only do one range
            Ranges vec = _ranges;
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            mach_vm_address_t addr;
            mach_vm_size_t    len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = vm_map_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }

    return (err);
}

IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
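
/*
 * Illustrative sketch (not part of the original source): typical purgeable
 * state transitions through setPurgeable(). `md` is hypothetical and must wrap
 * purgeable memory (for example an IOBufferMemoryDescriptor created with
 * kIOMemoryPurgeable).
 *
 *   IOOptionBits oldState = 0;
 *   // Mark the memory volatile; the VM may discard it under pressure
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *   // ... later, before using the contents again ...
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *   if (kIOMemoryPurgeableEmpty == oldState) {
 *       // contents were discarded while volatile and must be regenerated
 *   }
 */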
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
                                            IOByteCount * dirtyPageCount )
{
    IOReturn err = kIOReturnNotReady;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
    else
    {
        IOMultiMemoryDescriptor * mmd;
        IOSubMemoryDescriptor   * smd;
        if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
        {
            err = smd->getPageCounts(residentPageCount, dirtyPageCount);
        }
        else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
        {
            err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
        }
    }
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
#if defined(__arm__) || defined(__arm64__)
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
#else /* defined(__arm__) || defined(__arm64__) */
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
#endif /* defined(__arm__) || defined(__arm64__) */

static void SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_clear_noencrypt(page);
    }
}

static void ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_set_noencrypt(page);
    }
}
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
                                                IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;
#if defined(__arm__) || defined(__arm64__)
    void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0;
    unsigned int res = 0;
#endif

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
#if defined(__arm__) || defined(__arm64__)
            func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
            func_ext(0, 0, 0, &res);
            return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
            break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
            func = &dcache_incoherent_io_flush64;
            break;
#endif /* defined(__arm__) || defined(__arm64__) */
        case kIOMemoryIncoherentIOStore:
#if defined(__arm__) || defined(__arm64__)
            func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
            func_ext(0, 0, 0, &res);
            return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
            break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
            func = &dcache_incoherent_io_store64;
            break;
#endif /* defined(__arm__) || defined(__arm64__) */

        case kIOMemorySetEncrypted:
            func = &SetEncryptOp;
            break;
        case kIOMemoryClearEncrypted:
            func = &ClearEncryptOp;
            break;
    }

#if defined(__arm__) || defined(__arm64__)
    if ((func == 0) && (func_ext == 0))
        return (kIOReturnUnsupported);
#else /* defined(__arm__) || defined(__arm64__) */
    if (!func)
        return (kIOReturnUnsupported);
#endif /* defined(__arm__) || defined(__arm64__) */

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

#if defined(__arm__) || defined(__arm64__)
        if (func)
            (*func)(dstAddr64, dstLen);
        if (func_ext)
        {
            (*func_ext)(dstAddr64, dstLen, remaining, &res);
            if (res != 0x0UL)
            {
                remaining = 0;
                break;
            }
        }
#else /* defined(__arm__) || defined(__arm64__) */
        (*func)(dstAddr64, dstLen);
#endif /* defined(__arm__) || defined(__arm64__) */

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
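
/*
 * Illustrative sketch (not part of the original source): on platforms with
 * non-coherent DMA, a driver can use performOperation() to push CPU writes out
 * to memory before a device reads them, and to flush stale cache lines after a
 * device writes the buffer. `md` is hypothetical.
 *
 *   // CPU filled the buffer; make the data visible to the device
 *   md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 *   // ... device DMA runs ...
 *   // Device wrote the buffer; flush before the CPU reads the results
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */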
#if defined(__i386__) || defined(__x86_64__)

#define io_kernel_static_start  vm_kernel_stext
#define io_kernel_static_end    vm_kernel_etext

#elif defined(__arm__) || defined(__arm64__)

extern vm_offset_t              static_memory_end;

#if defined(__arm64__)
#define io_kernel_static_start vm_kext_base
#else /* defined(__arm64__) */
#define io_kernel_static_start vm_kernel_stext
#endif /* defined(__arm64__) */

#define io_kernel_static_end    static_memory_end

#else
#error io_kernel_static_end is undefined for this architecture
#endif

static kern_return_t
io_get_kernel_static_upl(
        vm_map_t                /* map */,
        uintptr_t               offset,
        upl_size_t              *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].free_when_done = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnSuccess;
    ioGMDData *dataP;
    upl_page_info_array_t pageInfo;
    ppnum_t mapBase = 0;
    vm_tag_t tag = VM_KERN_MEMORY_NONE;

    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
        forDirection = (IODirection) (forDirection | getDirection());

    dataP = getDataP(_memoryEntries);
    upl_control_flags_t uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
        case kIODirectionOut:
            // Pages do not need to be marked as dirty on commit
            uplFlags = UPL_COPYOUT_FROM;
            dataP->fDMAAccess = kIODMAMapReadAccess;
            break;

        case kIODirectionIn:
            dataP->fDMAAccess = kIODMAMapWriteAccess;
            uplFlags = 0;       // i.e. ~UPL_COPYOUT_FROM
            break;

        default:
            dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
            uplFlags = 0;       // i.e. ~UPL_COPYOUT_FROM
            break;
    }

    if (_wireCount)
    {
        if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
        {
            OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
            error = kIOReturnNotWritable;
        }
    }
    else
    {
        IOMapper *mapper;

        mapper = dataP->fMapper;
        dataP->fMappedBaseValid = dataP->fMappedBase = 0;

        uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
        tag = _kernelTag;
        if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map);

        if (kIODirectionPrepareToPhys32 & forDirection)
        {
            if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
            if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
        }
        if (kIODirectionPrepareNoFault    & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
        if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
        if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;

        mapBase = 0;

        // Note that appendBytes(NULL) zeros the data up to the desired length
        //           and the length parameter is an unsigned int
        size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
        if (uplPageSize > ((unsigned int)uplPageSize))    return (kIOReturnNoMemory);
        if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
        dataP = 0;

        // Find the appropriate vm_map for the given task
        vm_map_t curMap;
        if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))            curMap = 0;
        else                                                     curMap = get_task_map(_task);

        // Iterate over the vector of virtual ranges
        Ranges vec = _ranges;
        unsigned int pageIndex  = 0;
        IOByteCount mdOffset    = 0;
        ppnum_t highestPage     = 0;

        IOMemoryEntry * memRefEntry = 0;
        if (_memRef) memRefEntry = &_memRef->entries[0];

        for (UInt range = 0; range < _rangesCount; range++) {
            ioPLBlock iopl;
            mach_vm_address_t startPage;
            mach_vm_size_t    numBytes;
            ppnum_t highPage = 0;

            // Get the startPage address and length of vec[range]
            getAddrLenForInd(startPage, numBytes, type, vec, range);
            iopl.fPageOffset = startPage & PAGE_MASK;
            numBytes += iopl.fPageOffset;
            startPage = trunc_page_64(startPage);

            if (mapper)
                iopl.fMappedPage = mapBase + pageIndex;
            else
                iopl.fMappedPage = 0;

            // Iterate over the current range, creating UPLs
            while (numBytes) {
                vm_address_t kernelStart = (vm_address_t) startPage;
                vm_map_t theMap;
                if (curMap) theMap = curMap;
                else if (_memRef)
                {
                    theMap = NULL;
                }
                else
                {
                    assert(_task == kernel_task);
                    theMap = IOPageableMapForAddress(kernelStart);
                }

                // ioplFlags is an in/out parameter
                upl_control_flags_t ioplFlags = uplFlags;
                dataP = getDataP(_memoryEntries);
                pageInfo = getPageList(dataP);
                upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

                mach_vm_size_t _ioplSize    = round_page(numBytes);
                upl_size_t     ioplSize     = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
                unsigned int   numPageInfo  = atop_32(ioplSize);

                if ((theMap == kernel_map)
                 && (kernelStart >= io_kernel_static_start)
                 && (kernelStart <  io_kernel_static_end)) {
                    error = io_get_kernel_static_upl(theMap,
                                                    kernelStart,
                                                    &ioplSize,
                                                    &iopl.fIOPL,
                                                    baseInfo,
                                                    &numPageInfo,
                                                    &highPage);
                }
                else if (_memRef) {
                    memory_object_offset_t entryOffset;

                    entryOffset = mdOffset;
                    entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
                    if (entryOffset >= memRefEntry->size) {
                        memRefEntry++;
                        if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
                        entryOffset = 0;
                    }
                    if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
                    error = memory_object_iopl_request(memRefEntry->entry,
                                                       entryOffset,
                                                       &ioplSize,
                                                       &iopl.fIOPL,
                                                       baseInfo,
                                                       &numPageInfo,
                                                       &ioplFlags,
                                                       tag);
                }
                else {
                    assert(theMap);
                    error = vm_map_create_upl(theMap,
                                                    startPage,
                                                    (upl_size_t*)&ioplSize,
                                                    &iopl.fIOPL,
                                                    baseInfo,
                                                    &numPageInfo,
                                                    &ioplFlags,
                                                    tag);
                }

                if (error != KERN_SUCCESS) goto abortExit;

                assert(ioplSize);

                if (iopl.fIOPL)
                    highPage = upl_get_highest_page(iopl.fIOPL);
                if (highPage > highestPage)
                    highestPage = highPage;

                if (baseInfo->device) {
                    numPageInfo = 1;
                    iopl.fFlags = kIOPLOnDevice;
                }
                else {
                    iopl.fFlags = 0;
                }

                iopl.fIOMDOffset = mdOffset;
                iopl.fPageInfo = pageIndex;
                if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;

                if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                    // Clean up partial created and unsaved iopl
                    if (iopl.fIOPL) {
                        upl_abort(iopl.fIOPL, 0);
                        upl_deallocate(iopl.fIOPL);
                    }
                    goto abortExit;
                }
                dataP = 0;

                // Check for a multiple iopl's in one virtual range
                pageIndex += numPageInfo;
                mdOffset -= iopl.fPageOffset;
                if (ioplSize < numBytes) {
                    numBytes -= ioplSize;
                    startPage += ioplSize;
                    mdOffset += ioplSize;
                    iopl.fPageOffset = 0;
                    if (mapper) iopl.fMappedPage = mapBase + pageIndex;
                }
                else {
                    mdOffset += numBytes;
                    break;
                }
            }
        }

        _highestPage = highestPage;

        if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
    }

#if IOTRACKING
    if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error))
    {
        dataP = getDataP(_memoryEntries);
        if (!dataP->fWireTracking.link.next)
        {
            IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
        }
    }
#endif /* IOTRACKING */

    return (error);

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
             upl_abort(ioplList[range].fIOPL, 0);
             upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;
    else if (error == KERN_MEMORY_ERROR)
        error = kIOReturnNoResources;

    return error;
}
bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
    ioGMDData * dataP;
    unsigned    dataSize = size;

    if (!_memoryEntries) {
        _memoryEntries = OSData::withCapacity(dataSize);
        if (!_memoryEntries)
            return false;
    }
    else if (!_memoryEntries->initWithCapacity(dataSize))
        return false;

    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
    dataP = getDataP(_memoryEntries);

    if (mapper == kIOMapperWaitSystem) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }
    dataP->fMapper               = mapper;
    dataP->fPageCnt              = 0;
    dataP->fMappedBase           = 0;
    dataP->fDMAMapNumAddressBits = 64;
    dataP->fDMAMapAlignment      = 0;
    dataP->fPreparationID        = kIOPreparationIDUnprepared;
    dataP->fDiscontig            = false;
    dataP->fCompletionError      = false;
    dataP->fMappedBaseValid      = false;

    return (true);
}
IOReturn
IOMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn err;
    uint32_t mapOptions;

    mapOptions = 0;
    mapOptions |= kIODMAMapReadAccess;
    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

    err = mapper->iovmMapMemory(this, offset, length, mapOptions,
                                mapSpec, command, NULL, mapAddress, mapLength);

    if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);

    return (err);
}
void IOMemoryDescriptor::dmaMapRecord(
    IOMapper                    * mapper,
    IODMACommand                * command,
    uint64_t                      mapLength)
{
    kern_allocation_name_t alloc;
    int16_t                prior;

    if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */)
    {
        kern_allocation_update_size(mapper->fAllocName, mapLength);
    }

    if (!command) return;
    prior = OSAddAtomic16(1, &_dmaReferences);
    if (!prior)
    {
        if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag))
        {
            _mapName  = alloc;
            mapLength = _length;
            kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
        }
        else _mapName = NULL;
    }
}
IOReturn
IOMemoryDescriptor::dmaUnmap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    uint64_t                      offset,
    uint64_t                      mapAddress,
    uint64_t                      mapLength)
{
    IOReturn ret;
    kern_allocation_name_t alloc;
    kern_allocation_name_t mapName;
    int16_t prior;

    mapName = 0;
    prior = 0;
    if (command)
    {
        mapName = _mapName;
        if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences);
        else                panic("_dmaReferences underflow");
    }

    if (!mapLength) return (kIOReturnSuccess);

    ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

    if ((alloc = mapper->fAllocName))
    {
        kern_allocation_update_size(alloc, -mapLength);
        if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag))
        {
            mapLength = _length;
            kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
        }
    }

    return (ret);
}
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn          err = kIOReturnSuccess;
    ioGMDData *       dataP;
    IOOptionBits      type = _flags & kIOMemoryTypeMask;

    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
    if (kIOMemoryRemote & _flags)   return (kIOReturnNotAttached);

    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
     || offset || (length != _length))
    {
        err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
    }
    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
    {
        const ioPLBlock * ioplList = getIOPLList(dataP);
        upl_page_info_t * pageList;
        uint32_t          mapOptions = 0;

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
        mapSpec.alignment = dataP->fDMAMapAlignment;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplList->fFlags & kIOPLExternUPL)
        {
            pageList = (upl_page_info_t *) ioplList->fPageInfo;
            mapOptions |= kIODMAMapPagingPath;
        }
        else pageList = getPageList(dataP);

        if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
        {
            mapOptions |= kIODMAMapPageListFullyOccupied;
        }

        assert(dataP->fDMAAccess);
        mapOptions |= dataP->fDMAAccess;

        // Check for direct device non-paged memory
        if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;

        IODMAMapPageList dmaPageList =
        {
                .pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
                .pageListCount = _pages,
                .pageList      = &pageList[0]
        };
        err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
                                    command, &dmaPageList, mapAddress, mapLength);

        if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
    }

    return (err);
}
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn     error = kIOReturnSuccess;
    IOOptionBits type  = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (_prepareLock) IOLockLock(_prepareLock);

    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
    {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
    {
        if (1 == ++_wireCount)
        {
            if (kIOMemoryClearEncrypt & _flags)
            {
                performOperation(kIOMemoryClearEncrypted, 0, _length);
            }
        }
    }

    if (_prepareLock) IOLockUnlock(_prepareLock);

    return error;
}
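
/*
 * Illustrative sketch (not part of the original source): prepare() and
 * complete() must bracket any I/O against pageable memory. `md` is
 * hypothetical.
 *
 *   if (kIOReturnSuccess == md->prepare()) {
 *       // memory is now wired; safe to generate physical/DMA addresses
 *       // ... program and run the I/O ...
 *       md->complete();   // every successful prepare() needs a matching complete()
 *   }
 */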
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare() was previously
 * issued; the prepare() and complete() calls must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    ioGMDData * dataP;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (_prepareLock) IOLockLock(_prepareLock);
    do
    {
        if (!_wireCount) break;
        dataP = getDataP(_memoryEntries);
        if (!dataP)      break;

        if (kIODirectionCompleteWithError & forDirection)  dataP->fCompletionError = true;

        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
        {
            performOperation(kIOMemorySetEncrypted, 0, _length);
        }

        _wireCount--;
        if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
        {
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, count = getNumIOPL(_memoryEntries, dataP);

            if (_wireCount)
            {
                // kIODirectionCompleteWithDataValid & forDirection
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
                {
                    vm_tag_t tag;
                    tag = getVMTag(kernel_map);
                    for (ind = 0; ind < count; ind++)
                    {
                        if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag);
                    }
                }
            }
            else
            {
                if (_dmaReferences) panic("complete() while dma active");

                if (dataP->fMappedBaseValid) {
                    dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
                    dataP->fMappedBaseValid = dataP->fMappedBase = 0;
                }
#if IOTRACKING
                if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
#endif /* IOTRACKING */
                // Only complete iopls that we created which are for TypeVirtual
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
                {
                    for (ind = 0; ind < count; ind++)
                        if (ioplList[ind].fIOPL) {
                            if (dataP->fCompletionError)
                                upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
                            else
                                upl_commit(ioplList[ind].fIOPL, 0, 0);
                            upl_deallocate(ioplList[ind].fIOPL);
                        }
                } else if (kIOMemoryTypeUPL == type) {
                    upl_set_referenced(ioplList[0].fIOPL, false);
                }

                (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

                dataP->fPreparationID = kIOPreparationIDUnprepared;
                _flags &= ~kIOMemoryPreparedReadOnly;
            }
        }
    }
    while (false);

    if (_prepareLock) IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
IOReturn
IOGeneralMemoryDescriptor::doMap(
        vm_map_t                __addressMap,
        IOVirtualAddress *      __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    kern_return_t  err;

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    mach_vm_address_t range0Addr = 0;
    mach_vm_size_t    range0Len = 0;

    if ((offset >= _length) || ((offset + length) > _length))
        return( kIOReturnBadArgument );

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (0);

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if (_task
     && (mapping->fAddressTask == _task)
     && (mapping->fAddressMap == get_task_map(_task))
     && (options & kIOMapAnywhere)
     && (1 == _rangesCount)
     && (0 == offset)
     && range0Addr
     && (length <= range0Len))
    {
        mapping->fAddress = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if (!_memRef)
    {
        IOOptionBits createOptions = 0;
        if (!(kIOMapReadOnly & options))
        {
            createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
            if (kIODirectionOut == (kIODirectionOutIn & _flags))
            {
                OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
            }
#endif
        }
        err = memoryReferenceCreate(createOptions, &_memRef);
        if (kIOReturnSuccess != err) return (err);
    }

    memory_object_t pager;
    pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);

    // <upl_transpose //
    if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
    {
        do
        {
            upl_t               redirUPL2;
            upl_size_t          size;
            upl_control_flags_t flags;
            unsigned int        lock_count;

            if (!_memRef || (1 != _memRef->count))
            {
                err = kIOReturnNotReadable;
                break;
            }

            size = round_page(mapping->fLength);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
                                            NULL, NULL,
                                            &flags, getVMTag(kernel_map)))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
            }

            {
                // swap the memEntries since they now refer to different vm_objects
                IOMemoryReference * me = _memRef;
                _memRef = mapping->fMemory->_memRef;
                mapping->fMemory->_memRef = me;
            }
            if (pager)
                err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        while (false);
    }
    // upl_transpose> //
    else
    {
        err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
#if IOTRACKING
        if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
        {
            // only dram maps in the default on development case
            IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
        }
#endif /* IOTRACKING */
        if ((err == KERN_SUCCESS) && pager)
        {
            err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

            if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
            else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
            {
                mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
            }
        }
    }

    return (err);
}
#if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
                    mach_vm_address_t * address, mach_vm_size_t * size)
{
#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))

    IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

    if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);

    *task    = map->fAddressTask;
    *address = map->fAddress;
    *size    = map->fLength;

    return (kIOReturnSuccess);
}
#endif /* IOTRACKING */

IOReturn
IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        __address,
        IOByteCount             __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOMemoryMap::init(
        task_t                  intoTask,
        mach_vm_address_t       toAddress,
        IOOptionBits            _options,
        mach_vm_size_t          _offset,
        mach_vm_size_t          _length )
{
    if (!intoTask)
        return( false);

    if (!super::init())
        return( false);

    fAddressMap  = get_task_map(intoTask);
    if (!fAddressMap)
        return( false);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return( false);

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false);
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}
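
/*
 * Illustrative sketch (not part of the original source): IOMemoryMap objects
 * are normally obtained through IOMemoryDescriptor::map() rather than built
 * directly with init()/setMemoryDescriptor(). `md` is hypothetical.
 *
 *   IOMemoryMap * map = md->map();            // kernel mapping, default options
 *   if (map) {
 *       void *      cpuPtr = (void *) map->getVirtualAddress();
 *       IOByteCount len    = map->getLength();
 *       // ... access the memory through cpuPtr ...
 *       map->release();                       // drops the mapping
 *   }
 */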
IOReturn
IOMemoryDescriptor::doMap(
        vm_map_t                __addressMap,
        IOVirtualAddress *      __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
    return (kIOReturnUnsupported);
}

IOReturn
IOMemoryDescriptor::handleFault(
        void *                  _pager,
        mach_vm_size_t          sourceOffset,
        mach_vm_size_t          length)
{
    if( kIOMemoryRedirected & _flags)
    {
#if DEBUG
        IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
        do {
            SLEEP;
        } while( kIOMemoryRedirected & _flags );
    }
    return (kIOReturnSuccess);
}
3869 IOReturn IOMemoryDescriptor::populateDevicePager(
3870         void *                  _pager,
3871         vm_map_t                addressMap,
3872         mach_vm_address_t       address,
3873         mach_vm_size_t          sourceOffset,
3874         mach_vm_size_t          length,
3875         IOOptionBits            options )
3877     IOReturn            err = kIOReturnSuccess;
3878     memory_object_t     pager = (memory_object_t) _pager;
3879     mach_vm_size_t      size;
3880     mach_vm_size_t      bytes;
3881     mach_vm_size_t      page;
3882     mach_vm_size_t      pageOffset;
3883     mach_vm_size_t      pagerOffset;
3884     IOPhysicalLength    segLen, chunk;
3888     type = _flags & kIOMemoryTypeMask;
3890     if (reserved->dp.pagerContig)
3896     physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3898     pageOffset = physAddr - trunc_page_64( physAddr );
3899     pagerOffset = sourceOffset;
3901     size = length + pageOffset;
3902     physAddr -= pageOffset;
3904     segLen += pageOffset;
3908         // in the middle of the loop only map whole pages
3909         if( segLen >= bytes) segLen = bytes;
3910         else if (segLen != trunc_page(segLen))    err = kIOReturnVMError;
3911         if (physAddr != trunc_page_64(physAddr))  err = kIOReturnBadArgument;
3913         if (kIOReturnSuccess != err) break;
3915 #if DEBUG || DEVELOPMENT
3916         if ((kIOMemoryTypeUPL != type)
3917             && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
3919             OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3921 #endif /* DEBUG || DEVELOPMENT */
3923         chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3925              (page < segLen) && (KERN_SUCCESS == err);
3928             err = device_pager_populate_object(pager, pagerOffset,
3929                 (ppnum_t)(atop_64(physAddr + page)), chunk);
3930             pagerOffset += chunk;
3933         assert (KERN_SUCCESS == err);
3936         // This call to vm_fault causes an early pmap level resolution
3937         // of the mappings created above for kernel mappings, since
3938         // faulting in later can't take place from interrupt level.
3939         if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3941             err = vm_fault(addressMap,
3942                            (vm_map_offset_t)trunc_page_64(address),
3943                            options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE,
3944                            FALSE, VM_KERN_MEMORY_NONE,
3946                            (vm_map_offset_t)0);
3948             if (KERN_SUCCESS != err) break;
3951         sourceOffset += segLen - pageOffset;
3956     while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3959         err = kIOReturnBadArgument;
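// populateDevicePager() walks the descriptor's physical segments starting at
// sourceOffset and hands each page (or each round_page()d chunk when the
// pager is marked contiguous) to device_pager_populate_object(), so the
// device pager can back the mapping. For kernel_map mappings it additionally
// pre-faults the range with vm_fault(), since, as the comment above notes,
// faulting in later cannot happen from interrupt level.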
3964 IOReturn IOMemoryDescriptor::doUnmap(
3965         vm_map_t                addressMap,
3966         IOVirtualAddress        __address,
3967         IOByteCount             __length )
3970     IOMemoryMap *     mapping;
3971     mach_vm_address_t address;
3972     mach_vm_size_t    length;
3974     if (__length) panic("doUnmap");
3976     mapping = (IOMemoryMap *) __address;
3977     addressMap = mapping->fAddressMap;
3978     address    = mapping->fAddress;
3979     length     = mapping->fLength;
3981     if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
3984         if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3985             addressMap = IOPageableMapForAddress( address );
3987         if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3988                                                 addressMap, address, length );
3990         err = mach_vm_deallocate( addressMap, address, length );
3994     IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
3995 #endif /* IOTRACKING */
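// In the 64-bit interface __address actually carries the IOMemoryMap pointer
// (and __length must be 0, hence the panic check). doUnmap() recovers the
// real map, address and length from that mapping, switches pageable buffer
// mappings over to their pageable submap, and releases the range with
// mach_vm_deallocate() unless the mapping was created with kIOMapOverwrite.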
4000 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
4002     IOReturn            err = kIOReturnSuccess;
4003     IOMemoryMap *       mapping = 0;
4009         _flags |= kIOMemoryRedirected;
4011         _flags &= ~kIOMemoryRedirected;
4014         if( (iter = OSCollectionIterator::withCollection( _mappings))) {
4016             memory_object_t   pager;
4019                 pager = (memory_object_t) reserved->dp.devicePager;
4021                 pager = MACH_PORT_NULL;
4023             while( (mapping = (IOMemoryMap *) iter->getNextObject()))
4025                 mapping->redirect( safeTask, doRedirect );
4026                 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
4028                     err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4044     // temporary binary compatibility
4045     IOSubMemoryDescriptor * subMem;
4046     if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
4047         err = subMem->redirect( safeTask, doRedirect );
4049         err = kIOReturnSuccess;
4050 #endif /* !__LP64__ */
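// IOMemoryDescriptor::redirect() sets or clears kIOMemoryRedirected and then
// asks every IOMemoryMap in _mappings to redirect itself. When redirection is
// being removed (doRedirect == false, no safeTask) and the descriptor has a
// device pager, kernel mappings are repopulated immediately via
// populateDevicePager(). The !__LP64__ tail keeps the old behaviour of
// forwarding to an IOSubMemoryDescriptor for binary compatibility.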
4055 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
4057     IOReturn err = kIOReturnSuccess;
4060 //        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
4072             if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4073               && (0 == (fOptions & kIOMapStatic)))
4075                 IOUnmapPages( fAddressMap, fAddress, fLength );
4076                 err = kIOReturnSuccess;
4078                 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
4081             else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
4083                 IOOptionBits newMode;
4084                 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4085                 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4092     if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4093          || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4095      && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
4096         fMemory->redirect(safeTask, doRedirect);
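// Per-mapping redirect: for non-static mappings outside the safe task the
// pages are simply unmapped with IOUnmapPages() so later accesses fault and
// block in handleFault(); write-combined mappings instead have their cache
// mode flipped to kIOMapInhibitCache (and back) with IOProtectCacheMode().
// Physical-type backing descriptors are then kept in sync by forwarding the
// redirect to fMemory.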
4101 IOReturn IOMemoryMap::unmap( void )
4107     if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
4108         && (0 == (kIOMapStatic & fOptions))) {
4110         err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4113         err = kIOReturnSuccess;
4117         vm_map_deallocate(fAddressMap);
4128 void IOMemoryMap::taskDied( void )
4131     if (fUserClientUnmap) unmap();
4133     else                  IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4134 #endif /* IOTRACKING */
4137         vm_map_deallocate(fAddressMap);
4145 IOReturn IOMemoryMap::userClientUnmap( void )
4147     fUserClientUnmap = true;
4148     return (kIOReturnSuccess);
4151 // Overload the release mechanism.  All mappings must be a member
4152 // of a memory descriptor's _mappings set.  This means that we
4153 // always have 2 references on a mapping.  When either of these references
4154 // is released we need to free ourselves.
4155 void IOMemoryMap::taggedRelease(const void *tag) const
4158     super::taggedRelease(tag, 2);
4162 void IOMemoryMap::free()
4169         fMemory->removeMapping(this);
4174     if (fOwner && (fOwner != fMemory))
4177         fOwner->removeMapping(this);
4182         fSuperMap->release();
4185         upl_commit(fRedirUPL, NULL, 0);
4186         upl_deallocate(fRedirUPL);
4192 IOByteCount IOMemoryMap::getLength()
4197 IOVirtualAddress IOMemoryMap::getVirtualAddress()
4201         fSuperMap->getVirtualAddress();
4202     else if (fAddressMap
4203                 && vm_map_is_64bit(fAddressMap)
4204                 && (sizeof(IOVirtualAddress) < 8))
4206         OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4208 #endif /* !__LP64__ */
4214 mach_vm_address_t IOMemoryMap::getAddress()
4219 mach_vm_size_t IOMemoryMap::getSize()
4223 #endif /* !__LP64__ */
4226 task_t IOMemoryMap::getAddressTask()
4229         return( fSuperMap->getAddressTask());
4231         return( fAddressTask);
4234 IOOptionBits IOMemoryMap::getMapOptions()
4239 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
4244 IOMemoryMap * IOMemoryMap::copyCompatible(
4245                 IOMemoryMap * newMapping )
4247     task_t              task      = newMapping->getAddressTask();
4248     mach_vm_address_t   toAddress = newMapping->fAddress;
4249     IOOptionBits        _options  = newMapping->fOptions;
4250     mach_vm_size_t      _offset   = newMapping->fOffset;
4251     mach_vm_size_t      _length   = newMapping->fLength;
4253     if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
4255     if( (fOptions ^ _options) & kIOMapReadOnly)
4257     if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
4258      && ((fOptions ^ _options) & kIOMapCacheMask))
4261     if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
4264     if( _offset < fOffset)
4269     if( (_offset + _length) > fLength)
4273     if( (fLength == _length) && (!_offset))
4279         newMapping->fSuperMap = this;
4280         newMapping->fOffset   = fOffset + _offset;
4281         newMapping->fAddress  = fAddress + _offset;
4284     return( newMapping );
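// copyCompatible() decides whether this existing mapping can satisfy a new
// mapping request: same task map, matching read-only and (non-default) cache
// options, a compatible fixed address, and an offset/length window that fits
// inside this mapping. If the request covers the whole mapping it is reused
// directly; otherwise the new mapping is linked as a sub-map (fSuperMap) at
// the adjusted offset and address.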
4287 IOReturn IOMemoryMap::wireRange(
4288         uint32_t                options,
4289         mach_vm_size_t          offset,
4290         mach_vm_size_t          length)
4293     mach_vm_address_t start = trunc_page_64(fAddress + offset);
4294     mach_vm_address_t end   = round_page_64(fAddress + offset + length);
4297     prot = (kIODirectionOutIn & options);
4300         kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4304         kr = vm_map_unwire(fAddressMap, start, end, FALSE);
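// wireRange() page-aligns the requested window of the mapping and either
// wires it with vm_map_wire_kernel() (when direction bits are present in
// options), tagged with the descriptor's VM tag, or unwires it with
// vm_map_unwire(). A minimal usage sketch, assuming a valid IOMemoryMap
// called `map` (the name is illustrative, not from this file):
//
//     IOReturn ret = map->wireRange(kIODirectionOutIn, 0, map->getLength());
//     if (kIOReturnSuccess == ret) {
//         /* ... touch the wired pages ... */
//         map->wireRange(0, 0, map->getLength());   // no direction bits: unwire
//     }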
4313 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4314 #else /* !__LP64__ */
4315 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4316 #endif /* !__LP64__ */
4318     IOPhysicalAddress   address;
4322     address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4323 #else /* !__LP64__ */
4324     address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
4325 #endif /* !__LP64__ */
4331 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4334 #define super OSObject
4336 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4338 void IOMemoryDescriptor::initialize( void )
4340     if( 0 == gIOMemoryLock)
4341         gIOMemoryLock = IORecursiveLockAlloc();
4343     gIOLastPage = IOGetLastPageNumber();
4346 void IOMemoryDescriptor::free( void )
4348     if( _mappings) _mappings->release();
4352         IODelete(reserved, IOMemoryDescriptorReserved, 1);
4358 IOMemoryMap * IOMemoryDescriptor::setMapping(
4359         task_t                  intoTask,
4360         IOVirtualAddress        mapAddress,
4361         IOOptionBits            options )
4363     return (createMappingInTask( intoTask, mapAddress,
4364                                     options | kIOMapStatic,
4368 IOMemoryMap * IOMemoryDescriptor::map(
4369         IOOptionBits            options )
4371     return (createMappingInTask( kernel_task, 0,
4372                                 options | kIOMapAnywhere,
4377 IOMemoryMap * IOMemoryDescriptor::map(
4378         task_t                  intoTask,
4379         IOVirtualAddress        atAddress,
4380         IOOptionBits            options,
4381         IOByteCount             offset,
4382         IOByteCount             length )
4384     if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4386         OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4390     return (createMappingInTask(intoTask, atAddress,
4391                                 options, offset, length));
4393 #endif /* !__LP64__ */
4395 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4396         task_t                  intoTask,
4397         mach_vm_address_t       atAddress,
4398         IOOptionBits            options,
4399         mach_vm_size_t          offset,
4400         mach_vm_size_t          length)
4402     IOMemoryMap * result;
4403     IOMemoryMap * mapping;
4406         length = getLength();
4408     mapping = new IOMemoryMap;
4411      && !mapping->init( intoTask, atAddress,
4412                         options, offset, length )) {
4418     result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4424         IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4425                 this, atAddress, (uint32_t) options, offset, length);
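// createMappingInTask() is the primary mapping entry point: it defaults
// length to getLength(), allocates an IOMemoryMap, initializes it for the
// target task, and then lets makeMapping() either reuse a compatible mapping
// or create a new one with kIOMap64Bit. A minimal, illustrative sketch of a
// caller (the descriptor `md` and how it was created are assumptions, not
// part of this file):
//
//     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
//                                                 kIOMapAnywhere | kIOMapReadOnly);
//     if (map) {
//         const void * p = (const void *) map->getAddress();
//         /* ... read the mapped bytes through p ... */
//         map->release();   // the mapping is torn down when the last reference goes away
//     }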
4431 #ifndef __LP64__ // there is only a 64 bit version for LP64
4432 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4433                                 IOOptionBits         options,
4434                                 IOByteCount          offset)
4436     return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4440 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4441                                 IOOptionBits         options,
4442                                 mach_vm_size_t       offset)
4444     IOReturn err = kIOReturnSuccess;
4445     IOMemoryDescriptor * physMem = 0;
4449     if (fAddress && fAddressMap) do
4451         if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4452             || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4458         if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4460             upl_size_t          size = round_page(fLength);
4461             upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4462                                         | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4463             if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4465                                             &flags, fMemory->getVMTag(kernel_map)))
4470                 IOUnmapPages( fAddressMap, fAddress, fLength );
4472                     physMem->redirect(0, true);
4476         if (newBackingMemory)
4478             if (newBackingMemory != fMemory)
4481                 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4482                                                             options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4484                     err = kIOReturnError;
4488                 upl_commit(fRedirUPL, NULL, 0);
4489                 upl_deallocate(fRedirUPL);
4492             if ((false) && physMem)
4493                 physMem->redirect(0, false);
4506 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4507         IOMemoryDescriptor *    owner,
4509         IOVirtualAddress        __address,
4510         IOOptionBits            options,
4511         IOByteCount             __offset,
4512         IOByteCount             __length )
4515     if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4516 #endif /* !__LP64__ */
4518     IOMemoryDescriptor * mapDesc = 0;
4519     IOMemoryMap *        result  = 0;
4522     IOMemoryMap *  mapping = (IOMemoryMap *) __address;
4523     mach_vm_size_t offset  = mapping->fOffset + __offset;
4524     mach_vm_size_t length  = mapping->fLength;
4526     mapping->fOffset = offset;
4532         if (kIOMapStatic & options)
4535             addMapping(mapping);
4536             mapping->setMemoryDescriptor(this, 0);
4540         if (kIOMapUnique & options)
4543             IOByteCount       physLen;
4545 //          if (owner != this)          continue;
4547             if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4548                 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4550                 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4551                 if (!phys || (physLen < length))
4554                 mapDesc = IOMemoryDescriptor::withAddressRange(
4555                                 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4559                 mapping->fOffset = offset;
4564             // look for a compatible existing mapping
4565             if( (iter = OSCollectionIterator::withCollection(_mappings)))
4567                 IOMemoryMap * lookMapping;
4568                 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4570                     if ((result = lookMapping->copyCompatible(mapping)))
4573                         result->setMemoryDescriptor(this, offset);
4579             if (result || (options & kIOMapReference))
4581                 if (result != mapping)
4596         kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4597         if (kIOReturnSuccess == kr)
4600             mapDesc->addMapping(result);
4601             result->setMemoryDescriptor(mapDesc, offset);
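// makeMapping() is the common back end for all of the map calls above. The
// IOMemoryMap to be realized is passed in through __address (kIOMap64Bit is
// mandatory). Static mappings are simply recorded; kIOMapUnique requests on
// physical descriptors are re-targeted onto a temporary physical-range
// descriptor; otherwise an existing compatible mapping is reused via
// copyCompatible(), and only if none is found does doMap() build a new one.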
4619 void IOMemoryDescriptor::addMapping(
4620         IOMemoryMap * mapping )
4625             _mappings = OSSet::withCapacity(1);
4627             _mappings->setObject( mapping );
4631 void IOMemoryDescriptor::removeMapping(
4632         IOMemoryMap * mapping )
4635         _mappings->removeObject( mapping );
4639 // obsolete initializers
4640 // - initWithOptions is the designated initializer
4642 IOMemoryDescriptor::initWithAddress(void *      address,
4644                                     IODirection direction)
4650 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4652                                     IODirection  direction,
4659 IOMemoryDescriptor::initWithPhysicalAddress(
4660                                  IOPhysicalAddress      address,
4662                                  IODirection            direction )
4668 IOMemoryDescriptor::initWithRanges(
4669                                         IOVirtualRange * ranges,
4671                                         IODirection      direction,
4679 IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
4681                                                 IODirection      direction,
4687 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4688                                         IOByteCount * lengthOfSegment)
4692 #endif /* !__LP64__ */
4694 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4696 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4698     OSSymbol const *keys[2];
4699     OSObject *values[2];
4703         user_addr_t address;
4706     unsigned int index, nRanges;
4709     IOOptionBits type = _flags & kIOMemoryTypeMask;
4711     if (s == NULL) return false;
4713     array = OSArray::withCapacity(4);
4714     if (!array)  return (false);
4716     nRanges = _rangesCount;
4717     vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4718     if (vcopy == 0) return false;
4720     keys[0] = OSSymbol::withCString("address");
4721     keys[1] = OSSymbol::withCString("length");
4724     values[0] = values[1] = 0;
4726     // From this point on we can go to bail on error.
4728     // Copy the volatile data so we don't have to allocate memory
4729     // while the lock is held.
4731     if (nRanges == _rangesCount) {
4732         Ranges vec = _ranges;
4733         for (index = 0; index < nRanges; index++) {
4734             mach_vm_address_t addr; mach_vm_size_t len;
4735             getAddrLenForInd(addr, len, type, vec, index);
4736             vcopy[index].address = addr;
4737             vcopy[index].length  = len;
4740         // The descriptor changed out from under us.  Give up.
4747     for (index = 0; index < nRanges; index++)
4749         user_addr_t addr = vcopy[index].address;
4750         IOByteCount len = (IOByteCount) vcopy[index].length;
4751         values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4752         if (values[0] == 0) {
4756         values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4757         if (values[1] == 0) {
4761         OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4766         array->setObject(dict);
4768         values[0]->release();
4769         values[1]->release();
4770         values[0] = values[1] = 0;
4773     result = array->serialize(s);
4779       values[0]->release();
4781       values[1]->release();
4787         IOFree(vcopy, sizeof(SerData) * nRanges);
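// serialize() publishes the descriptor's ranges as an OSArray of OSDictionary
// entries, each holding an "address" and a "length" OSNumber. The ranges are
// copied into the local vcopy buffer while the lock is held so that no
// allocation happens under the lock; if _rangesCount changes in the meantime
// the routine gives up rather than serialize inconsistent data.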
4792 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4794 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4796 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4797 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4798 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4799 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4800 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4801 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4802 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4803 #else /* !__LP64__ */
4804 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4805 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4806 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4807 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4808 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4809 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4810 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4811 #endif /* !__LP64__ */
4812 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4813 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4814 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4819 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4821 /* ex-inline function implementation */
4822 IOPhysicalAddress
4823 IOMemoryDescriptor::getPhysicalAddress()
4824         { return( getPhysicalSegment( 0, 0 )); }