/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <libkern/OSKextLibPrivate.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);
#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// code
enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData
{
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fPageInfo;          // Pointer to page list or index into it
    uint32_t fIOMDOffset;            // The offset of this iopl in descriptor
    ppnum_t fMappedPage;             // Page number of first page in this iopl
    unsigned int fPageOffset;        // Offset within first page of iopl
    unsigned int fFlags;             // Flags
};

enum { kMaxWireTags = 6 };
struct ioGMDData
{
    IOMapper *  fMapper;
    uint64_t    fDMAMapAlignment;
    uint64_t    fMappedBase;
    uint64_t    fMappedLength;
    uint64_t    fPreparationID;
#if IOTRACKING
    IOTracking  fWireTracking;
#endif /* IOTRACKING */
    unsigned int      fPageCnt;
    uint8_t           fDMAMapNumAddressBits;
    unsigned char     fDiscontig:1;
    unsigned char     fCompletionError:1;
    unsigned char     fMappedBaseValid:1;
    unsigned char     _resv:3;
    unsigned char     fDMAAccess:2;

    /* variable length arrays */
    upl_page_info_t fPageList[1]
#if __LP64__
                                // align fPageList as for ioPLBlock
                                __attribute__((aligned(sizeof(upl_t))))
#endif
                                ;
    ioPLBlock fBlocks[1];
};
#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
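
// Layout note: an ioGMDData lives inside an OSData (_memoryEntries) as the
// fixed fields above, followed by fPageCnt upl_page_info_t entries, followed
// by the array of ioPLBlocks. getIOPLList/getNumIOPL/computeDataSize all walk
// that variable-length tail.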
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
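
// Device pager upcalls: the VM system calls device_data_action() to fault in
// pages of a device-backed memory object, and device_close() when the pager
// is torn down. Both are C entry points keyed by device_handle, which is the
// IOMemoryDescriptorReserved pointer registered with the pager.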
extern "C" {

kern_return_t device_data_action(
               uintptr_t               device_handle,
               ipc_port_t              device_pager,
               vm_prot_t               protection,
               vm_object_offset_t      offset,
               vm_size_t               size)
{
    kern_return_t        kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault(device_pager, offset, size);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               uintptr_t     device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
}       /* extern "C" */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Note this inline function uses C++ reference arguments to return values.
// This means that pointers are not passed, and NULLs don't have to be
// checked for, since a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        user_addr_t ad;
        uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}
static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }

    if (*control == VM_PURGABLE_SET_STATE) {
        // let VM know this call is from the kernel and is allowed to alter
        // the volatility of the memory entry even if it was created with
        // MAP_MEM_PURGABLE_KERNEL_ONLY
        *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
    }

    return (err);
}
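
// Translate a VM purgeable state back into the kIOMemoryPurgeable* constants
// reported to IOKit clients.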
static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}
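
// Map an IOKit cache mode onto the MAP_MEM_* encoding carried in vm_prot_t
// bits via SET_MAP_MEM.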
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
    vm_prot_t prot = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, prot);
            break;

        case kIOWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, prot);
            break;

        case kIOWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, prot);
            break;

        case kIOCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
            break;

        case kIOCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
            break;

        case kIOPostedWrite:
            SET_MAP_MEM(MAP_MEM_POSTED, prot);
            break;

        case kIODefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, prot);
            break;
    }

    return (prot);
}
static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    unsigned int pagerFlags = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteThruCache:
            pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteCombineCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackInnerCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIOPostedWrite:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK;
            break;

        case kIODefaultCache:
        default:
            pagerFlags = -1U;
            break;
    }
    return (pagerFlags);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
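
// An IOMemoryReference captures a descriptor's backing memory as a list of
// mach named entries (IOMemoryEntry ports), each covering a contiguous byte
// range at a given offset. References are refcounted and may chain to a
// copy-on-write clone via mapRef.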
struct IOMemoryEntry
{
    ipc_port_t entry;
    int64_t    offset;
    uint64_t   size;
};

struct IOMemoryReference
{
    volatile SInt32             refCount;
    vm_prot_t                   prot;
    uint32_t                    capacity;
    uint32_t                    count;
    struct IOMemoryReference  * mapRef;
    IOMemoryEntry               entries[0];
};

enum
{
    kIOMemoryReferenceReuse = 0x00000001,
    kIOMemoryReferenceWrite = 0x00000002,
    kIOMemoryReferenceCOW   = 0x00000004,
};

SInt32 gIOMemoryReferenceCount;
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
    IOMemoryReference * ref;
    size_t              newSize, oldSize, copySize;

    newSize = (sizeof(IOMemoryReference)
                 - sizeof(ref->entries)
                 + capacity * sizeof(ref->entries[0]));
    ref = (typeof(ref)) IOMalloc(newSize);
    if (realloc)
    {
        oldSize = (sizeof(IOMemoryReference)
                        - sizeof(realloc->entries)
                        + realloc->capacity * sizeof(realloc->entries[0]));
        copySize = oldSize;
        if (copySize > newSize) copySize = newSize;
        if (ref) bcopy(realloc, ref, copySize);
        IOFree(realloc, oldSize);
    }
    else if (ref)
    {
        bzero(ref, sizeof(*ref));
        ref->refCount = 1;
        OSIncrementAtomic(&gIOMemoryReferenceCount);
    }
    if (!ref) return (0);
    ref->capacity = capacity;
    return (ref);
}
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
    IOMemoryEntry * entries;
    size_t          size;

    if (ref->mapRef)
    {
        memoryReferenceFree(ref->mapRef);
        ref->mapRef = 0;
    }

    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        ipc_port_release_send(entries->entry);
    }
    size = (sizeof(IOMemoryReference)
                 - sizeof(ref->entries)
                 + ref->capacity * sizeof(ref->entries[0]));
    IOFree(ref, size);

    OSDecrementAtomic(&gIOMemoryReferenceCount);
}
void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
    if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
}
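
// Build an IOMemoryReference for this descriptor: walk the (coalesced)
// address ranges and create a named entry for each, or, for physical/UPL
// descriptors, set up a device pager entry. kIOMemoryReferenceReuse lets a
// persistent descriptor share the existing _memRef when the new entries
// match it exactly.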
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
                        IOOptionBits         options,
                        IOMemoryReference ** reference)
{
    enum { kCapacity = 4, kCapacityInc = 4 };

    IOReturn             err;
    IOMemoryReference *  ref;
    IOMemoryEntry *      entries;
    IOMemoryEntry *      cloneEntries;
    vm_map_t             map;
    ipc_port_t           entry, cloneEntry;
    vm_prot_t            prot;
    memory_object_size_t actualSize;
    uint32_t             rangeIdx;
    uint32_t             count;
    mach_vm_address_t    entryAddr, endAddr, entrySize;
    mach_vm_size_t       srcAddr, srcLen;
    mach_vm_size_t       nextAddr, nextLen;
    mach_vm_size_t       offset, remain;
    IOByteCount          physLen;
    IOOptionBits         type = (_flags & kIOMemoryTypeMask);
    IOOptionBits         cacheMode;
    unsigned int         pagerFlags;
    vm_tag_t             tag;

    ref = memoryReferenceAlloc(kCapacity, NULL);
    if (!ref) return (kIOReturnNoMemory);

    tag = getVMTag(kernel_map);
    entries = &ref->entries[0];
    count = 0;
    err = KERN_SUCCESS;

    offset = 0;
    rangeIdx = 0;
    cloneEntries = NULL;
    if (_task)
    {
        getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
    }
    else
    {
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen = physLen;

        // default cache mode for physical
        if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
        {
            IOOptionBits mode;
            pagerFlags = IODefaultCacheBits(nextAddr);
            if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
            {
                if (DEVICE_PAGER_EARLY_ACK & pagerFlags)
                    mode = kIOPostedWrite;
                else if (DEVICE_PAGER_GUARDED & pagerFlags)
                    mode = kIOInhibitCache;
                else
                    mode = kIOWriteCombineCache;
            }
            else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
                mode = kIOWriteThruCache;
            else
                mode = kIOCopybackCache;
            _flags |= (mode << kIOMemoryBufferCacheShift);
        }
    }

    // cache mode & vm_prot
    prot = VM_PROT_READ;
    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
    prot |= vmProtForCacheMode(cacheMode);
    // VM system requires write access to change cache mode
    if (kIODefaultCache != cacheMode)                    prot |= VM_PROT_WRITE;
    if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceWrite & options)               prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceCOW   & options)               prot |= MAP_MEM_VM_COPY;

    if ((kIOMemoryReferenceReuse & options) && _memRef)
    {
        cloneEntries = &_memRef->entries[0];
        prot |= MAP_MEM_NAMED_REUSE;
    }

    if (_task)
    {
        // virtual ranges

        if (kIOMemoryBufferPageable & _flags)
        {
            // IOBufferMemoryDescriptor alloc - set flags for entry + object create
            prot |= MAP_MEM_NAMED_CREATE;
            if (kIOMemoryBufferPurgeable & _flags)
            {
                prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
                if (VM_KERN_MEMORY_SKYWALK == tag)
                {
                    prot |= MAP_MEM_LEDGER_TAG_NETWORK;
                }
            }
            if (kIOMemoryUseReserve & _flags)      prot |= MAP_MEM_GRAB_SECLUDED;

            prot |= VM_PROT_WRITE;
            map = NULL;
        }
        else map = get_task_map(_task);

        remain = _length;
        while (remain)
        {
            srcAddr  = nextAddr;
            srcLen   = nextLen;
            nextAddr = 0;
            nextLen  = 0;
            // coalesce addr range
            for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
            {
                getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
                if ((srcAddr + srcLen) != nextAddr) break;
                srcLen += nextLen;
            }
            entryAddr = trunc_page_64(srcAddr);
            endAddr   = round_page_64(srcAddr + srcLen);
            do
            {
                entrySize = (endAddr - entryAddr);
                if (!entrySize) break;
                actualSize = entrySize;

                cloneEntry = MACH_PORT_NULL;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
                    else                                                  prot &= ~MAP_MEM_NAMED_REUSE;
                }

                err = mach_make_memory_entry_internal(map,
                        &actualSize, entryAddr, prot, &entry, cloneEntry);

                if (KERN_SUCCESS != err) break;
                if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");

                if (count >= ref->capacity)
                {
                    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
                    entries = &ref->entries[count];
                }
                entries->entry  = entry;
                entries->size   = actualSize;
                entries->offset = offset + (entryAddr - srcAddr);
                entryAddr += actualSize;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if ((cloneEntries->entry  == entries->entry)
                     && (cloneEntries->size   == entries->size)
                     && (cloneEntries->offset == entries->offset)) cloneEntries++;
                    else                                           prot &= ~MAP_MEM_NAMED_REUSE;
                }
                entries++;
                count++;
            }
            while (true);
            offset += srcLen;
            remain -= srcLen;
        }
    }
    else
    {
        // _task == 0, physical or kIOMemoryTypeUPL
        memory_object_t pager;
        vm_size_t       size = ptoa_32(_pages);

        if (!getKernelReserved()) panic("getKernelReserved");

        reserved->dp.pagerContig = (1 == _rangesCount);
        reserved->dp.memory      = this;

        pagerFlags = pagerFlagsForCacheMode(cacheMode);
        if (-1U == pagerFlags) panic("phys is kIODefaultCache");
        if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;

        pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
                                   size, pagerFlags);
        assert (pager);
        if (!pager) err = kIOReturnVMError;
        else
        {
            srcAddr   = nextAddr;
            entryAddr = trunc_page_64(srcAddr);
            err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
                        size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
            assert (KERN_SUCCESS == err);
            if (KERN_SUCCESS != err) device_pager_deallocate(pager);
            else
            {
                reserved->dp.devicePager = pager;
                entries->entry  = entry;
                entries->size   = size;
                entries->offset = offset + (entryAddr - srcAddr);
                entries++;
                count++;
            }
        }
    }

    ref->count = count;
    ref->prot  = prot;

    if (_task && (KERN_SUCCESS == err)
      && (kIOMemoryMapCopyOnWrite & _flags)
      && !(kIOMemoryReferenceCOW & options))
    {
        err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
    }

    if (KERN_SUCCESS == err)
    {
        if (MAP_MEM_NAMED_REUSE & prot)
        {
            memoryReferenceFree(ref);
            OSIncrementAtomic(&_memRef->refCount);
            ref = _memRef;
        }
    }
    else
    {
        memoryReferenceFree(ref);
        ref = NULL;
    }

    *reference = ref;

    return (err);
}
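
// IOMemoryDescriptorMapAllocRef carries the parameters for a single
// vm_map_enter_mem_object() attempt; IOMemoryDescriptorMapAlloc is the
// callback handed to IOIteratePageableMaps for pageable buffers.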
struct IOMemoryDescriptorMapAllocRef
{
    vm_map_t          map;
    mach_vm_address_t mapped;
    mach_vm_size_t    size;
    vm_prot_t         prot;
    vm_tag_t          tag;
    IOOptionBits      options;
};

static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
    IOReturn                        err;
    vm_map_offset_t                 addr;

    addr = ref->mapped;

    err = vm_map_enter_mem_object(map, &addr, ref->size,
                                  (vm_map_offset_t) 0,
                                  (((ref->options & kIOMapAnywhere)
                                    ? VM_FLAGS_ANYWHERE
                                    : VM_FLAGS_FIXED)),
                                  VM_MAP_KERNEL_FLAGS_NONE,
                                  ref->tag,
                                  IPC_PORT_NULL,
                                  (memory_object_offset_t) 0,
                                  false,
                                  ref->prot,
                                  ref->prot,
                                  VM_INHERIT_NONE);
    if (KERN_SUCCESS == err)
    {
        ref->mapped = (mach_vm_address_t) addr;
        ref->map = map;
    }

    return( err );
}
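
// Map an IOMemoryReference into a task's address space: allocate (or reuse)
// a VM range, then enter each named-entry chunk with
// vm_map_enter_mem_object(), optionally pre-faulting the already-wired pages
// (kIOMapPrefault) from the UPL page list so no fault is taken at access
// time.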
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
                     IOMemoryReference * ref,
                     vm_map_t            map,
                     mach_vm_size_t      inoffset,
                     mach_vm_size_t      size,
                     IOOptionBits        options,
                     mach_vm_address_t * inaddr)
{
    IOReturn        err;
    int64_t         offset = inoffset;
    uint32_t        rangeIdx, entryIdx;
    vm_map_offset_t addr, mapAddr;
    vm_map_offset_t pageOffset, entryOffset, remain, chunk;

    mach_vm_address_t nextAddr;
    mach_vm_size_t    nextLen;
    IOByteCount       physLen;
    IOMemoryEntry   * entry;
    vm_prot_t         prot, memEntryCacheMode;
    IOOptionBits      type;
    IOOptionBits      cacheMode;
    vm_tag_t          tag;
    // for the kIOMapPrefault option.
    upl_page_info_t * pageList = NULL;
    UInt              currentPageIndex = 0;
    bool              didAlloc;

    if (ref->mapRef)
    {
        err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
        return (err);
    }

    tag = getVMTag(map);

    type = _flags & kIOMemoryTypeMask;

    prot = VM_PROT_READ;
    if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
    prot &= ref->prot;

    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
    if (kIODefaultCache != cacheMode)
    {
        // VM system requires write access to update named entry cache mode
        memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
    }

    if (_task)
    {
        // Find first range for offset
        if (!_rangesCount) return (kIOReturnBadArgument);
        for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
        {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if (remain < nextLen) break;
            remain -= nextLen;
        }
    }
    else
    {
        rangeIdx = 0;
        remain   = 0;
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = size;
    }

    assert(remain < nextLen);
    if (remain >= nextLen) return (kIOReturnBadArgument);

    nextAddr  += remain;
    nextLen   -= remain;
    pageOffset = (page_mask & nextAddr);
    addr       = 0;
    didAlloc   = false;

    if (!(options & kIOMapAnywhere))
    {
        addr = *inaddr;
        if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
        addr -= pageOffset;
    }

    // find first entry for offset
    for (entryIdx = 0;
        (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
        entryIdx++) {}
    entryIdx--;
    entry = &ref->entries[entryIdx];

    // allocate VM
    size = round_page_64(size + pageOffset);
    if (kIOMapOverwrite & options)
    {
        if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
        {
            map = IOPageableMapForAddress(addr);
        }
        err = KERN_SUCCESS;
    }
    else
    {
        IOMemoryDescriptorMapAllocRef ref;
        ref.map     = map;
        ref.tag     = tag;
        ref.options = options;
        ref.size    = size;
        ref.prot    = prot;
        if (options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else
            ref.mapped = addr;
        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
        if (KERN_SUCCESS == err)
        {
            addr     = ref.mapped;
            map      = ref.map;
            didAlloc = true;
        }
    }

    /*
     * If the memory is associated with a device pager but doesn't have a UPL,
     * it will be immediately faulted in through the pager via populateDevicePager().
     * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
     * operations.
     */
    if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0))
        options &= ~kIOMapPrefault;

    /*
     * Prefaulting is only possible if we wired the memory earlier. Check the
     * memory type, and the underlying data.
     */
    if (options & kIOMapPrefault)
    {
        /*
         * The memory must have been wired by calling ::prepare(), otherwise
         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
         */
        assert(_wireCount != 0);
        assert(_memoryEntries != NULL);
        if ((_wireCount == 0) ||
            (_memoryEntries == NULL))
        {
            return kIOReturnBadArgument;
        }

        // Get the page list.
        ioGMDData* dataP = getDataP(_memoryEntries);
        ioPLBlock const* ioplList = getIOPLList(dataP);
        pageList = getPageList(dataP);

        // Get the number of IOPLs.
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

        /*
         * Scan through the IOPL Info Blocks, looking for the first block containing
         * the offset. The search will go past it, so we'll need to go back to the
         * right range at the end.
         */
        UInt ioplIndex = 0;
        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
            ioplIndex++;
        ioplIndex--;

        // Retrieve the IOPL info block.
        ioPLBlock ioplInfo = ioplList[ioplIndex];

        /*
         * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
         * array.
         */
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Rebase [offset] into the IOPL in order to look up the first page index.
        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

        // Retrieve the index of the first page corresponding to the offset.
        currentPageIndex = atop_32(offsetInIOPL);
    }

    // enter mappings
    remain  = size;
    mapAddr = addr;
    addr   += pageOffset;

    while (remain && (KERN_SUCCESS == err))
    {
            entryOffset = offset - entry->offset;
            if ((page_mask & entryOffset) != pageOffset)
            {
                err = kIOReturnNotAligned;
                break;
            }

            if (kIODefaultCache != cacheMode)
            {
                vm_size_t unused = 0;
                err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
                                             memEntryCacheMode, NULL, entry->entry);
                assert (KERN_SUCCESS == err);
            }

            entryOffset -= pageOffset;
            if (entryOffset >= entry->size) panic("entryOffset");
            chunk = entry->size - entryOffset;
            if (chunk)
            {
                vm_map_kernel_flags_t vmk_flags;

                vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
                vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

                if (chunk > remain) chunk = remain;
                if (options & kIOMapPrefault)
                {
                    UInt nb_pages = round_page(chunk) / PAGE_SIZE;

                    err = vm_map_enter_mem_object_prefault(map,
                                                           &mapAddr,
                                                           chunk, 0 /* mask */,
                                                            (VM_FLAGS_FIXED
                                                            | VM_FLAGS_OVERWRITE),
                                                           vmk_flags,
                                                           tag,
                                                           entry->entry,
                                                           entryOffset,
                                                           prot, // cur
                                                           prot, // max
                                                           &pageList[currentPageIndex],
                                                           nb_pages);

                    // Compute the next index in the page list.
                    currentPageIndex += nb_pages;
                    assert(currentPageIndex <= _pages);
                }
                else
                {
                    err = vm_map_enter_mem_object(map,
                                                  &mapAddr,
                                                  chunk, 0 /* mask */,
                                                   (VM_FLAGS_FIXED
                                                   | VM_FLAGS_OVERWRITE),
                                                  vmk_flags,
                                                  tag,
                                                  entry->entry,
                                                  entryOffset,
                                                  false, // copy
                                                  prot, // cur
                                                  prot, // max
                                                  VM_INHERIT_NONE);
                }
                if (KERN_SUCCESS != err) break;
                remain -= chunk;
                if (!remain) break;
                mapAddr  += chunk;
                offset   += chunk - pageOffset;
            }
            pageOffset = 0;
            entry++;
            entryIdx++;
            if (entryIdx >= ref->count)
            {
                err = kIOReturnOverrun;
                break;
            }
    }

    if ((KERN_SUCCESS != err) && didAlloc)
    {
        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
        addr = 0;
    }
    *inaddr = addr;

    return (err);
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
                               IOMemoryReference * ref,
                               IOByteCount       * residentPageCount,
                               IOByteCount       * dirtyPageCount)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    unsigned int resident, dirty;
    unsigned int totalResident, totalDirty;

    totalResident = totalDirty = 0;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
        if (KERN_SUCCESS != err) break;
        totalResident += resident;
        totalDirty    += dirty;
    }

    if (residentPageCount) *residentPageCount = totalResident;
    if (dirtyPageCount)    *dirtyPageCount    = totalDirty;
    return (err);
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
                                IOMemoryReference * ref,
                                IOOptionBits        newState,
                                IOOptionBits      * oldState)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    vm_purgable_t   control;
    int             totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;

        err = purgeableControlBits(newState, &control, &state);
        if (KERN_SUCCESS != err) break;
        err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
        if (KERN_SUCCESS != err) break;
        err = purgeableStateBits(&state);
        if (KERN_SUCCESS != err) break;

        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }

    if (oldState) *oldState = totalState;
    return (err);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}
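
/*
 * Typical client usage (illustrative sketch only, not part of this file;
 * bufferAddr/bufferLen are placeholders):
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *         bufferAddr, bufferLen, kIODirectionOutIn, kernel_task);
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         // ... program DMA against the descriptor ...
 *         md->complete();
 *     }
 *     if (md) md->release();
 */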
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress       address,
                                IOByteCount             length,
                                IODirection             direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                        mach_vm_size_t length,
                                        IOOptionBits   options,
                                        task_t         task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
                                        UInt32           rangeCount,
                                        IOOptionBits     options,
                                        task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32          withCount,
                                        IODirection     direction,
                                        bool            asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
                                IOByteCount             offset,
                                IOByteCount             length,
                                IODirection             direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}
IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    IOMemoryReference * memRef;

    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);

    if (memRef == originalMD->_memRef)
    {
        originalMD->retain();               // Add a new reference to ourselves
        originalMD->memoryReferenceRelease(memRef);
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    IOMDPersistentInitData initData = { originalMD, memRef };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount   withLength,
                                    IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
, 
1307                                     IOByteCount    withLength
, 
1308                                     IODirection  withDirection
, 
1311     _singleRange
.v
.address 
= address
; 
1312     _singleRange
.v
.length  
= withLength
; 
1314     return initWithRanges(&_singleRange
.v
, 1, withDirection
, withTask
, true); 
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            withLength,
                                 IODirection            withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges from a given task, several physical ranges, a UPL from the
 * ubc system, or a uio (may be 64bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        IOMDPersistentInitData *initData = (typeof(initData)) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memRef = initData->fMemRef;    // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task)
            return false;
        break;

    case kIOMemoryTypePhysical:         // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options))
        {
            if (_memRef)
            {
                memoryReferenceRelease(_memRef);
                _memRef = 0;
            }
            if (_mappings)
                _mappings->flushCollection();
        }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryHostOrRemote & options) options |= kIOMemoryMapperNone;
    if (kIOMemoryMapperNone & options)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options               &= ~(kIOMemoryPreparedReadOnly);
    _flags                 = options;
    _task                  = task;

#ifndef __LP64__
    _direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    _highestPage = 0;
    __iomd_reservedA = 0;
    __iomd_reservedB = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) return (false);
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;
        switch (kIOMemoryDirectionMask & options)
        {
            case kIODirectionOut:
                dataP->fDMAAccess = kIODMAMapReadAccess;
                break;
            case kIODirectionIn:
                dataP->fDMAAccess = kIODMAMapWriteAccess;
                break;
            case kIODirectionNone:
            case kIODirectionOutIn:
            default:
                panic("bad dir for upl 0x%x\n", (int) options);
                break;
        }
 //       _wireCount++; // UPLs start out life wired

        _length    = count;
        _pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags  = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }
        _rangesCount = count;

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        mach_vm_size_t totalLength = 0;
        unsigned int ind, pages = 0;
        for (ind = 0; ind < count; ind++) {
            mach_vm_address_t addr;
            mach_vm_address_t endAddr;
            mach_vm_size_t    len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr))                   break;
            if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
            if (os_add_overflow(totalLength, len, &totalLength))                    break;
            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        if ((ind < count)
         || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */

        _length      = totalLength;
        _pages       = pages;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed.

        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;       // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize;

            if (_pages > atop_64(max_mem)) return false;

            dataSize = computeDataSize(_pages, /* upls */ count * 2);
            if (!initMemoryEntries(dataSize, mapper)) return false;
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
              && (VM_KERN_MEMORY_NONE == _kernelTag))
            {
                _kernelTag = IOMemoryTag(kernel_map);
                if (_kernelTag == gIOSurfaceTag) _userTag = VM_MEMORY_IOSURFACE;
            }

            if ( (kIOMemoryPersistent & _flags) && !_memRef)
            {
                IOReturn
                err = memoryReferenceCreate(0, &_memRef);
                if (kIOReturnSuccess != err) return false;
            }

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if( reserved)
    {
        LOCK;
        reserved->dp.memory = 0;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
    {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
        {
            dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
            dataP->fMappedBaseValid = dataP->fMappedBase = 0;
        }
    }
    else
    {
        while (_wireCount) complete();
    }

    if (_memoryEntries) _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved)
    {
        if (reserved->dp.devicePager)
        {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
        }
        else
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }

    if (_memRef)      memoryReferenceRelease(_memRef);
    if (_prepareLock) IOLockFree(_prepareLock);

    super::free();
}
#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}
/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

uint64_t IOMemoryDescriptor::getFlags(void)
{
    return (_flags);
}
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}

#pragma clang diagnostic pop

#endif /* !__LP64__ */
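
// readBytes/writeBytes copy between the descriptor's physical segments and a
// kernel virtual buffer with copypv(), so the memory need not be mapped into
// the kernel map; the descriptor is walked segment by segment via
// getPhysicalSegment().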
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);
    if ((offset >= _length)
     || ((offset + length) > _length)) {
        return 0;
    }

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (0);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount inoffset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;
    IOByteCount offset = inoffset;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags)
     || (offset >= _length)
     || ((offset + length) > _length)) {
        return 0;
    }

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (0);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        if (!srcAddr) bzero_phys(dstAddr64, dstLen);
        else
        {
            copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            srcAddr   += dstLen;
        }
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

#if defined(__x86_64__)
    // copypv does not cppvFsnk on intel
#else
    if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
#endif

    return length - remaining;
}
#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */
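
// Preparation IDs: a monotonically increasing 64-bit counter stamped on a
// descriptor when it is wired, letting DMA clients detect that mappings
// belong to a previous preparation of the same memory.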
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
    {
        IOMemoryDescriptor::setPreparationID();
        return (IOMemoryDescriptor::getPreparationID());
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
    return (dataP->fPreparationID);
}
IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved)
    {
        reserved = IONew(IOMemoryDescriptorReserved, 1);
        if (reserved)
            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
    }
    return (reserved);
}
void IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
    {
        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
}

uint64_t IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved)
        return (reserved->preparationID);
    else
        return (kIOPreparationIDUnsupported);
}
void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
{
    _kernelTag = kernelTag;
    _userTag   = userTag;
}
vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map)
{
    if (vm_kernel_map_is_kernel(map))
    {
         if (VM_KERN_MEMORY_NONE != _kernelTag) return (_kernelTag);
    }
    else
    {
         if (VM_KERN_MEMORY_NONE != _userTag)   return (_userTag);
    }
    return (IOMemoryTag(map));
}
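
// dmaCommandOperation() is the op-dispatch entry point used by IODMACommand:
// the DMACommandOps selector picks the map/unmap/spec operation, with vData
// pointing at the corresponding argument structure (e.g. IOMDDMAMapArgs).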
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn err = kIOReturnSuccess;
    DMACommandOps params;
    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
    ioGMDData *dataP;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;

        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries && data->fMapper)
        {
            bool remap, keepMap;
            dataP = getDataP(_memoryEntries);

            if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
            if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;

            keepMap = (data->fMapper == gIOSystemMapper);
            keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

            if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockLock(_prepareLock);

            remap = (!keepMap);
            remap |= (dataP->fDMAMapNumAddressBits < 64)
                  && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
            remap |= (dataP->fDMAMapAlignment > page_size);

            if (remap || !dataP->fMappedBaseValid)
            {
//              if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
                err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
                if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid)
                {
                    dataP->fMappedBase      = data->fAlloc;
                    dataP->fMappedBaseValid = true;
                    dataP->fMappedLength    = data->fAllocLength;
                    data->fAllocLength      = 0;                        // IOMD owns the alloc now
                }
            }
            else
            {
                data->fAlloc = dataP->fMappedBase;
                data->fAllocLength = 0;                         // give out IOMD map
                md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
            }
            data->fMapContig = !dataP->fDiscontig;

            if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockUnlock(_prepareLock);
        }
        return (err);
    }
    if (kIOMDDMAUnmap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

        return kIOReturnSuccess;
    }

    if (kIOMDAddDMAMapSpec == op)
    {
        if (dataSize < sizeof(IODMAMapSpecification))
            return kIOReturnUnderrun;

        IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries)
        {
            dataP = getDataP(_memoryEntries);
            if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
                dataP->fDMAMapNumAddressBits = data->numAddressBits;
            if (data->alignment > dataP->fDMAMapAlignment)
                dataP->fDMAMapAlignment = data->alignment;
        }
        return kIOReturnSuccess;
    }
    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries)
            {
                dataP = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(dataP);
                UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
        }

        return kIOReturnSuccess;
    }
    else if (kIOMDDMAActive == op)
    {
        if (params)
        {
            int16_t prior;
            prior = OSAddAtomic16(1, &md->_dmaReferences);
            if (!prior) md->_mapName = NULL;
        }
        else
        {
            if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences);
            else                    panic("_dmaReferences underflow");
        }
    }
    else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;
    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    uint8_t mapped = isP->fIO.fMapped;
    uint64_t mappedBase;

    if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached);

    if (IOMapper::gSystem && mapped
        && (!(kIOMemoryHostOnly & _flags))
        && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid))
//      && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
    {
        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        dataP = getDataP(_memoryEntries);
        if (dataP->fMapper)
        {
            IODMAMapSpecification mapSpec;
            bzero(&mapSpec, sizeof(mapSpec));
            mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
            mapSpec.alignment = dataP->fDMAMapAlignment;
            err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
            if (kIOReturnSuccess != err) return (err);
            dataP->fMappedBaseValid = true;
        }
    }

    if (kIOMDDMAWalkMappedLocal == mapped) mappedBase = isP->fIO.fMappedBase;
    else if (mapped)
    {
        if (IOMapper::gSystem
                && (!(kIOMemoryHostOnly & _flags))
                && _memoryEntries
                && (dataP = getDataP(_memoryEntries))
                && dataP->fMappedBaseValid)
        {
            mappedBase = dataP->fMappedBase;
        }
        else mapped = 0;
    }

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if (!params
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;      // Start from beginning
    UInt length;
    UInt64 address;

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (mapped)
        {
            address = mappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (mapped)
        {
            address = mappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;                       // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;
    } while (false);
    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
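/*
 * Sketch of the segment-walk contract implemented above (normally driven by
 * IODMACommand rather than called directly; op and field use here follow
 * this file's own getPhysicalSegment() below). Each call fills
 * fIOVMAddr/fLength with one contiguous run starting at fOffset; the caller
 * advances fOffset and calls again, until kIOReturnOverrun marks the end.
 *
 *   IOMDDMAWalkSegmentState state;
 *   IOMDDMAWalkSegmentArgs * args = (IOMDDMAWalkSegmentArgs *)(void *) &state;
 *   args->fOffset = 0;
 *   args->fMapped = 0;   // walk physical addresses, not mapper addresses
 *   while (kIOReturnSuccess == md->dmaCommandOperation(kIOMDFirstSegment,
 *                                                      state, sizeof(state))) {
 *       // one contiguous run in args->fIOVMAddr / args->fLength
 *       args->fOffset += args->fLength;
 *   }
 */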
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    IOReturn          ret;
    mach_vm_address_t address = 0;
    mach_vm_size_t    length  = 0;
    IOMapper *        mapper  = gIOSystemMapper;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    if (lengthOfSegment)
        *lengthOfSegment = 0;

    if (offset >= _length)
        return 0;

    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        mach_vm_address_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            mach_vm_address_t newAddr;
            mach_vm_size_t    newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr; // Truncate address to 32bit
    }
    else
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
                DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                                        ret, this, state->fOffset,
                                        state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
        // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

        if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
        {
            if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
            {
                addr64_t    origAddr = address;
                IOByteCount origLen  = length;

                address = mapper->mapToPhysicalAddress(origAddr);
                length = page_size - (address & (page_size - 1));
                while ((length < origLen)
                    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
                    length += page_size;
                if (length > origLen)
                    length = origLen;
            }
        }
    }

    if (!address)
        length = 0;

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}
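/*
 * Sketch only: iterating a prepared descriptor's physically contiguous runs
 * with the accessor above; "md" is a hypothetical prepared descriptor.
 *
 *   addr64_t    seg;
 *   IOByteCount offset = 0, segLen = 0;
 *   while ((seg = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone))) {
 *       // [seg, seg + segLen) is physically contiguous
 *       offset += segLen;
 *   }
 */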
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}
#pragma clang diagnostic pop
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    addr64_t    address = 0;
    IOByteCount length  = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);

    if (lengthOfSegment)
        length = *lengthOfSegment;

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
                    address, (long) length, (getMetaClass())->getClassName());
    }

    return ((IOPhysicalAddress) address);
}
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64 = mapper->mapToPhysicalAddress(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                        IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
    DMACommandOps params;
    IOReturn err;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fSGCount = 0;
        data->fDirection = getDirection();
        data->fIsPrepared = true;       // Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset  = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    }
    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
    else if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());

        data->fMapContig = true;
        err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

        return (err);
    }
    else if (kIOMDDMAUnmap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

        return (kIOReturnSuccess);
    }
    else return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                                   IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;

    vm_purgable_t control;
    int           state;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (_memRef)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            // Find the appropriate vm_map for the given task
            vm_map_t curMap;
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else if (!_task)
            {
                err = kIOReturnUnsupported;
                break;
            }
            else
            {
                curMap = get_task_map(_task);
                if (NULL == curMap)
                {
                    err = KERN_INVALID_ARGUMENT;
                    break;
                }
            }

            // can only do one range
            Ranges vec = _ranges;
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            mach_vm_address_t addr;
            mach_vm_size_t    len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = vm_map_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }

    return (err);
}
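/*
 * Sketch only: typical purgeable-memory use against the control above; note
 * the single-range restriction ("can only do one range"). "md" is a
 * hypothetical descriptor over task memory.
 *
 *   IOOptionBits oldState;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);    // reclaimable
 *   // ... later, before touching the contents again:
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *   if (kIOMemoryPurgeableEmpty == oldState) {
 *       // the pages were reclaimed while volatile; regenerate the contents
 *   }
 */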
IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
                                            IOByteCount * dirtyPageCount )
{
    IOReturn err = kIOReturnNotReady;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
    else
    {
        IOMultiMemoryDescriptor * mmd;
        IOSubMemoryDescriptor   * smd;
        if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
        {
            err = smd->getPageCounts(residentPageCount, dirtyPageCount);
        }
        else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
        {
            err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
        }
    }
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
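/*
 * Sketch only: querying residency through the accessor above; both counts
 * are page counts (typed IOByteCount), and a caller that does not need one
 * of them may pass NULL. "md" is hypothetical.
 *
 *   IOByteCount resident = 0, dirty = 0;
 *   if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *       // resident/dirty describe the current backing pages
 *   }
 */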
#if defined(__arm__) || defined(__arm64__)
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
#else /* defined(__arm__) || defined(__arm64__) */
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
#endif /* defined(__arm__) || defined(__arm64__) */

static void SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_clear_noencrypt(page);
    }
}

static void ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_set_noencrypt(page);
    }
}
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
                                                IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    unsigned int res = 0x0UL;
    void (*func)(addr64_t pa, unsigned int count) = 0;
#if defined(__arm__) || defined(__arm64__)
    void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0;
#endif

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
#if defined(__arm__) || defined(__arm64__)
            func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
            func_ext(0, 0, 0, &res);
            return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
            break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
            func = &dcache_incoherent_io_flush64;
            break;
#endif /* defined(__arm__) || defined(__arm64__) */
        case kIOMemoryIncoherentIOStore:
#if defined(__arm__) || defined(__arm64__)
            func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
            func_ext(0, 0, 0, &res);
            return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
            break;
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
            func = &dcache_incoherent_io_store64;
            break;
#endif /* defined(__arm__) || defined(__arm64__) */

        case kIOMemorySetEncrypted:
            func = &SetEncryptOp;
            break;
        case kIOMemoryClearEncrypted:
            func = &ClearEncryptOp;
            break;
    }

#if defined(__arm__) || defined(__arm64__)
    if ((func == 0) && (func_ext == 0))
        return (kIOReturnUnsupported);
#else /* defined(__arm__) || defined(__arm64__) */
    if (!func)
        return (kIOReturnUnsupported);
#endif /* defined(__arm__) || defined(__arm64__) */

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

#if defined(__arm__) || defined(__arm64__)
        if (func)
            (*func)(dstAddr64, dstLen);
        if (func_ext)
        {
            (*func_ext)(dstAddr64, dstLen, remaining, &res);
            if (res != 0x0UL)
            {
                remaining = 0;
                break;
            }
        }
#else /* defined(__arm__) || defined(__arm64__) */
        (*func)(dstAddr64, dstLen);
#endif /* defined(__arm__) || defined(__arm64__) */

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
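/*
 * Sketch only: a typical use of performOperation() on a platform with
 * incoherent DMA, writing back CPU-cached data before a device reads the
 * buffer, then flushing before the CPU reads device-written data. "md" is
 * a hypothetical prepared descriptor.
 *
 *   md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 *   // ... start device DMA; on completion, before the CPU reads:
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */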
#if defined(__i386__) || defined(__x86_64__)

#define io_kernel_static_start  vm_kernel_stext
#define io_kernel_static_end    vm_kernel_etext

#elif defined(__arm__) || defined(__arm64__)

extern vm_offset_t              static_memory_end;

#if defined(__arm64__)
#define io_kernel_static_start vm_kext_base
#else /* defined(__arm64__) */
#define io_kernel_static_start vm_kernel_stext
#endif /* defined(__arm64__) */

#define io_kernel_static_end    static_memory_end

#else
#error io_kernel_static_end is undefined for this architecture
#endif

static kern_return_t
io_get_kernel_static_upl(
        vm_map_t                /* map */,
        uintptr_t               offset,
        upl_size_t              *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].free_when_done = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnSuccess;
    ioGMDData *dataP;
    upl_page_info_array_t pageInfo;
    ppnum_t mapBase;
    vm_tag_t tag = VM_KERN_MEMORY_NONE;

    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
        forDirection = (IODirection) (forDirection | getDirection());

    dataP = getDataP(_memoryEntries);
    upl_control_flags_t uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
        case kIODirectionOut:
            // Pages do not need to be marked as dirty on commit
            uplFlags = UPL_COPYOUT_FROM;
            dataP->fDMAAccess = kIODMAMapReadAccess;
            break;

        case kIODirectionIn:
            dataP->fDMAAccess = kIODMAMapWriteAccess;
            uplFlags = 0;       // i.e. ~UPL_COPYOUT_FROM
            break;

        default:
            dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
            uplFlags = 0;       // i.e. ~UPL_COPYOUT_FROM
            break;
    }

    if (_wireCount)
    {
        if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
        {
            OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
            error = kIOReturnNotWritable;
        }
    }
    else
    {
        IOMapper *mapper;

        mapper = dataP->fMapper;
        dataP->fMappedBaseValid = dataP->fMappedBase = 0;

        uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
        tag = _kernelTag;
        if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map);

        if (kIODirectionPrepareToPhys32 & forDirection)
        {
            if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
            if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
        }
        if (kIODirectionPrepareNoFault    & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
        if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
        if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;

        mapBase = 0;

        // Note that appendBytes(NULL) zeros the data up to the desired length
        //           and the length parameter is an unsigned int
        size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
        if (uplPageSize > ((unsigned int)uplPageSize))    return (kIOReturnNoMemory);
        if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
        dataP = 0;

        // Find the appropriate vm_map for the given task
        vm_map_t curMap;
        if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))  curMap = 0;
        else                                                             curMap = get_task_map(_task);
        // Iterate over the vector of virtual ranges
        Ranges vec = _ranges;
        unsigned int pageIndex  = 0;
        IOByteCount mdOffset    = 0;
        ppnum_t highestPage     = 0;

        IOMemoryEntry * memRefEntry = 0;
        if (_memRef) memRefEntry = &_memRef->entries[0];

        for (UInt range = 0; range < _rangesCount; range++) {
            ioPLBlock iopl;
            mach_vm_address_t startPage, startPageOffset;
            mach_vm_size_t    numBytes;
            ppnum_t highPage = 0;

            // Get the startPage address and length of vec[range]
            getAddrLenForInd(startPage, numBytes, type, vec, range);
            startPageOffset = startPage & PAGE_MASK;
            iopl.fPageOffset = startPageOffset;
            numBytes += startPageOffset;
            startPage = trunc_page_64(startPage);

            if (mapper)
                iopl.fMappedPage = mapBase + pageIndex;
            else
                iopl.fMappedPage = 0;
            // Iterate over the current range, creating UPLs
            while (numBytes) {
                vm_address_t kernelStart = (vm_address_t) startPage;
                vm_map_t theMap;
                if (curMap) theMap = curMap;
                else if (!_memRef)
                {
                    assert(_task == kernel_task);
                    theMap = IOPageableMapForAddress(kernelStart);
                }
                else theMap = NULL;

                // ioplFlags is an in/out parameter
                upl_control_flags_t ioplFlags = uplFlags;
                dataP = getDataP(_memoryEntries);
                pageInfo = getPageList(dataP);
                upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

                mach_vm_size_t _ioplSize    = round_page(numBytes);
                upl_size_t     ioplSize     = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
                unsigned int   numPageInfo  = atop_32(ioplSize);

                if ((theMap == kernel_map)
                 && (kernelStart >= io_kernel_static_start)
                 && (kernelStart <  io_kernel_static_end)) {
                    error = io_get_kernel_static_upl(theMap,
                                                    kernelStart,
                                                    &ioplSize,
                                                    &iopl.fIOPL,
                                                    baseInfo,
                                                    &numPageInfo,
                                                    &highPage);
                }
                else if (_memRef) {
                    memory_object_offset_t entryOffset;

                    entryOffset = mdOffset;
                    entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
                    if (entryOffset >= memRefEntry->size) {
                        memRefEntry++;
                        if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
                        entryOffset = 0;
                    }
                    if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
                    error = memory_object_iopl_request(memRefEntry->entry,
                                                       entryOffset,
                                                       &ioplSize,
                                                       &iopl.fIOPL,
                                                       baseInfo,
                                                       &numPageInfo,
                                                       &ioplFlags,
                                                       tag);
                }
                else {
                    assert(theMap);
                    error = vm_map_create_upl(theMap,
                                                    startPage,
                                                    (upl_size_t*)&ioplSize,
                                                    &iopl.fIOPL,
                                                    baseInfo,
                                                    &numPageInfo,
                                                    &ioplFlags,
                                                    tag);
                }

                if (error != KERN_SUCCESS) goto abortExit;
                assert(ioplSize);

                if (iopl.fIOPL)
                    highPage = upl_get_highest_page(iopl.fIOPL);
                if (highPage > highestPage)
                    highestPage = highPage;

                if (baseInfo->device) {
                    numPageInfo = 1;
                    iopl.fFlags = kIOPLOnDevice;
                }
                else {
                    iopl.fFlags = 0;
                }

                iopl.fIOMDOffset = mdOffset;
                iopl.fPageInfo = pageIndex;
                if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) dataP->fDiscontig = true;

                if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                    // Clean up partially created and unsaved iopl
                    if (iopl.fIOPL) {
                        upl_abort(iopl.fIOPL, 0);
                        upl_deallocate(iopl.fIOPL);
                    }
                    goto abortExit;
                }
                dataP = 0;

                // Check for multiple iopls in one virtual range
                pageIndex += numPageInfo;
                mdOffset -= iopl.fPageOffset;
                if (ioplSize < numBytes) {
                    numBytes -= ioplSize;
                    startPage += ioplSize;
                    mdOffset += ioplSize;
                    iopl.fPageOffset = 0;
                    if (mapper) iopl.fMappedPage = mapBase + pageIndex;
                }
                else {
                    mdOffset += numBytes;
                    break;
                }
            }
        }

        _highestPage = highestPage;

        if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
    }

#if IOTRACKING
    if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error))
    {
        dataP = getDataP(_memoryEntries);
        if (!dataP->fWireTracking.link.next)
        {
            IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
        }
    }
#endif /* IOTRACKING */

    return (error);

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;
    else if (error == KERN_MEMORY_ERROR)
        error = kIOReturnNoResources;

    return error;
}
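/*
 * Sketch only: the UPL flags chosen above follow the direction passed to
 * prepare(), so a caller may combine a transfer direction with the real
 * kIODirectionPrepare* modifiers, e.g. to skip zero-filling newly wired
 * pages. "md" is hypothetical.
 *
 *   md->prepare((IODirection)(kIODirectionOut | kIODirectionPrepareNoZeroFill));
 */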
bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
    ioGMDData * dataP;
    unsigned    dataSize = size;

    if (!_memoryEntries) {
        _memoryEntries = OSData::withCapacity(dataSize);
        if (!_memoryEntries)
            return false;
    }
    else if (!_memoryEntries->initWithCapacity(dataSize))
        return false;

    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
    dataP = getDataP(_memoryEntries);

    if (mapper == kIOMapperWaitSystem) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }
    dataP->fMapper               = mapper;
    dataP->fPageCnt              = 0;
    dataP->fMappedBase           = 0;
    dataP->fDMAMapNumAddressBits = 64;
    dataP->fDMAMapAlignment      = 0;
    dataP->fPreparationID        = kIOPreparationIDUnprepared;
    dataP->fDiscontig            = false;
    dataP->fCompletionError      = false;
    dataP->fMappedBaseValid      = false;

    return (true);
}
IOReturn
IOMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn err;
    uint32_t mapOptions;

    mapOptions = 0;
    mapOptions |= kIODMAMapReadAccess;
    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

    err = mapper->iovmMapMemory(this, offset, length, mapOptions,
                                mapSpec, command, NULL, mapAddress, mapLength);

    if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);

    return (err);
}
void IOMemoryDescriptor::dmaMapRecord(
    IOMapper                    * mapper,
    IODMACommand                * command,
    uint64_t                      mapLength)
{
    kern_allocation_name_t alloc;
    int16_t                prior;

    if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */)
    {
        kern_allocation_update_size(mapper->fAllocName, mapLength);
    }

    if (!command) return;
    prior = OSAddAtomic16(1, &_dmaReferences);
    if (!prior)
    {
        if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag))
        {
            _mapName  = alloc;
            mapLength = _length;
            kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
        }
        else _mapName = NULL;
    }
}
IOReturn
IOMemoryDescriptor::dmaUnmap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    uint64_t                      offset,
    uint64_t                      mapAddress,
    uint64_t                      mapLength)
{
    IOReturn ret;
    kern_allocation_name_t alloc;
    kern_allocation_name_t mapName;
    int16_t prior;

    mapName = 0;
    prior = 0;
    if (command)
    {
        mapName = _mapName;
        if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences);
        else                panic("_dmaReferences underflow");
    }

    if (!mapLength) return (kIOReturnSuccess);

    ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);

    if ((alloc = mapper->fAllocName))
    {
        kern_allocation_update_size(alloc, -mapLength);
        if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag))
        {
            mapLength = _length;
            kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
        }
    }

    return (ret);
}
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn          err = kIOReturnSuccess;
    ioGMDData *       dataP;
    IOOptionBits      type = _flags & kIOMemoryTypeMask;

    *mapAddress = 0;
    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
    if (kIOMemoryRemote & _flags)   return (kIOReturnNotAttached);

    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
     || offset || (length != _length))
    {
        err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
    }
    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
    {
        const ioPLBlock * ioplList = getIOPLList(dataP);
        upl_page_info_t * pageList;
        uint32_t          mapOptions = 0;

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
        mapSpec.alignment = dataP->fDMAMapAlignment;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplList->fFlags & kIOPLExternUPL)
        {
            pageList = (upl_page_info_t *) ioplList->fPageInfo;
            mapOptions |= kIODMAMapPagingPath;
        }
        else pageList = getPageList(dataP);

        if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
        {
            mapOptions |= kIODMAMapPageListFullyOccupied;
        }

        assert(dataP->fDMAAccess);
        mapOptions |= dataP->fDMAAccess;

        // Check for direct device non-paged memory
        if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;

        IODMAMapPageList dmaPageList =
        {
                .pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
                .pageListCount = _pages,
                .pageList      = &pageList[0]
        };
        err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
                                    command, &dmaPageList, mapAddress, mapLength);

        if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
    }

    return (err);
}
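/*
 * Sketch only: drivers normally reach this dmaMap() through IODMACommand,
 * which supplies the mapper, specification and page list. A minimal setup
 * limited to 32-bit device addressing, with a hypothetical descriptor "md":
 *
 *   IODMACommand * dma = IODMACommand::withSpecification(
 *       kIODMACommandOutputHost64, 32, 0);
 *   dma->setMemoryDescriptor(md);               // prepares and maps
 *   UInt64 off = 0; UInt32 numSegs = 1;
 *   IODMACommand::Segment64 seg;
 *   while ((kIOReturnSuccess == dma->genIOVMSegments(&off, &seg, &numSegs))
 *          && numSegs)
 *   {
 *       // program the device with seg.fIOVMAddr / seg.fLength
 *   }
 *   dma->clearMemoryDescriptor();               // completes the descriptor
 *   dma->release();
 */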
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn     error    = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (_prepareLock) IOLockLock(_prepareLock);

    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
    {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
    {
        if (1 == ++_wireCount)
        {
            if (kIOMemoryClearEncrypt & _flags)
            {
                performOperation(kIOMemoryClearEncrypted, 0, _length);
            }
        }
    }

    if (_prepareLock) IOLockUnlock(_prepareLock);

    return error;
}
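/*
 * Sketch only: prepare() and complete() must balance; a typical caller, with
 * hypothetical descriptor "md":
 *
 *   if (kIOReturnSuccess == md->prepare()) {
 *       // pages are wired; safe to take physical segments / run DMA
 *       md->complete();
 *   }
 */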
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    ioGMDData * dataP;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);

    if (_prepareLock) IOLockLock(_prepareLock);
    do
    {
        assert(_wireCount);
        if (!_wireCount) break;
        dataP = getDataP(_memoryEntries);
        if (!dataP)      break;

        if (kIODirectionCompleteWithError & forDirection)  dataP->fCompletionError = true;

        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
        {
            performOperation(kIOMemorySetEncrypted, 0, _length);
        }

        _wireCount--;
        if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
        {
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, count = getNumIOPL(_memoryEntries, dataP);

            if (_wireCount)
            {
                // kIODirectionCompleteWithDataValid & forDirection
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
                {
                    vm_tag_t tag;
                    tag = getVMTag(kernel_map);
                    for (ind = 0; ind < count; ind++)
                    {
                        if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag);
                    }
                }
            }
            else
            {
                if (_dmaReferences) panic("complete() while dma active");

                if (dataP->fMappedBaseValid) {
                    dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
                    dataP->fMappedBaseValid = dataP->fMappedBase = 0;
                }
#if IOTRACKING
                if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
#endif /* IOTRACKING */
                // Only complete iopls that we created which are for TypeVirtual
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
                {
                    for (ind = 0; ind < count; ind++)
                        if (ioplList[ind].fIOPL) {
                            if (dataP->fCompletionError)
                                upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
                            else
                                upl_commit(ioplList[ind].fIOPL, 0, 0);
                            upl_deallocate(ioplList[ind].fIOPL);
                        }
                } else if (kIOMemoryTypeUPL == type) {
                    upl_set_referenced(ioplList[0].fIOPL, false);
                }

                (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

                dataP->fPreparationID = kIOPreparationIDUnprepared;
                _flags &= ~kIOMemoryPreparedReadOnly;
            }
        }
    }
    while (false);

    if (_prepareLock) IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
IOReturn
IOGeneralMemoryDescriptor::doMap(
        vm_map_t                __addressMap,
        IOVirtualAddress *      __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    kern_return_t  err;

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    mach_vm_address_t range0Addr = 0;
    mach_vm_size_t    range0Len = 0;

    if ((offset >= _length) || ((offset + length) > _length))
        return( kIOReturnBadArgument );

    assert (!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) return (0);

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if (_task
     && (mapping->fAddressTask == _task)
     && (mapping->fAddressMap == get_task_map(_task))
     && (options & kIOMapAnywhere)
     && (!(kIOMapUnique & options))
     && (1 == _rangesCount)
     && (0 == offset)
     && range0Addr
     && (length <= range0Len))
    {
        mapping->fAddress = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if (!_memRef)
    {
        IOOptionBits createOptions = 0;
        if (!(kIOMapReadOnly & options))
        {
            createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
            if (kIODirectionOut == (kIODirectionOutIn & _flags))
            {
                OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
            }
#endif
        }
        err = memoryReferenceCreate(createOptions, &_memRef);
        if (kIOReturnSuccess != err) return (err);
    }

    memory_object_t pager;
    pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
    // <upl_transpose //
    if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
    {
        do
        {
            upl_t               redirUPL2;
            upl_size_t          size;
            upl_control_flags_t flags;
            unsigned int        lock_count;

            if (!_memRef || (1 != _memRef->count))
            {
                err = kIOReturnNotReadable;
                break;
            }

            size = round_page(mapping->fLength);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
                                            NULL, NULL,
                                            &flags, getVMTag(kernel_map)))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }
            {
                // swap the memEntries since they now refer to different vm_objects
                IOMemoryReference * me = _memRef;
                _memRef = mapping->fMemory->_memRef;
                mapping->fMemory->_memRef = me;
            }
            if (pager)
                err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        while (false);
    }
    // upl_transpose> //
    else
    {
        err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
#if IOTRACKING
        if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
        {
            // only dram maps in the default on development case
            IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
        }
#endif /* IOTRACKING */
        if ((err == KERN_SUCCESS) && pager)
        {
            err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

            if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
            else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
            {
                mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
            }
        }
    }

    return (err);
}
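/*
 * Sketch only: doMap() is normally reached through map()/createMappingInTask(),
 * which build the IOMemoryMap handed in via *__address. Hypothetical use:
 *
 *   IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
 *   if (map) {
 *       void * va = (void *) map->getVirtualAddress();
 *       // use the mapping, then drop it:
 *       map->release();
 *   }
 */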
#if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
                    mach_vm_address_t * address, mach_vm_size_t * size)
{
#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))

    IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

    if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);

    *task    = map->fAddressTask;
    *address = map->fAddress;
    *size    = map->fLength;

    return (kIOReturnSuccess);
}
#endif /* IOTRACKING */
IOReturn
IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        __address,
        IOByteCount             __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOMemoryMap::init(
        task_t                  intoTask,
        mach_vm_address_t       toAddress,
        IOOptionBits            _options,
        mach_vm_size_t          _offset,
        mach_vm_size_t          _length )
{
    if (!intoTask)
        return( false);

    if (!super::init())
        return(false);

    fAddressMap  = get_task_map(intoTask);
    if (!fAddressMap)
        return(false);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return(false);

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false);
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}
IOReturn
IOMemoryDescriptor::doMap(
        vm_map_t                __addressMap,
        IOVirtualAddress *      __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
    return (kIOReturnUnsupported);
}
IOReturn
IOMemoryDescriptor::handleFault(
        void *                  _pager,
        mach_vm_size_t          sourceOffset,
        mach_vm_size_t          length)
{
    if( kIOMemoryRedirected & _flags)
    {
#if DEBUG
        IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
        do {
            SLEEP;
        } while( kIOMemoryRedirected & _flags );
    }

    return (kIOReturnSuccess);
}
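/*
 * populateDevicePager() walks the descriptor's physical segments and hands
 * each page (or, for a contiguous pager, each rounded chunk) to
 * device_pager_populate_object(). For kernel mappings it then touches the
 * range with vm_fault() so the pmap entries exist up front; faulting the
 * pages in lazily could otherwise be demanded from interrupt context, where
 * faulting is not possible.
 */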
IOReturn
IOMemoryDescriptor::populateDevicePager(
        void *                  _pager,
        vm_map_t                addressMap,
        mach_vm_address_t       address,
        mach_vm_size_t          sourceOffset,
        mach_vm_size_t          length,
        IOOptionBits            options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    mach_vm_size_t      size;
    mach_vm_size_t      bytes;
    mach_vm_size_t      page;
    mach_vm_size_t      pageOffset;
    mach_vm_size_t      pagerOffset;
    IOPhysicalLength    segLen, chunk;
    addr64_t            physAddr;
    IOOptionBits        type;

    type = _flags & kIOMemoryTypeMask;

    if (reserved->dp.pagerContig)
    {
        sourceOffset = 0;
        pagerOffset  = 0;
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes) segLen = bytes;
        else if (segLen != trunc_page(segLen))    err = kIOReturnVMError;
        if (physAddr != trunc_page_64(physAddr))  err = kIOReturnBadArgument;

        if (kIOReturnSuccess != err) break;

#if DEBUG || DEVELOPMENT
        if ((kIOMemoryTypeUPL != type)
            && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
        {
            OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
        }
#endif /* DEBUG || DEVELOPMENT */

        chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);

        for (page = 0;
             (page < segLen) && (KERN_SUCCESS == err);
             page += chunk)
        {
            err = device_pager_populate_object(pager, pagerOffset,
                (ppnum_t)(atop_64(physAddr + page)), chunk);
            pagerOffset += chunk;
        }

        assert (KERN_SUCCESS == err);
        if (err) break;

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            err = vm_fault(addressMap,
                           (vm_map_offset_t)trunc_page_64(address),
                           options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE,
                           FALSE, VM_KERN_MEMORY_NONE,
                           THREAD_UNINT, NULL,
                           (vm_map_offset_t)0);

            if (KERN_SUCCESS != err) break;
        }

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
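/*
 * With kIOMap64Bit, __address carries the IOMemoryMap object itself rather
 * than a raw virtual address; the target map, address, and length are read
 * back out of the mapping, and a non-zero __length is a programming error
 * (hence the panic below).
 */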
IOReturn
IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        __address,
        IOByteCount             __length )
{
    IOReturn          err;
    IOMemoryMap *     mapping;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length) panic("doUnmap");

    mapping = (IOMemoryMap *) __address;
    addressMap = mapping->fAddressMap;
    address    = mapping->fAddress;
    length     = mapping->fLength;

    if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
    else
    {
        if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( address );
#if DEBUG
        if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
                                                addressMap, address, length );
#endif
        err = mach_vm_deallocate( addressMap, address, length );
    }

#if IOTRACKING
    IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

    return (err);
}
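/*
 * redirect(true) marks the descriptor redirected and invalidates every
 * mapping so clients block in handleFault(); redirect(false) clears the flag,
 * wakes waiters, and, for kernel mappings backed by a device pager,
 * repopulates the pager so the pages become valid again.
 */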
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn            err = kIOReturnSuccess;
    IOMemoryMap *       mapping = 0;
    OSIterator *        iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            memory_object_t   pager;

            if( reserved)
                pager = (memory_object_t) reserved->dp.devicePager;
            else
                pager = MACH_PORT_NULL;

            while( (mapping = (IOMemoryMap *) iter->getNextObject()))
            {
                mapping->redirect( safeTask, doRedirect );
                if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
                {
                    err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
                }
            }

            iter->release();
        }
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

#ifndef __LP64__
    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;
#endif /* !__LP64__ */

    return( err );
}
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( fSuperMap) {
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!fAddress)    break;
            if (!fAddressMap) break;

            if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
              && (0 == (fOptions & kIOMapStatic)))
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                err = kIOReturnSuccess;
#if DEBUG
                IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
            }
            else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
            }
        }
        while (false);
        UNLOCK;
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && safeTask
     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn
IOMemoryMap::unmap( void )
{
    IOReturn    err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (kIOMapStatic & fOptions))) {

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}
void IOMemoryMap::taskDied( void )
{
    LOCK;
#if IOTRACKING
    if (fUserClientUnmap) unmap();
    else                  IOTrackingRemoveUser(gIOMapTracking, &fTracking);
#endif /* IOTRACKING */

    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress     = 0;
    UNLOCK;
}
IOReturn
IOMemoryMap::userClientUnmap( void )
{
    fUserClientUnmap = true;
    return (kIOReturnSuccess);
}
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always hold 2 references on a mapping.  When either of these references
// is released we need to free ourselves.
void IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}
void IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
        fMemory->removeMapping(this);
        UNLOCK;
        fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
        fOwner->removeMapping(this);
        UNLOCK;
    }

    if( fSuperMap)
        fSuperMap->release();

    if (fRedirUPL) {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}
IOByteCount
IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
    if (fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap
                && vm_map_is_64bit(fAddressMap)
                && (sizeof(IOVirtualAddress) < 8))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }
#endif /* !__LP64__ */

    return (fAddress);
}

#ifndef __LP64__
mach_vm_address_t
IOMemoryMap::getAddress()
{
    return( fAddress );
}

mach_vm_size_t
IOMemoryMap::getSize()
{
    return( fLength );
}
#endif /* !__LP64__ */
task_t
IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask);
}

IOOptionBits
IOMemoryMap::getMapOptions()
{
    return( fOptions);
}

IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}
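/*
 * copyCompatible() decides whether this existing mapping can satisfy a new
 * mapping request: same task map, matching read-only and cache options, a
 * matching address (unless kIOMapAnywhere), and an offset/length window that
 * fits inside this mapping. On an exact match it returns this (retained); on
 * a sub-range match it turns newMapping into a submap of this one.
 */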
IOMemoryMap *
IOMemoryMap::copyCompatible(
                IOMemoryMap * newMapping )
{
    task_t              task      = newMapping->getAddressTask();
    mach_vm_address_t   toAddress = newMapping->fAddress;
    IOOptionBits        _options  = newMapping->fOptions;
    mach_vm_size_t      _offset   = newMapping->fOffset;
    mach_vm_size_t      _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = fOffset + _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}
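/*
 * wireRange() wires or unwires the pages covering [offset, offset + length)
 * in the target map, aligned outward to page boundaries. A minimal sketch of
 * typical use, assuming `map` is an IOMemoryMap the caller owns (illustrative
 * only):
 *
 *     IOReturn ret = map->wireRange(kIODirectionOutIn, 0, map->getLength());
 *     // ... perform I/O against the wired range ...
 *     if (kIOReturnSuccess == ret) map->wireRange(0, 0, map->getLength());
 *
 * A zero direction in options selects vm_map_unwire().
 */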
IOReturn
IOMemoryMap::wireRange(
        uint32_t                options,
        mach_vm_size_t          offset,
        mach_vm_size_t          length)
{
    IOReturn kr;
    mach_vm_address_t start = trunc_page_64(fAddress + offset);
    mach_vm_address_t end   = round_page_64(fAddress + offset + length);
    vm_prot_t prot;

    prot = (kIODirectionOutIn & options);
    if (prot)
    {
        kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
    }
    else
    {
        kr = vm_map_unwire(fAddressMap, start, end, FALSE);
    }

    return kr;
}
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress   address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    gIOLastPage = IOGetLastPageNumber();
}
void IOMemoryDescriptor::free( void )
{
    if( _mappings) _mappings->release();

    if (reserved)
    {
        IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }
    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                    options | kIOMapStatic,
                                    0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options )
{
    return (createMappingInTask( kernel_task, 0,
                                options | kIOMapAnywhere,
                                0, getLength() ));
}
#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        atAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}
#endif /* !__LP64__ */
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
        task_t                  intoTask,
        mach_vm_address_t       atAddress,
        IOOptionBits            options,
        mach_vm_size_t          offset,
        mach_vm_size_t          length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
                        options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

#if DEBUG
    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
                this, atAddress, (uint32_t) options, offset, length);
#endif

    return (result);
}
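/*
 * Typical use of createMappingInTask(), assuming `md` is some
 * IOMemoryDescriptor the caller holds (illustrative sketch only):
 *
 *     IOMemoryMap * map;
 *     map = md->createMappingInTask(kernel_task, 0,
 *                                   kIOMapAnywhere | kIOMapReadOnly);
 *     if (map)
 *     {
 *         mach_vm_address_t va = map->getAddress();
 *         // ... use [va, va + map->getLength()) ...
 *         map->release();    // dropping the last reference removes the mapping
 *     }
 */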
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
        {
            upl_size_t          size = round_page(fLength);
            upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
                                            NULL, NULL,
                                            &flags, fMemory->getVMTag(kernel_map)))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                if ((false))
                    physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                fOffset = 0;
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                            options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                            offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if ((false) && physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}
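/*
 * makeMapping() is the funnel for all mapping creation. Three paths:
 * kIOMapStatic adopts the caller-described mapping as-is; kIOMapUnique (for
 * physical descriptors) builds a fresh sub-descriptor covering just the
 * requested range; otherwise an existing compatible mapping is reused via
 * copyCompatible() before falling back to doMap() on this descriptor.
 */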
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  __intoTask,
        IOVirtualAddress        __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor *  mapDesc = 0;
    __block IOMemoryMap * result  = 0;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            addr64_t    phys;
            IOByteCount physLen;

//          if (owner != this)          continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withAddressRange(
                                phys, length, getDirection() | kIOMemoryMapperNone, NULL);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if (_mappings) _mappings->iterateObjects(^(OSObject * object)
            {
                IOMemoryMap * lookMapping = (IOMemoryMap *) object;
                if ((result = lookMapping->copyCompatible(mapping)))
                {
                    addMapping(result);
                    result->setMemoryDescriptor(this, offset);
                    return (true);
                }
                return (false);
            });
            if (result || (options & kIOMapReference))
            {
                if (result != mapping)
                {
                    mapping->release();
                    mapping = NULL;
                }
                continue;
            }
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn
        kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            length,
                                 IODirection            direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(
                                        IOVirtualRange * ranges,
                                        UInt32           withCount,
                                        IODirection      direction,
                                        task_t           task,
                                        bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(     IOPhysicalRange * ranges,
                                                UInt32            withCount,
                                                IODirection       direction,
                                                bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                        IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */
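/*
 * serialize() snapshots the ranges under the lock, then emits them as an
 * OSArray of { "address", "length" } dictionaries. The serialized form looks
 * roughly like (illustrative):
 *
 *     <array>
 *       <dict>
 *         <key>address</key><integer>...</integer>
 *         <key>length</key><integer>...</integer>
 *       </dict>
 *       ...
 *     </array>
 */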
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2] = {0};
    OSObject *values[2] = {0};
    OSArray * array;
    vm_size_t vcopy_size;

    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy = NULL;

    unsigned int index, nRanges;
    bool result = false;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;

    array = OSArray::withCapacity(4);
    if (!array)  return (false);

    nRanges = _rangesCount;
    if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
        result = false;
        goto bail;
    }
    vcopy = (SerData *) IOMalloc(vcopy_size);
    if (vcopy == 0) {
        result = false;
        goto bail;
    }

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            mach_vm_address_t addr; mach_vm_size_t len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        array->setObject(dict);
        dict->release();
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;
    }

    result = array->serialize(s);

 bail:
    if (array)
        array->release();
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, vcopy_size);

    return result;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }