/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)

IOMultiMemoryDescriptor *
IOMultiMemoryDescriptor::withDescriptors(
	IOMemoryDescriptor ** descriptors,
	UInt32                withCount,
	IODirection           withDirection,
	bool                  asReference)
{
	//
	// Create a new IOMultiMemoryDescriptor.  The "buffer" is made up of
	// several memory descriptors that are chained end-to-end to make up a
	// single logical memory descriptor.
	//
	// Passing the ranges as a reference will avoid an extra allocation.
	//

	IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;

	if (me && me->initWithDescriptors(
		    /* descriptors   */ descriptors,
		    /* withCount     */ withCount,
		    /* withDirection */ withDirection,
		    /* asReference   */ asReference) == false) {
		me->release();
		me = 0;
	}

	return me;
}
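
/*
 * Editor's usage sketch (not part of the original file): a driver that wants
 * to treat two buffers it already owns as one logical I/O range. The names
 * "headerDesc" and "payloadDesc" are hypothetical sub-descriptors.
 */
#if 0 /* usage example only, not compiled */
IOMemoryDescriptor * parts[2] = { headerDesc, payloadDesc };

IOMultiMemoryDescriptor * md = IOMultiMemoryDescriptor::withDescriptors(
	parts, 2, kIODirectionOut, false /* copy the pointer array */);
if (md) {
	// md->getLength() is the sum of the sub-descriptor lengths; each
	// sub-descriptor was retained, so the caller may drop its own refs.
	md->release();
}
#endif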

bool
IOMultiMemoryDescriptor::initWithDescriptors(
	IOMemoryDescriptor ** descriptors,
	UInt32                withCount,
	IODirection           withDirection,
	bool                  asReference)
{
	unsigned     index;
	IOOptionBits copyFlags;

	//
	// Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of
	// several memory descriptors that are chained end-to-end to make up a
	// single logical memory descriptor.
	//
	// Passing the ranges as a reference will avoid an extra allocation.
	//

	assert(descriptors);

	// Release existing descriptors, if any.
	if (_descriptors) {
		for (unsigned index = 0; index < _descriptorsCount; index++) {
			_descriptors[index]->release();
		}

		if (_descriptorsIsAllocated) {
			IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
		}
	} else {
		// Ask our superclass' opinion.
		if (super::init() == false) {
			return false;
		}
	}

	// Initialize our minimal state.

	_descriptors            = 0;
	_descriptorsCount       = withCount;
	_descriptorsIsAllocated = asReference ? false : true;
	_flags                  = withDirection;
#ifndef __LP64__
	_direction              = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
	_length                 = 0;
	_tag                    = 0;

	if (asReference) {
		_descriptors = descriptors;
	} else {
		_descriptors = IONew(IOMemoryDescriptor *, withCount);
		if (_descriptors == 0) {
			return false;
		}

		bcopy( /* from  */ descriptors,
		    /* to    */ _descriptors,
		    /* bytes */ withCount * sizeof(IOMemoryDescriptor *));
	}

	for (index = 0; index < withCount; index++) {
		descriptors[index]->retain();
		_length += descriptors[index]->getLength();
		if (!_tag) {
			_tag = descriptors[index]->getTag();
		}
		assert(descriptors[index]->getDirection() ==
		    (withDirection & kIOMemoryDirectionMask));
	}

	// All sub-descriptors must agree on pageability; reject a mix.
	enum { kCopyFlags = kIOMemoryBufferPageable };
	copyFlags = 0;
	for (index = 0; index < withCount; index++) {
		if (!index) {
			copyFlags = (kCopyFlags & descriptors[index]->_flags);
		} else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) {
			break;
		}
	}
	if (index < withCount) {
		return false;
	}
	_flags |= copyFlags;

	return true;
}

void
IOMultiMemoryDescriptor::free()
{
	//
	// Free all of this object's outstanding resources.
	//

	if (_descriptors) {
		for (unsigned index = 0; index < _descriptorsCount; index++) {
			_descriptors[index]->release();
		}

		if (_descriptorsIsAllocated) {
			IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
		}
	}

	super::free();
}

IOReturn
IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
	//
	// Prepare the memory for an I/O transfer.
	//
	// This involves paging in the memory and wiring it down for the duration
	// of the transfer.  The complete() method finishes the processing of the
	// memory after the I/O transfer finishes.
	//

	unsigned index;
	IOReturn status = kIOReturnInternalError;
	IOReturn statusUndo;

	if (forDirection == kIODirectionNone) {
		forDirection = getDirection();
	}

	for (index = 0; index < _descriptorsCount; index++) {
		status = _descriptors[index]->prepare(forDirection);
		if (status != kIOReturnSuccess) {
			break;
		}
	}

	// On failure, unwind the descriptors that were already prepared.
	if (status != kIOReturnSuccess) {
		for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) {
			statusUndo = _descriptors[indexUndo]->complete(forDirection);
			assert(statusUndo == kIOReturnSuccess);
		}
	}

	return status;
}

IOReturn
IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
	//
	// Complete processing of the memory after an I/O transfer finishes.
	//
	// This method shouldn't be called unless a prepare() was previously
	// issued; the prepare() and complete() must occur in pairs, before and
	// after an I/O transfer.
	//

	IOReturn status;
	IOReturn statusFinal = kIOReturnSuccess;

	if (forDirection == kIODirectionNone) {
		forDirection = getDirection();
	}

	for (unsigned index = 0; index < _descriptorsCount; index++) {
		status = _descriptors[index]->complete(forDirection);
		if (status != kIOReturnSuccess) {
			statusFinal = status;
		}
		assert(status == kIOReturnSuccess);
	}

	return statusFinal;
}

addr64_t
IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
    IOByteCount * length,
    IOOptionBits  options)
{
	//
	// This method returns the physical address of the byte at the given
	// offset into the memory, and optionally the length of the physically
	// contiguous segment from that offset.
	//

	assert(offset <= _length);

	// Walk the chain until the offset falls inside a sub-descriptor, then
	// delegate to it.
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		if (offset < _descriptors[index]->getLength()) {
			return _descriptors[index]->getPhysicalSegment(offset, length, options);
		}
		offset -= _descriptors[index]->getLength();
	}

	if (length) {
		*length = 0;
	}

	return 0;
}
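
/*
 * Editor's sketch: the aggregate is generally not physically contiguous
 * across sub-descriptor boundaries, so DMA setup code walks it one physical
 * segment at a time (hypothetical "md" as above).
 */
#if 0 /* usage example only, not compiled */
IOByteCount offset = 0;
while (offset < md->getLength()) {
	IOByteCount segLength;
	addr64_t    segPhys = md->getPhysicalSegment(offset, &segLength, kIOMemoryMapperNone);
	if (!segPhys || !segLength) {
		break;
	}
	// ... program one DMA element covering [segPhys, segPhys + segLength) ...
	offset += segLength;
}
#endif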

#include "IOKitKernelInternal.h"

IOReturn
IOMultiMemoryDescriptor::doMap(vm_map_t           __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length)
{
	IOMemoryMap *     mapping = (IOMemoryMap *) *__address;
	vm_map_t          map     = mapping->fAddressMap;
	mach_vm_size_t    offset  = mapping->fOffset;
	mach_vm_size_t    length  = mapping->fLength;
	mach_vm_address_t address = mapping->fAddress;

	kern_return_t     err;
	IOOptionBits      subOptions;
	mach_vm_size_t    mapOffset;
	mach_vm_size_t    bytesRemaining, chunk;
	mach_vm_address_t nextAddress;
	IOMemoryDescriptorMapAllocRef ref;
	vm_prot_t         prot;

	do {
		// Establish the protection for the whole range.
		prot = VM_PROT_READ;
		if (!(kIOMapReadOnly & options)) {
			prot |= VM_PROT_WRITE;
		}

		if (kIOMapOverwrite & options) {
			if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
				map = IOPageableMapForAddress(address);
			}
			err = KERN_SUCCESS;
		} else {
			// Reserve one contiguous virtual range to cover the
			// entire chain of sub-descriptors.
			ref.map     = map;
			ref.tag     = IOMemoryTag(map);
			ref.options = options;
			ref.size    = length;
			ref.prot    = prot;
			if (options & kIOMapAnywhere) {
				// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
				ref.mapped = 0;
			} else {
				ref.mapped = mapping->fAddress;
			}

			if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
				err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
			} else {
				err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
			}

			if (KERN_SUCCESS != err) {
				break;
			}

			address = ref.mapped;
			mapping->fAddress = address;
		}

		// Map each sub-descriptor back-to-back into the reserved range.
		mapOffset      = offset;
		bytesRemaining = length;
		nextAddress    = address;
		assert(mapOffset <= _length);
		subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

		for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++) {
			chunk = _descriptors[index]->getLength();
			if (mapOffset >= chunk) {
				// The requested offset is past this descriptor.
				mapOffset -= chunk;
				continue;
			}
			chunk -= mapOffset;
			if (chunk > bytesRemaining) {
				chunk = bytesRemaining;
			}
			IOMemoryMap * subMap;
			subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask,
			    nextAddress, subOptions, mapOffset, chunk);
			if (!subMap) {
				break;
			}
			subMap->release(); // kIOMapOverwrite means it will not deallocate

			bytesRemaining -= chunk;
			nextAddress    += chunk;
			mapOffset       = 0;
		}
		if (bytesRemaining) {
			err = kIOReturnUnderrun;
		}
	} while (false);

#if IOTRACKING
	if (kIOReturnSuccess == err) {
		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
	}
#endif /* IOTRACKING */

	return err;
}
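
/*
 * Editor's note with sketch: doMap() is not called directly; the
 * IOMemoryDescriptor mapping machinery invokes it when a client calls map()
 * or createMappingInTask(). The sub-descriptors end up mapped back-to-back
 * in one reserved virtual range (hypothetical "md" as above).
 */
#if 0 /* usage example only, not compiled */
IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
if (map) {
	void * base = (void *) map->getVirtualAddress();
	// base spans md->getLength() bytes covering all sub-descriptors
	map->release();
}
#endif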

IOReturn
IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn     err;
	IOOptionBits totalState, state;

	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		err = _descriptors[index]->setPurgeable(newState, &state);
		if (kIOReturnSuccess != err) {
			break;
		}

		// Fold the sub-descriptor states together: Empty dominates,
		// then Volatile, otherwise NonVolatile.
		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}
	if (oldState) {
		*oldState = totalState;
	}

	return err;
}
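
/*
 * Editor's sketch: a caller marking the whole aggregate volatile; *oldState
 * receives the folded prior state per the rules above (hypothetical "md").
 */
#if 0 /* usage example only, not compiled */
IOOptionBits oldState;
if (md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState) == kIOReturnSuccess) {
	// every sub-descriptor is now volatile and may be reclaimed under
	// memory pressure until marked non-volatile again
}
#endif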

IOReturn
IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
    IOByteCount * pDirtyPageCount)
{
	IOReturn    err;
	IOByteCount totalResidentPageCount, totalDirtyPageCount;
	IOByteCount residentPageCount, dirtyPageCount;

	err = kIOReturnSuccess;
	totalResidentPageCount = totalDirtyPageCount = 0;
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
		if (kIOReturnSuccess != err) {
			break;
		}
		totalResidentPageCount += residentPageCount;
		totalDirtyPageCount    += dirtyPageCount;
	}

	if (pResidentPageCount) {
		*pResidentPageCount = totalResidentPageCount;
	}
	if (pDirtyPageCount) {
		*pDirtyPageCount = totalDirtyPageCount;
	}

	return err;
}
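
/*
 * Editor's sketch: resident/dirty page counts are simple sums over the
 * sub-descriptors (hypothetical "md").
 */
#if 0 /* usage example only, not compiled */
IOByteCount resident, dirty;
if (md->getPageCounts(&resident, &dirty) == kIOReturnSuccess) {
	IOLog("multi descriptor: %llu resident / %llu dirty pages\n",
	    (uint64_t) resident, (uint64_t) dirty);
}
#endif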

uint64_t
IOMultiMemoryDescriptor::getPreparationID( void )
{
	if (!super::getKernelReserved()) {
		return kIOPreparationIDUnsupported;
	}

	// The aggregate has a preparation ID only if every sub-descriptor does.
	for (unsigned index = 0; index < _descriptorsCount; index++) {
		uint64_t preparationID = _descriptors[index]->getPreparationID();

		if (preparationID == kIOPreparationIDUnsupported) {
			return kIOPreparationIDUnsupported;
		}

		if (preparationID == kIOPreparationIDUnprepared) {
			return kIOPreparationIDUnprepared;
		}
	}

	super::setPreparationID();

	return super::getPreparationID();
}